Commit | Line | Data |
---|---|---|
8adaf747 BW |
1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* Copyright(c) 2020 Intel Corporation. */ | |
3 | ||
4 | #ifndef __CXL_H__ | |
5 | #define __CXL_H__ | |
6 | ||
8fdcb170 | 7 | #include <linux/libnvdimm.h> |
8adaf747 BW |
8 | #include <linux/bitfield.h> |
9 | #include <linux/bitops.h> | |
80d10a6c | 10 | #include <linux/log2.h> |
80aa780d | 11 | #include <linux/node.h> |
8adaf747 BW |
12 | #include <linux/io.h> |
13 | ||
4812be97 DW |
14 | /** |
15 | * DOC: cxl objects | |
16 | * | |
17 | * The CXL core objects like ports, decoders, and regions are shared | |
18 | * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers | |
19 | * (port-driver, region-driver, nvdimm object-drivers... etc). | |
20 | */ | |
21 | ||
d17d0540 DW |
22 | /* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */ |
23 | #define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K | |
24 | ||
08422378 BW |
25 | /* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers*/ |
26 | #define CXL_CM_OFFSET 0x1000 | |
27 | #define CXL_CM_CAP_HDR_OFFSET 0x0 | |
28 | #define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0) | |
29 | #define CM_CAP_HDR_CAP_ID 1 | |
30 | #define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16) | |
31 | #define CM_CAP_HDR_CAP_VERSION 1 | |
32 | #define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20) | |
33 | #define CM_CAP_HDR_CACHE_MEM_VERSION 1 | |
34 | #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24) | |
35 | #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20) | |
36 | ||
bd09626b | 37 | #define CXL_CM_CAP_CAP_ID_RAS 0x2 |
08422378 BW |
38 | #define CXL_CM_CAP_CAP_ID_HDM 0x5 |
39 | #define CXL_CM_CAP_CAP_HDM_VERSION 1 | |
40 | ||
41 | /* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */ | |
42 | #define CXL_HDM_DECODER_CAP_OFFSET 0x0 | |
43 | #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0) | |
44 | #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) | |
d17d0540 DW |
45 | #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) |
46 | #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) | |
47 | #define CXL_HDM_DECODER_CTRL_OFFSET 0x4 | |
48 | #define CXL_HDM_DECODER_ENABLE BIT(1) | |
49 | #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) | |
50 | #define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14) | |
51 | #define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18) | |
52 | #define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c) | |
53 | #define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20) | |
54 | #define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0) | |
55 | #define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4) | |
56 | #define CXL_HDM_DECODER0_CTRL_LOCK BIT(8) | |
57 | #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) | |
58 | #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) | |
176baefb | 59 | #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11) |
cecbb5da | 60 | #define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12) |
d17d0540 DW |
61 | #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) |
62 | #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) | |
9c57cde0 DW |
63 | #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i) |
64 | #define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i) | |
08422378 | 65 | |
3b39fd6c AM |
66 | /* HDM decoder control register constants CXL 3.0 8.2.5.19.7 */ |
67 | #define CXL_DECODER_MIN_GRANULARITY 256 | |
68 | #define CXL_DECODER_MAX_ENCODED_IG 6 | |
69 | ||
6423035f BW |
70 | static inline int cxl_hdm_decoder_count(u32 cap_hdr) |
71 | { | |
72 | int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr); | |
73 | ||
74 | return val ? val * 2 : 1; | |
75 | } | |
76 | ||
419af595 | 77 | /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */ |
83351ddb | 78 | static inline int eig_to_granularity(u16 eig, unsigned int *granularity) |
419af595 | 79 | { |
83351ddb | 80 | if (eig > CXL_DECODER_MAX_ENCODED_IG) |
419af595 | 81 | return -EINVAL; |
83351ddb | 82 | *granularity = CXL_DECODER_MIN_GRANULARITY << eig; |
419af595 DW |
83 | return 0; |
84 | } | |
85 | ||
86 | /* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */ | |
c99b2e8c | 87 | static inline int eiw_to_ways(u8 eiw, unsigned int *ways) |
419af595 | 88 | { |
c99b2e8c | 89 | switch (eiw) { |
419af595 | 90 | case 0 ... 4: |
c99b2e8c | 91 | *ways = 1 << eiw; |
419af595 DW |
92 | break; |
93 | case 8 ... 10: | |
c99b2e8c | 94 | *ways = 3 << (eiw - 8); |
419af595 DW |
95 | break; |
96 | default: | |
97 | return -EINVAL; | |
98 | } | |
99 | ||
100 | return 0; | |
101 | } | |
102 | ||
83351ddb | 103 | static inline int granularity_to_eig(int granularity, u16 *eig) |
80d10a6c | 104 | { |
83351ddb DJ |
105 | if (granularity > SZ_16K || granularity < CXL_DECODER_MIN_GRANULARITY || |
106 | !is_power_of_2(granularity)) | |
80d10a6c | 107 | return -EINVAL; |
83351ddb | 108 | *eig = ilog2(granularity) - 8; |
80d10a6c BW |
109 | return 0; |
110 | } | |
111 | ||
c99b2e8c | 112 | static inline int ways_to_eiw(unsigned int ways, u8 *eiw) |
80d10a6c BW |
113 | { |
114 | if (ways > 16) | |
115 | return -EINVAL; | |
116 | if (is_power_of_2(ways)) { | |
c99b2e8c | 117 | *eiw = ilog2(ways); |
80d10a6c BW |
118 | return 0; |
119 | } | |
120 | if (ways % 3) | |
121 | return -EINVAL; | |
122 | ways /= 3; | |
123 | if (!is_power_of_2(ways)) | |
124 | return -EINVAL; | |
c99b2e8c | 125 | *eiw = ilog2(ways) + 8; |
80d10a6c BW |
126 | return 0; |
127 | } | |
128 | ||
bd09626b DW |
/* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */
#define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0
#define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4
#define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK BIT(8)
#define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8
#define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC
#define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0)
#define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10
#define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0)
#define CXL_RAS_CAP_CONTROL_OFFSET 0x14
#define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0)
#define CXL_RAS_HEADER_LOG_OFFSET 0x18
#define CXL_RAS_CAPABILITY_LENGTH 0x58
#define CXL_HEADERLOG_SIZE SZ_512
/* parenthesized so the expansion composes safely inside larger expressions */
#define CXL_HEADERLOG_SIZE_U32 (SZ_512 / sizeof(u32))
bd09626b | 147 | |
8adaf747 BW |
148 | /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */ |
149 | #define CXLDEV_CAP_ARRAY_OFFSET 0x0 | |
150 | #define CXLDEV_CAP_ARRAY_CAP_ID 0 | |
151 | #define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0) | |
152 | #define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32) | |
153 | /* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */ | |
154 | #define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0) | |
155 | /* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */ | |
156 | #define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1 | |
157 | #define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2 | |
158 | #define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3 | |
159 | #define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000 | |
160 | ||
6ebe28f9 IW |
161 | /* CXL 3.0 8.2.8.3.1 Event Status Register */ |
162 | #define CXLDEV_DEV_EVENT_STATUS_OFFSET 0x00 | |
163 | #define CXLDEV_EVENT_STATUS_INFO BIT(0) | |
164 | #define CXLDEV_EVENT_STATUS_WARN BIT(1) | |
165 | #define CXLDEV_EVENT_STATUS_FAIL BIT(2) | |
166 | #define CXLDEV_EVENT_STATUS_FATAL BIT(3) | |
167 | ||
168 | #define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO | \ | |
169 | CXLDEV_EVENT_STATUS_WARN | \ | |
170 | CXLDEV_EVENT_STATUS_FAIL | \ | |
171 | CXLDEV_EVENT_STATUS_FATAL) | |
172 | ||
a49aa814 DB |
173 | /* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */ |
174 | #define CXLDEV_EVENT_INT_MODE_MASK GENMASK(1, 0) | |
175 | #define CXLDEV_EVENT_INT_MSGNUM_MASK GENMASK(7, 4) | |
176 | ||
8adaf747 BW |
177 | /* CXL 2.0 8.2.8.4 Mailbox Registers */ |
178 | #define CXLDEV_MBOX_CAPS_OFFSET 0x00 | |
179 | #define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0) | |
ccadf131 DB |
180 | #define CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6) |
181 | #define CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7) | |
8adaf747 BW |
182 | #define CXLDEV_MBOX_CTRL_OFFSET 0x04 |
183 | #define CXLDEV_MBOX_CTRL_DOORBELL BIT(0) | |
ccadf131 | 184 | #define CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2) |
8adaf747 BW |
185 | #define CXLDEV_MBOX_CMD_OFFSET 0x08 |
186 | #define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0) | |
187 | #define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16) | |
188 | #define CXLDEV_MBOX_STATUS_OFFSET 0x10 | |
ccadf131 | 189 | #define CXLDEV_MBOX_STATUS_BG_CMD BIT(0) |
8adaf747 BW |
190 | #define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32) |
191 | #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18 | |
ccadf131 DB |
192 | #define CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0) |
193 | #define CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16) | |
194 | #define CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32) | |
195 | #define CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48) | |
8adaf747 BW |
196 | #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20 |
197 | ||
8ac75dd6 | 198 | /* |
301e68dd KC |
199 | * Using struct_group() allows for per register-block-type helper routines, |
200 | * without requiring block-type agnostic code to include the prefix. | |
8ac75dd6 DW |
201 | */ |
202 | struct cxl_regs { | |
301e68dd KC |
203 | /* |
204 | * Common set of CXL Component register block base pointers | |
205 | * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure | |
bd09626b | 206 | * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure |
301e68dd KC |
207 | */ |
208 | struct_group_tagged(cxl_component_regs, component, | |
209 | void __iomem *hdm_decoder; | |
bd09626b | 210 | void __iomem *ras; |
301e68dd KC |
211 | ); |
212 | /* | |
213 | * Common set of CXL Device register block base pointers | |
214 | * @status: CXL 2.0 8.2.8.3 Device Status Registers | |
215 | * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers | |
216 | * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers | |
217 | */ | |
218 | struct_group_tagged(cxl_device_regs, device_regs, | |
219 | void __iomem *status, *mbox, *memdev; | |
220 | ); | |
1ad3f701 JC |
221 | |
222 | struct_group_tagged(cxl_pmu_regs, pmu_regs, | |
223 | void __iomem *pmu; | |
224 | ); | |
6c5f3aac TB |
225 | |
226 | /* | |
227 | * RCH downstream port specific RAS register | |
228 | * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB | |
229 | */ | |
230 | struct_group_tagged(cxl_rch_regs, rch_regs, | |
231 | void __iomem *dport_aer; | |
232 | ); | |
8ac75dd6 DW |
233 | }; |
234 | ||
30af9729 IW |
/**
 * struct cxl_reg_map - discovered location of one register block
 * @valid: the block was found during register probing
 * @id: register block identifier
 * @offset: offset of the block relative to the register base
 * @size: length of the register block
 */
struct cxl_reg_map {
	bool valid;
	int id;
	unsigned long offset;
	unsigned long size;
};
241 | ||
08422378 BW |
/* Component register block sub-maps (CXL 2.0 8.2.5) */
struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
	struct cxl_reg_map ras;
};
246 | ||
30af9729 IW |
/* Device register block sub-maps (CXL 2.0 8.2.8) */
struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};
252 | ||
1ad3f701 JC |
/* CXL Performance Monitoring Unit register block map */
struct cxl_pmu_reg_map {
	struct cxl_reg_map pmu;
};
256 | ||
a261e9a1 DW |
257 | /** |
258 | * struct cxl_register_map - DVSEC harvested register block mapping parameters | |
dd22581f | 259 | * @host: device for devm operations and logging |
a261e9a1 | 260 | * @base: virtual base of the register-block-BAR + @block_offset |
6c7f4f1e DW |
261 | * @resource: physical resource base of the register block |
262 | * @max_size: maximum mapping size to perform register search | |
a261e9a1 | 263 | * @reg_type: see enum cxl_regloc_type |
a261e9a1 DW |
264 | * @component_map: cxl_reg_map for component registers |
265 | * @device_map: cxl_reg_maps for device registers | |
1ad3f701 | 266 | * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units |
a261e9a1 | 267 | */ |
30af9729 | 268 | struct cxl_register_map { |
dd22581f | 269 | struct device *host; |
a261e9a1 | 270 | void __iomem *base; |
6c7f4f1e DW |
271 | resource_size_t resource; |
272 | resource_size_t max_size; | |
30af9729 | 273 | u8 reg_type; |
30af9729 | 274 | union { |
08422378 | 275 | struct cxl_component_reg_map component_map; |
30af9729 | 276 | struct cxl_device_reg_map device_map; |
1ad3f701 | 277 | struct cxl_pmu_reg_map pmu_map; |
30af9729 IW |
278 | }; |
279 | }; | |
280 | ||
08422378 BW |
281 | void cxl_probe_component_regs(struct device *dev, void __iomem *base, |
282 | struct cxl_component_reg_map *map); | |
30af9729 IW |
283 | void cxl_probe_device_regs(struct device *dev, void __iomem *base, |
284 | struct cxl_device_reg_map *map); | |
0c0df631 | 285 | int cxl_map_component_regs(const struct cxl_register_map *map, |
57340804 | 286 | struct cxl_component_regs *regs, |
a1554e9c | 287 | unsigned long map_mask); |
0c0df631 | 288 | int cxl_map_device_regs(const struct cxl_register_map *map, |
57340804 | 289 | struct cxl_device_regs *regs); |
e8db0701 | 290 | int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs); |
399d34eb | 291 | |
303ebc1b | 292 | enum cxl_regloc_type; |
d717d7f3 JC |
293 | int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type); |
294 | int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, | |
295 | struct cxl_register_map *map, int index); | |
303ebc1b BW |
296 | int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, |
297 | struct cxl_register_map *map); | |
d076bb8c | 298 | int cxl_setup_regs(struct cxl_register_map *map); |
eb4663b0 RR |
299 | struct cxl_dport; |
300 | resource_size_t cxl_rcd_component_reg_phys(struct device *dev, | |
301 | struct cxl_dport *dport); | |
d5b1a271 | 302 | |
4812be97 | 303 | #define CXL_RESOURCE_NONE ((resource_size_t) -1) |
7d4b5ca2 | 304 | #define CXL_TARGET_STRLEN 20 |
4812be97 | 305 | |
40ba17af DW |
306 | /* |
307 | * cxl_decoder flags that define the type of memory / devices this | |
308 | * decoder supports as well as configuration lock status See "CXL 2.0 | |
309 | * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details. | |
a32320b7 DW |
310 | * Additionally indicate whether decoder settings were autodetected, |
311 | * user customized. | |
40ba17af DW |
312 | */ |
313 | #define CXL_DECODER_F_RAM BIT(0) | |
314 | #define CXL_DECODER_F_PMEM BIT(1) | |
315 | #define CXL_DECODER_F_TYPE2 BIT(2) | |
316 | #define CXL_DECODER_F_TYPE3 BIT(3) | |
317 | #define CXL_DECODER_F_LOCK BIT(4) | |
d17d0540 DW |
318 | #define CXL_DECODER_F_ENABLE BIT(5) |
319 | #define CXL_DECODER_F_MASK GENMASK(5, 0) | |
40ba17af DW |
320 | |
/*
 * HDM decoder target type: device-attached (accelerator) memory vs
 * host-only (expander) memory; encodings per CXL 2.0 8.2.5.12.7.
 */
enum cxl_decoder_type {
	CXL_DECODER_DEVMEM = 2,
	CXL_DECODER_HOSTONLYMEM = 3,
};
325 | ||
a5c25802 DW |
326 | /* |
327 | * Current specification goes up to 8, double that seems a reasonable | |
328 | * software max for the foreseeable future | |
329 | */ | |
330 | #define CXL_DECODER_MAX_INTERLEAVE 16 | |
331 | ||
529c0a44 | 332 | #define CXL_QOS_CLASS_INVALID -1 |
e7748305 | 333 | |
40ba17af | 334 | /** |
e636479e | 335 | * struct cxl_decoder - Common CXL HDM Decoder Attributes |
40ba17af DW |
336 | * @dev: this decoder's device |
337 | * @id: kernel device name id | |
e8b7ea58 | 338 | * @hpa_range: Host physical address range mapped by this decoder |
40ba17af DW |
339 | * @interleave_ways: number of cxl_dports in this decode |
340 | * @interleave_granularity: data stride per dport | |
341 | * @target_type: accelerator vs expander (type2 vs type3) selector | |
b9686e8c | 342 | * @region: currently assigned region for this decoder |
40ba17af | 343 | * @flags: memory type capabilities and locking |
176baefb DW |
344 | * @commit: device/decoder-type specific callback to commit settings to hw |
345 | * @reset: device/decoder-type specific callback to reset hw settings | |
346 | */ | |
40ba17af DW |
347 | struct cxl_decoder { |
348 | struct device dev; | |
349 | int id; | |
e50fe01e | 350 | struct range hpa_range; |
40ba17af DW |
351 | int interleave_ways; |
352 | int interleave_granularity; | |
353 | enum cxl_decoder_type target_type; | |
b9686e8c | 354 | struct cxl_region *region; |
40ba17af | 355 | unsigned long flags; |
176baefb DW |
356 | int (*commit)(struct cxl_decoder *cxld); |
357 | int (*reset)(struct cxl_decoder *cxld); | |
e636479e DW |
358 | }; |
359 | ||
b9686e8c DW |
360 | /* |
361 | * CXL_DECODER_DEAD prevents endpoints from being reattached to regions | |
362 | * while cxld_unregister() is running | |
363 | */ | |
2c866903 DW |
/* Endpoint decoder memory type / access-mode-partition selection */
enum cxl_decoder_mode {
	CXL_DECODER_NONE,
	CXL_DECODER_RAM,
	CXL_DECODER_PMEM,
	CXL_DECODER_MIXED,
	CXL_DECODER_DEAD,	/* see comment above: blocks region re-attach */
};
371 | ||
7d505f98 DW |
372 | static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode) |
373 | { | |
374 | static const char * const names[] = { | |
375 | [CXL_DECODER_NONE] = "none", | |
376 | [CXL_DECODER_RAM] = "ram", | |
377 | [CXL_DECODER_PMEM] = "pmem", | |
378 | [CXL_DECODER_MIXED] = "mixed", | |
379 | }; | |
380 | ||
381 | if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED) | |
382 | return names[mode]; | |
383 | return "mixed"; | |
384 | } | |
385 | ||
a32320b7 DW |
386 | /* |
387 | * Track whether this decoder is reserved for region autodiscovery, or | |
388 | * free for userspace provisioning. | |
389 | */ | |
390 | enum cxl_decoder_state { | |
391 | CXL_DECODER_STATE_MANUAL, | |
392 | CXL_DECODER_STATE_AUTO, | |
393 | }; | |
394 | ||
3bf65915 DW |
395 | /** |
396 | * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder | |
397 | * @cxld: base cxl_decoder_object | |
398 | * @dpa_res: actively claimed DPA span of this decoder | |
399 | * @skip: offset into @dpa_res where @cxld.hpa_range maps | |
2c866903 | 400 | * @mode: which memory type / access-mode-partition this decoder targets |
a32320b7 | 401 | * @state: autodiscovery state |
b9686e8c | 402 | * @pos: interleave position in @cxld.region |
3bf65915 DW |
403 | */ |
404 | struct cxl_endpoint_decoder { | |
405 | struct cxl_decoder cxld; | |
406 | struct resource *dpa_res; | |
407 | resource_size_t skip; | |
2c866903 | 408 | enum cxl_decoder_mode mode; |
a32320b7 | 409 | enum cxl_decoder_state state; |
b9686e8c | 410 | int pos; |
3bf65915 DW |
411 | }; |
412 | ||
e636479e DW |
413 | /** |
414 | * struct cxl_switch_decoder - Switch specific CXL HDM Decoder | |
415 | * @cxld: base cxl_decoder object | |
e636479e DW |
416 | * @nr_targets: number of elements in @target |
417 | * @target: active ordered target list in current decoder configuration | |
418 | * | |
419 | * The 'switch' decoder type represents the decoder instances of cxl_port's that | |
420 | * route from the root of a CXL memory decode topology to the endpoints. They | |
421 | * come in two flavors, root-level decoders, statically defined by platform | |
422 | * firmware, and mid-level decoders, where interleave-granularity, | |
423 | * interleave-width, and the target list are mutable. | |
424 | */ | |
425 | struct cxl_switch_decoder { | |
426 | struct cxl_decoder cxld; | |
be185c29 | 427 | int nr_targets; |
40ba17af DW |
428 | struct cxl_dport *target[]; |
429 | }; | |
430 | ||
f9db85bf AS |
431 | struct cxl_root_decoder; |
432 | typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd, | |
433 | int pos); | |
8fdcb170 | 434 | |
0f157c7f DW |
435 | /** |
436 | * struct cxl_root_decoder - Static platform CXL address decoder | |
437 | * @res: host / parent resource for region allocations | |
779dd20c | 438 | * @region_id: region id for next region provisioning event |
6aa41144 | 439 | * @calc_hb: which host bridge covers the n'th position by granularity |
f9db85bf | 440 | * @platform_data: platform specific configuration data |
a32320b7 | 441 | * @range_lock: sync region autodiscovery by address range |
529c0a44 | 442 | * @qos_class: QoS performance class cookie |
0f157c7f DW |
443 | * @cxlsd: base cxl switch decoder |
444 | */ | |
445 | struct cxl_root_decoder { | |
446 | struct resource *res; | |
779dd20c | 447 | atomic_t region_id; |
f9db85bf AS |
448 | cxl_calc_hb_fn calc_hb; |
449 | void *platform_data; | |
a32320b7 | 450 | struct mutex range_lock; |
529c0a44 | 451 | int qos_class; |
0f157c7f DW |
452 | struct cxl_switch_decoder cxlsd; |
453 | }; | |
454 | ||
dd5ba0eb BW |
455 | /* |
456 | * enum cxl_config_state - State machine for region configuration | |
457 | * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely | |
80d10a6c BW |
458 | * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more |
459 | * changes to interleave_ways or interleave_granularity | |
dd5ba0eb BW |
460 | * @CXL_CONFIG_ACTIVE: All targets have been added the region is now |
461 | * active | |
176baefb DW |
462 | * @CXL_CONFIG_RESET_PENDING: see commit_store() |
463 | * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware | |
dd5ba0eb BW |
464 | */ |
465 | enum cxl_config_state { | |
466 | CXL_CONFIG_IDLE, | |
80d10a6c | 467 | CXL_CONFIG_INTERLEAVE_ACTIVE, |
dd5ba0eb | 468 | CXL_CONFIG_ACTIVE, |
176baefb DW |
469 | CXL_CONFIG_RESET_PENDING, |
470 | CXL_CONFIG_COMMIT, | |
dd5ba0eb BW |
471 | }; |
472 | ||
473 | /** | |
474 | * struct cxl_region_params - region settings | |
475 | * @state: allow the driver to lockdown further parameter changes | |
476 | * @uuid: unique id for persistent regions | |
80d10a6c BW |
477 | * @interleave_ways: number of endpoints in the region |
478 | * @interleave_granularity: capacity each endpoint contributes to a stripe | |
23a22cd1 | 479 | * @res: allocated iomem capacity for this region |
038e6eb8 BS |
480 | * @targets: active ordered targets in current decoder configuration |
481 | * @nr_targets: number of targets | |
dd5ba0eb BW |
482 | * |
483 | * State transitions are protected by the cxl_region_rwsem | |
484 | */ | |
485 | struct cxl_region_params { | |
486 | enum cxl_config_state state; | |
487 | uuid_t uuid; | |
80d10a6c BW |
488 | int interleave_ways; |
489 | int interleave_granularity; | |
23a22cd1 | 490 | struct resource *res; |
b9686e8c DW |
491 | struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE]; |
492 | int nr_targets; | |
dd5ba0eb BW |
493 | }; |
494 | ||
a32320b7 DW |
495 | /* |
496 | * Indicate whether this region has been assembled by autodetection or | |
497 | * userspace assembly. Prevent endpoint decoders outside of automatic | |
498 | * detection from being added to the region. | |
499 | */ | |
d1257d09 | 500 | #define CXL_REGION_F_AUTO 0 |
a32320b7 | 501 | |
2ab47045 DW |
502 | /* |
503 | * Require that a committed region successfully complete a teardown once | |
504 | * any of its associated decoders have been torn down. This maintains | |
505 | * the commit state for the region since there are committed decoders, | |
506 | * but blocks cxl_region_probe(). | |
507 | */ | |
508 | #define CXL_REGION_F_NEEDS_RESET 1 | |
a32320b7 | 509 | |
779dd20c BW |
510 | /** |
511 | * struct cxl_region - CXL region | |
512 | * @dev: This region's device | |
513 | * @id: This region's id. Id is globally unique across all regions | |
514 | * @mode: Endpoint decoder allocation / access mode | |
515 | * @type: Endpoint decoder target type | |
f17b558d DW |
516 | * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown |
517 | * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge | |
d18bc74a | 518 | * @flags: Region state flags |
dd5ba0eb | 519 | * @params: active + config params for the region |
779dd20c BW |
520 | */ |
521 | struct cxl_region { | |
522 | struct device dev; | |
523 | int id; | |
524 | enum cxl_decoder_mode mode; | |
525 | enum cxl_decoder_type type; | |
f17b558d DW |
526 | struct cxl_nvdimm_bridge *cxl_nvb; |
527 | struct cxl_pmem_region *cxlr_pmem; | |
d18bc74a | 528 | unsigned long flags; |
dd5ba0eb | 529 | struct cxl_region_params params; |
779dd20c BW |
530 | }; |
531 | ||
8fdcb170 | 532 | struct cxl_nvdimm_bridge { |
2e52b625 | 533 | int id; |
8fdcb170 DW |
534 | struct device dev; |
535 | struct cxl_port *port; | |
536 | struct nvdimm_bus *nvdimm_bus; | |
537 | struct nvdimm_bus_descriptor nd_desc; | |
8fdcb170 DW |
538 | }; |
539 | ||
b5807c80 DJ |
#define CXL_DEV_ID_LEN 19

/**
 * struct cxl_nvdimm - CXL memdev proxy for a libnvdimm nvdimm
 * @dev: this proxy's device
 * @cxlmd: CXL memdev backing this nvdimm
 * @dev_id: unique-id string for the nvdimm
 */
struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
};
547 | ||
/**
 * struct cxl_pmem_region_mapping - one memdev's contribution to a pmem region
 * @cxlmd: memdev providing this mapping's capacity
 * @cxl_nvd: nvdimm proxy for @cxlmd
 * @start: start address of the mapped span (NOTE(review): presumably a
 *	device-physical address — confirm against the region driver)
 * @size: length of the mapped span
 * @position: interleave position of this mapping within the region
 */
struct cxl_pmem_region_mapping {
	struct cxl_memdev *cxlmd;
	struct cxl_nvdimm *cxl_nvd;
	u64 start;
	u64 size;
	int position;
};
555 | ||
/**
 * struct cxl_pmem_region - pmem-mode presentation of a CXL region
 * @dev: this pmem region's device
 * @cxlr: parent CXL region
 * @nd_region: libnvdimm region registered for @cxlr
 * @hpa_range: host physical address range spanned by the region
 * @nr_mappings: number of entries in @mapping
 * @mapping: per-memdev spans that constitute the region
 */
struct cxl_pmem_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct nd_region *nd_region;
	struct range hpa_range;
	int nr_mappings;
	struct cxl_pmem_region_mapping mapping[];
};
564 | ||
09d09e04 DW |
/**
 * struct cxl_dax_region - dax-mode presentation of a CXL region
 * @dev: this dax region's device
 * @cxlr: parent CXL region
 * @hpa_range: host physical address range spanned by the region
 */
struct cxl_dax_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct range hpa_range;
};
570 | ||
4812be97 DW |
571 | /** |
572 | * struct cxl_port - logical collection of upstream port devices and | |
573 | * downstream port devices to construct a CXL memory | |
574 | * decode hierarchy. | |
575 | * @dev: this port's device | |
7481653d | 576 | * @uport_dev: PCI or platform device implementing the upstream port capability |
ee800010 | 577 | * @host_bridge: Shortcut to the platform attach point for this port |
4812be97 | 578 | * @id: id for port device-name |
7d4b5ca2 | 579 | * @dports: cxl_dport instances referenced by decoders |
2703c16c | 580 | * @endpoints: cxl_ep instances, endpoints that are a descendant of this port |
384e624b | 581 | * @regions: cxl_region_ref instances, regions mapped by this port |
1b58b4ca | 582 | * @parent_dport: dport that points to this port in the parent |
40ba17af | 583 | * @decoder_ida: allocator for decoder ids |
d8add492 | 584 | * @reg_map: component and ras register mapping parameters |
e4f6dfa9 | 585 | * @nr_dports: number of entries in @dports |
0c33b393 | 586 | * @hdm_end: track last allocated HDM decoder instance for allocation ordering |
176baefb | 587 | * @commit_end: cursor to track highest committed decoder for commit ordering |
2703c16c | 588 | * @dead: last ep has been removed, force port re-creation |
53fa1bff | 589 | * @depth: How deep this port is relative to the root. depth 0 is the root. |
c9700604 IW |
590 | * @cdat: Cached CDAT data |
591 | * @cdat_available: Should a CDAT attribute be available in sysfs | |
4d07a053 | 592 | * @pci_latency: Upstream latency in picoseconds |
4812be97 DW |
593 | */ |
594 | struct cxl_port { | |
595 | struct device dev; | |
7481653d | 596 | struct device *uport_dev; |
ee800010 | 597 | struct device *host_bridge; |
4812be97 | 598 | int id; |
39178585 | 599 | struct xarray dports; |
256d0e9e | 600 | struct xarray endpoints; |
384e624b | 601 | struct xarray regions; |
1b58b4ca | 602 | struct cxl_dport *parent_dport; |
40ba17af | 603 | struct ida decoder_ida; |
d8add492 | 604 | struct cxl_register_map reg_map; |
e4f6dfa9 | 605 | int nr_dports; |
0c33b393 | 606 | int hdm_end; |
176baefb | 607 | int commit_end; |
2703c16c | 608 | bool dead; |
53fa1bff | 609 | unsigned int depth; |
c9700604 IW |
610 | struct cxl_cdat { |
611 | void *table; | |
612 | size_t length; | |
613 | } cdat; | |
614 | bool cdat_available; | |
4d07a053 | 615 | long pci_latency; |
4812be97 DW |
616 | }; |
617 | ||
79081590 DJ |
618 | /** |
619 | * struct cxl_root - logical collection of root cxl_port items | |
620 | * | |
621 | * @port: cxl_port member | |
622 | * @ops: cxl root operations | |
623 | */ | |
624 | struct cxl_root { | |
625 | struct cxl_port port; | |
626 | const struct cxl_root_ops *ops; | |
627 | }; | |
628 | ||
/*
 * Convert an embedded @port back to its containing cxl_root; only
 * valid for a port allocated as part of a struct cxl_root.
 */
static inline struct cxl_root *
to_cxl_root(const struct cxl_port *port)
{
	return container_of(port, struct cxl_root, port);
}
634 | ||
44cd71ef DJ |
/**
 * struct cxl_root_ops - platform root-port operations
 * @qos_class: translate @entries access coordinates in @coord into
 *	platform QoS class ids written to @qos_class
 */
struct cxl_root_ops {
	int (*qos_class)(struct cxl_root *cxl_root,
			 struct access_coordinate *coord, int entries,
			 int *qos_class);
};
640 | ||
39178585 DW |
/*
 * Lookup the cxl_dport registered in @port->dports under the
 * downstream port device pointer; NULL if none is registered.
 */
static inline struct cxl_dport *
cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
{
	return xa_load(&port->dports, (unsigned long)dport_dev);
}
646 | ||
06193378 DW |
/* Root Complex Register Block (RCRB) layout data, see cxl_dport @rcrb */
struct cxl_rcrb_info {
	resource_size_t base;	/* physical base of the RCRB */
	u16 aer_cap;		/* AER capability location within the RCRB */
};
651 | ||
7d4b5ca2 DW |
652 | /** |
653 | * struct cxl_dport - CXL downstream port | |
227db574 | 654 | * @dport_dev: PCI bridge or firmware device representing the downstream link |
d8add492 | 655 | * @reg_map: component and ras register mapping parameters |
7d4b5ca2 | 656 | * @port_id: unique hardware identifier for dport in decoder target list |
06193378 | 657 | * @rcrb: Data about the Root Complex Register Block layout |
d5b1a271 | 658 | * @rch: Indicate whether this dport was enumerated in RCH or VH mode |
7d4b5ca2 | 659 | * @port: reference to cxl_port that contains this downstream port |
6c5f3aac | 660 | * @regs: Dport parsed register blocks |
80aa780d | 661 | * @sw_coord: access coordinates (performance) for switch from CDAT |
1037b82f | 662 | * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge) |
4d07a053 | 663 | * @link_latency: calculated PCIe downstream latency |
7d4b5ca2 DW |
664 | */ |
665 | struct cxl_dport { | |
227db574 | 666 | struct device *dport_dev; |
d8add492 | 667 | struct cxl_register_map reg_map; |
7d4b5ca2 | 668 | int port_id; |
06193378 | 669 | struct cxl_rcrb_info rcrb; |
d5b1a271 | 670 | bool rch; |
7d4b5ca2 | 671 | struct cxl_port *port; |
6c5f3aac | 672 | struct cxl_regs regs; |
80aa780d | 673 | struct access_coordinate sw_coord; |
1037b82f | 674 | struct access_coordinate hb_coord; |
4d07a053 | 675 | long link_latency; |
7d4b5ca2 DW |
676 | }; |
677 | ||
2703c16c DW |
678 | /** |
679 | * struct cxl_ep - track an endpoint's interest in a port | |
680 | * @ep: device that hosts a generic CXL endpoint (expander or accelerator) | |
de516b40 | 681 | * @dport: which dport routes to this endpoint on @port |
7f8faf96 DW |
682 | * @next: cxl switch port across the link attached to @dport NULL if |
683 | * attached to an endpoint | |
2703c16c DW |
684 | */ |
685 | struct cxl_ep { | |
686 | struct device *ep; | |
de516b40 | 687 | struct cxl_dport *dport; |
7f8faf96 | 688 | struct cxl_port *next; |
2703c16c DW |
689 | }; |
690 | ||
384e624b DW |
691 | /** |
692 | * struct cxl_region_ref - track a region's interest in a port | |
693 | * @port: point in topology to install this reference | |
694 | * @decoder: decoder assigned for @region in @port | |
695 | * @region: region for this reference | |
696 | * @endpoints: cxl_ep references for region members beneath @port | |
27b3f8d1 | 697 | * @nr_targets_set: track how many targets have been programmed during setup |
384e624b DW |
698 | * @nr_eps: number of endpoints beneath @port |
699 | * @nr_targets: number of distinct targets needed to reach @nr_eps | |
700 | */ | |
701 | struct cxl_region_ref { | |
702 | struct cxl_port *port; | |
703 | struct cxl_decoder *decoder; | |
704 | struct cxl_region *region; | |
705 | struct xarray endpoints; | |
27b3f8d1 | 706 | int nr_targets_set; |
384e624b DW |
707 | int nr_eps; |
708 | int nr_targets; | |
709 | }; | |
710 | ||
d54c1bbe BW |
711 | /* |
712 | * The platform firmware device hosting the root is also the top of the | |
713 | * CXL port topology. All other CXL ports have another CXL port as their | |
7481653d | 714 | * parent and their ->uport_dev / host device is out-of-line of the port |
d54c1bbe BW |
715 | * ancestry. |
716 | */ | |
717 | static inline bool is_cxl_root(struct cxl_port *port) | |
718 | { | |
7481653d | 719 | return port->uport_dev == port->dev.parent; |
d54c1bbe BW |
720 | } |
721 | ||
458ba818 | 722 | int cxl_num_decoders_committed(struct cxl_port *port); |
2a81ada3 GKH |
723 | bool is_cxl_port(const struct device *dev); |
724 | struct cxl_port *to_cxl_port(const struct device *dev); | |
98d2d3a2 | 725 | struct pci_bus; |
7481653d | 726 | int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, |
5ff7316f DW |
727 | struct pci_bus *bus); |
728 | struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port); | |
7481653d DW |
729 | struct cxl_port *devm_cxl_add_port(struct device *host, |
730 | struct device *uport_dev, | |
4812be97 | 731 | resource_size_t component_reg_phys, |
1b58b4ca | 732 | struct cxl_dport *parent_dport); |
79081590 DJ |
733 | struct cxl_root *devm_cxl_add_root(struct device *host, |
734 | const struct cxl_root_ops *ops); | |
44cd71ef | 735 | struct cxl_root *find_cxl_root(struct cxl_port *port); |
98856b2e DJ |
736 | void put_cxl_root(struct cxl_root *cxl_root); |
737 | DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T)) | |
738 | ||
2703c16c | 739 | int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); |
4029c32f DW |
740 | void cxl_bus_rescan(void); |
741 | void cxl_bus_drain(void); | |
733b57f2 RR |
742 | struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev, |
743 | struct cxl_dport **dport); | |
1b58b4ca DW |
744 | struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, |
745 | struct cxl_dport **dport); | |
8dd2bc0f | 746 | bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); |
2703c16c | 747 | |
664bf115 | 748 | struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, |
98d2d3a2 DW |
749 | struct device *dport, int port_id, |
750 | resource_size_t component_reg_phys); | |
d5b1a271 RR |
751 | struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, |
752 | struct device *dport_dev, int port_id, | |
d5b1a271 | 753 | resource_size_t rcrb); |
2703c16c | 754 | |
f05fd10d RR |
#ifdef CONFIG_PCIEAER_CXL
void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
#else
/* Without PCIe AER integration, parent dport setup is a no-op for callers */
static inline void cxl_setup_parent_dport(struct device *host,
					  struct cxl_dport *dport) { }
#endif
761 | ||
40ba17af | 762 | struct cxl_decoder *to_cxl_decoder(struct device *dev); |
0f157c7f | 763 | struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); |
3d8f7cca | 764 | struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); |
3bf65915 | 765 | struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); |
8fdcb170 | 766 | bool is_root_decoder(struct device *dev); |
3d8f7cca | 767 | bool is_switch_decoder(struct device *dev); |
8ae3cebc | 768 | bool is_endpoint_decoder(struct device *dev); |
0f157c7f | 769 | struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, |
f9db85bf AS |
770 | unsigned int nr_targets, |
771 | cxl_calc_hb_fn calc_hb); | |
772 | struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos); | |
e636479e DW |
773 | struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, |
774 | unsigned int nr_targets); | |
48667f67 | 775 | int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); |
3bf65915 | 776 | struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); |
d17d0540 | 777 | int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); |
48667f67 | 778 | int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); |
8dd2bc0f BW |
779 | int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); |
780 | ||
59c3368b DJ |
/**
 * struct cxl_endpoint_dvsec_info - Cached DVSEC info
 * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
 * @ranges: Number of active HDM ranges this device uses.
 * @port: endpoint port associated with this info instance
 * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
 *		 (at most two ranges are cached, hence the fixed [2] array)
 */
struct cxl_endpoint_dvsec_info {
	bool mem_enabled;
	int ranges;
	struct cxl_port *port;
	struct range dvsec_range[2];
};
794 | ||
d17d0540 | 795 | struct cxl_hdm; |
4474ce56 DJ |
796 | struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, |
797 | struct cxl_endpoint_dvsec_info *info); | |
b777e9be DJ |
798 | int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, |
799 | struct cxl_endpoint_dvsec_info *info); | |
664bf115 | 800 | int devm_cxl_add_passthrough_decoder(struct cxl_port *port); |
59c3368b DJ |
801 | int cxl_dvsec_rr_decode(struct device *dev, int dvsec, |
802 | struct cxl_endpoint_dvsec_info *info); | |
40ba17af | 803 | |
779dd20c BW |
804 | bool is_cxl_region(struct device *dev); |
805 | ||
b39cb105 | 806 | extern struct bus_type cxl_bus_type; |
6af7139c DW |
807 | |
/**
 * struct cxl_driver - driver for a device on the CXL bus
 * @name: driver name, used for binding and sysfs
 * @probe: invoked when a matching device is found on the CXL bus
 * @remove: invoked when the device goes away or the driver is unregistered
 * @drv: embedded generic device_driver (see to_cxl_drv())
 * @id: CXL_DEVICE_* type this driver services
 */
struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};
815 | ||
/* Recover the containing cxl_driver from its embedded device_driver */
static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
{
	return container_of(drv, struct cxl_driver, drv);
}
820 | ||
821 | int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, | |
822 | const char *modname); | |
823 | #define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME) | |
824 | void cxl_driver_unregister(struct cxl_driver *cxl_drv); | |
825 | ||
c57cae78 BW |
826 | #define module_cxl_driver(__cxl_driver) \ |
827 | module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister) | |
828 | ||
21083f51 DW |
829 | #define CXL_DEVICE_NVDIMM_BRIDGE 1 |
830 | #define CXL_DEVICE_NVDIMM 2 | |
54cdbf84 BW |
831 | #define CXL_DEVICE_PORT 3 |
832 | #define CXL_DEVICE_ROOT 4 | |
8dd2bc0f | 833 | #define CXL_DEVICE_MEMORY_EXPANDER 5 |
8d48817d | 834 | #define CXL_DEVICE_REGION 6 |
04ad63f0 | 835 | #define CXL_DEVICE_PMEM_REGION 7 |
09d09e04 | 836 | #define CXL_DEVICE_DAX_REGION 8 |
1ad3f701 | 837 | #define CXL_DEVICE_PMU 9 |
8fdcb170 | 838 | |
6af7139c DW |
839 | #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*") |
840 | #define CXL_MODALIAS_FMT "cxl:t%d" | |
841 | ||
8fdcb170 DW |
842 | struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev); |
843 | struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, | |
844 | struct cxl_port *port); | |
21083f51 DW |
845 | struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); |
846 | bool is_cxl_nvdimm(struct device *dev); | |
53989fad | 847 | bool is_cxl_nvdimm_bridge(struct device *dev); |
f17b558d | 848 | int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); |
d35b495d | 849 | struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd); |
04ad63f0 DW |
850 | |
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
		      struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
#else
/*
 * Region support compiled out: inline stubs keep callers building, and
 * "not a region" / NULL / success answers are the correct degenerate
 * results when no regions can exist.
 */
static inline bool is_cxl_pmem_region(struct device *dev)
{
	return false;
}
static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	return NULL;
}
static inline int cxl_add_to_region(struct cxl_port *root,
				    struct cxl_endpoint_decoder *cxled)
{
	return 0;
}
static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	return NULL;
}
#endif
67dcdd4d | 876 | |
ad6f04c0 | 877 | void cxl_endpoint_parse_cdat(struct cxl_port *port); |
80aa780d | 878 | void cxl_switch_parse_cdat(struct cxl_port *port); |
ad6f04c0 | 879 | |
14a6960b DJ |
880 | int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, |
881 | struct access_coordinate *coord); | |
882 | ||
67dcdd4d DW |
883 | /* |
884 | * Unit test builds overrides this to __weak, find the 'strong' version | |
885 | * of these symbols in tools/testing/cxl/. | |
886 | */ | |
887 | #ifndef __mock | |
888 | #define __mock static | |
889 | #endif | |
3c5b9039 | 890 | |
8adaf747 | 891 | #endif /* __CXL_H__ */ |