Commit | Line | Data |
---|---|---|
8adaf747 BW |
1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* Copyright(c) 2020 Intel Corporation. */ | |
3 | ||
4 | #ifndef __CXL_H__ | |
5 | #define __CXL_H__ | |
6 | ||
8fdcb170 | 7 | #include <linux/libnvdimm.h> |
8adaf747 BW |
8 | #include <linux/bitfield.h> |
9 | #include <linux/bitops.h> | |
80d10a6c | 10 | #include <linux/log2.h> |
8adaf747 BW |
11 | #include <linux/io.h> |
12 | ||
4812be97 DW |
13 | /** |
14 | * DOC: cxl objects | |
15 | * | |
16 | * The CXL core objects like ports, decoders, and regions are shared | |
17 | * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers | |
18 | * (port-driver, region-driver, nvdimm object-drivers... etc). | |
19 | */ | |
20 | ||
d17d0540 DW |
21 | /* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */ |
22 | #define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K | |
23 | ||
08422378 BW |
24 | /* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers*/ |
25 | #define CXL_CM_OFFSET 0x1000 | |
26 | #define CXL_CM_CAP_HDR_OFFSET 0x0 | |
27 | #define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0) | |
28 | #define CM_CAP_HDR_CAP_ID 1 | |
29 | #define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16) | |
30 | #define CM_CAP_HDR_CAP_VERSION 1 | |
31 | #define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20) | |
32 | #define CM_CAP_HDR_CACHE_MEM_VERSION 1 | |
33 | #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24) | |
34 | #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20) | |
35 | ||
bd09626b | 36 | #define CXL_CM_CAP_CAP_ID_RAS 0x2 |
08422378 BW |
37 | #define CXL_CM_CAP_CAP_ID_HDM 0x5 |
38 | #define CXL_CM_CAP_CAP_HDM_VERSION 1 | |
39 | ||
40 | /* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */ | |
41 | #define CXL_HDM_DECODER_CAP_OFFSET 0x0 | |
42 | #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0) | |
43 | #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) | |
d17d0540 DW |
44 | #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) |
45 | #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) | |
46 | #define CXL_HDM_DECODER_CTRL_OFFSET 0x4 | |
47 | #define CXL_HDM_DECODER_ENABLE BIT(1) | |
48 | #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) | |
49 | #define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14) | |
50 | #define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18) | |
51 | #define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c) | |
52 | #define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20) | |
53 | #define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0) | |
54 | #define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4) | |
55 | #define CXL_HDM_DECODER0_CTRL_LOCK BIT(8) | |
56 | #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) | |
57 | #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) | |
176baefb | 58 | #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11) |
d17d0540 DW |
59 | #define CXL_HDM_DECODER0_CTRL_TYPE BIT(12) |
60 | #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) | |
61 | #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) | |
9c57cde0 DW |
62 | #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i) |
63 | #define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i) | |
08422378 | 64 | |
3b39fd6c AM |
65 | /* HDM decoder control register constants CXL 3.0 8.2.5.19.7 */ |
66 | #define CXL_DECODER_MIN_GRANULARITY 256 | |
67 | #define CXL_DECODER_MAX_ENCODED_IG 6 | |
68 | ||
6423035f BW |
69 | static inline int cxl_hdm_decoder_count(u32 cap_hdr) |
70 | { | |
71 | int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr); | |
72 | ||
73 | return val ? val * 2 : 1; | |
74 | } | |
75 | ||
419af595 | 76 | /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */ |
83351ddb | 77 | static inline int eig_to_granularity(u16 eig, unsigned int *granularity) |
419af595 | 78 | { |
83351ddb | 79 | if (eig > CXL_DECODER_MAX_ENCODED_IG) |
419af595 | 80 | return -EINVAL; |
83351ddb | 81 | *granularity = CXL_DECODER_MIN_GRANULARITY << eig; |
419af595 DW |
82 | return 0; |
83 | } | |
84 | ||
85 | /* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */ | |
c99b2e8c | 86 | static inline int eiw_to_ways(u8 eiw, unsigned int *ways) |
419af595 | 87 | { |
c99b2e8c | 88 | switch (eiw) { |
419af595 | 89 | case 0 ... 4: |
c99b2e8c | 90 | *ways = 1 << eiw; |
419af595 DW |
91 | break; |
92 | case 8 ... 10: | |
c99b2e8c | 93 | *ways = 3 << (eiw - 8); |
419af595 DW |
94 | break; |
95 | default: | |
96 | return -EINVAL; | |
97 | } | |
98 | ||
99 | return 0; | |
100 | } | |
101 | ||
83351ddb | 102 | static inline int granularity_to_eig(int granularity, u16 *eig) |
80d10a6c | 103 | { |
83351ddb DJ |
104 | if (granularity > SZ_16K || granularity < CXL_DECODER_MIN_GRANULARITY || |
105 | !is_power_of_2(granularity)) | |
80d10a6c | 106 | return -EINVAL; |
83351ddb | 107 | *eig = ilog2(granularity) - 8; |
80d10a6c BW |
108 | return 0; |
109 | } | |
110 | ||
c99b2e8c | 111 | static inline int ways_to_eiw(unsigned int ways, u8 *eiw) |
80d10a6c BW |
112 | { |
113 | if (ways > 16) | |
114 | return -EINVAL; | |
115 | if (is_power_of_2(ways)) { | |
c99b2e8c | 116 | *eiw = ilog2(ways); |
80d10a6c BW |
117 | return 0; |
118 | } | |
119 | if (ways % 3) | |
120 | return -EINVAL; | |
121 | ways /= 3; | |
122 | if (!is_power_of_2(ways)) | |
123 | return -EINVAL; | |
c99b2e8c | 124 | *eiw = ilog2(ways) + 8; |
80d10a6c BW |
125 | return 0; |
126 | } | |
127 | ||
bd09626b DW |
128 | /* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */ |
129 | #define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0 | |
130 | #define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0)) | |
131 | #define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4 | |
132 | #define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0)) | |
248529ed | 133 | #define CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK BIT(8) |
bd09626b DW |
134 | #define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8 |
135 | #define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0)) | |
136 | #define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC | |
137 | #define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0) | |
138 | #define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10 | |
139 | #define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0) | |
140 | #define CXL_RAS_CAP_CONTROL_OFFSET 0x14 | |
2905cb52 | 141 | #define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0) |
bd09626b DW |
142 | #define CXL_RAS_HEADER_LOG_OFFSET 0x18 |
143 | #define CXL_RAS_CAPABILITY_LENGTH 0x58 | |
4a20bc3e DW |
#define CXL_HEADERLOG_SIZE SZ_512
/*
 * Parenthesized so the expansion composes safely in larger expressions
 * (e.g. "x % CXL_HEADERLOG_SIZE_U32" would otherwise parse as
 * "(x % SZ_512) / sizeof(u32)").
 */
#define CXL_HEADERLOG_SIZE_U32 (SZ_512 / sizeof(u32))
bd09626b | 146 | |
8adaf747 BW |
147 | /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */ |
148 | #define CXLDEV_CAP_ARRAY_OFFSET 0x0 | |
149 | #define CXLDEV_CAP_ARRAY_CAP_ID 0 | |
150 | #define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0) | |
151 | #define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32) | |
152 | /* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */ | |
153 | #define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0) | |
154 | /* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */ | |
155 | #define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1 | |
156 | #define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2 | |
157 | #define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3 | |
158 | #define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000 | |
159 | ||
6ebe28f9 IW |
160 | /* CXL 3.0 8.2.8.3.1 Event Status Register */ |
161 | #define CXLDEV_DEV_EVENT_STATUS_OFFSET 0x00 | |
162 | #define CXLDEV_EVENT_STATUS_INFO BIT(0) | |
163 | #define CXLDEV_EVENT_STATUS_WARN BIT(1) | |
164 | #define CXLDEV_EVENT_STATUS_FAIL BIT(2) | |
165 | #define CXLDEV_EVENT_STATUS_FATAL BIT(3) | |
166 | ||
167 | #define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO | \ | |
168 | CXLDEV_EVENT_STATUS_WARN | \ | |
169 | CXLDEV_EVENT_STATUS_FAIL | \ | |
170 | CXLDEV_EVENT_STATUS_FATAL) | |
171 | ||
a49aa814 DB |
172 | /* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */ |
173 | #define CXLDEV_EVENT_INT_MODE_MASK GENMASK(1, 0) | |
174 | #define CXLDEV_EVENT_INT_MSGNUM_MASK GENMASK(7, 4) | |
175 | ||
8adaf747 BW |
176 | /* CXL 2.0 8.2.8.4 Mailbox Registers */ |
177 | #define CXLDEV_MBOX_CAPS_OFFSET 0x00 | |
178 | #define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0) | |
179 | #define CXLDEV_MBOX_CTRL_OFFSET 0x04 | |
180 | #define CXLDEV_MBOX_CTRL_DOORBELL BIT(0) | |
181 | #define CXLDEV_MBOX_CMD_OFFSET 0x08 | |
182 | #define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0) | |
183 | #define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16) | |
184 | #define CXLDEV_MBOX_STATUS_OFFSET 0x10 | |
185 | #define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32) | |
186 | #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18 | |
187 | #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20 | |
188 | ||
8ac75dd6 | 189 | /* |
301e68dd KC |
190 | * Using struct_group() allows for per register-block-type helper routines, |
191 | * without requiring block-type agnostic code to include the prefix. | |
8ac75dd6 DW |
192 | */ |
193 | struct cxl_regs { | |
301e68dd KC |
194 | /* |
195 | * Common set of CXL Component register block base pointers | |
196 | * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure | |
bd09626b | 197 | * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure |
301e68dd KC |
198 | */ |
199 | struct_group_tagged(cxl_component_regs, component, | |
200 | void __iomem *hdm_decoder; | |
bd09626b | 201 | void __iomem *ras; |
301e68dd KC |
202 | ); |
203 | /* | |
204 | * Common set of CXL Device register block base pointers | |
205 | * @status: CXL 2.0 8.2.8.3 Device Status Registers | |
206 | * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers | |
207 | * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers | |
208 | */ | |
209 | struct_group_tagged(cxl_device_regs, device_regs, | |
210 | void __iomem *status, *mbox, *memdev; | |
211 | ); | |
8ac75dd6 DW |
212 | }; |
213 | ||
30af9729 IW |
/**
 * struct cxl_reg_map - location of one register block within a mapping
 * @valid: true if this register block was discovered during probing
 * @id: capability id of the register block -- NOTE(review): presumably a
 *      CXL_CM_CAP_CAP_ID_* / CXLDEV_CAP_CAP_ID_* value; confirm at the
 *      probe sites
 * @offset: offset of the register block from the mapping base
 * @size: span of the register block in bytes
 */
struct cxl_reg_map {
	bool valid;
	int id;
	unsigned long offset;
	unsigned long size;
};
220 | ||
08422378 BW |
/**
 * struct cxl_component_reg_map - locations of the component register blocks
 * @hdm_decoder: HDM Decoder Capability Structure (CXL 2.0 8.2.5.12)
 * @ras: RAS Capability Structure (CXL 2.0 8.2.5.9)
 */
struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
	struct cxl_reg_map ras;
};
225 | ||
30af9729 IW |
/**
 * struct cxl_device_reg_map - locations of the device register blocks
 * @status: Device Status registers (CXL 2.0 8.2.8.3)
 * @mbox: Mailbox registers (CXL 2.0 8.2.8.4)
 * @memdev: Memory Device registers (CXL 2.0 8.2.8.5)
 */
struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};
231 | ||
a261e9a1 DW |
232 | /** |
233 | * struct cxl_register_map - DVSEC harvested register block mapping parameters | |
234 | * @base: virtual base of the register-block-BAR + @block_offset | |
6c7f4f1e DW |
235 | * @resource: physical resource base of the register block |
236 | * @max_size: maximum mapping size to perform register search | |
a261e9a1 | 237 | * @reg_type: see enum cxl_regloc_type |
a261e9a1 DW |
238 | * @component_map: cxl_reg_map for component registers |
239 | * @device_map: cxl_reg_maps for device registers | |
240 | */ | |
30af9729 | 241 | struct cxl_register_map { |
a261e9a1 | 242 | void __iomem *base; |
6c7f4f1e DW |
243 | resource_size_t resource; |
244 | resource_size_t max_size; | |
30af9729 | 245 | u8 reg_type; |
30af9729 | 246 | union { |
08422378 | 247 | struct cxl_component_reg_map component_map; |
30af9729 IW |
248 | struct cxl_device_reg_map device_map; |
249 | }; | |
250 | }; | |
251 | ||
08422378 BW |
252 | void cxl_probe_component_regs(struct device *dev, void __iomem *base, |
253 | struct cxl_component_reg_map *map); | |
30af9729 IW |
254 | void cxl_probe_device_regs(struct device *dev, void __iomem *base, |
255 | struct cxl_device_reg_map *map); | |
6c7f4f1e | 256 | int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, |
a1554e9c DW |
257 | struct cxl_register_map *map, |
258 | unsigned long map_mask); | |
6c7f4f1e | 259 | int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs, |
30af9729 | 260 | struct cxl_register_map *map); |
399d34eb | 261 | |
303ebc1b BW |
262 | enum cxl_regloc_type; |
263 | int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, | |
264 | struct cxl_register_map *map); | |
265 | ||
d5b1a271 RR |
/*
 * Selects which half of a Root Complex Register Block to resolve in
 * cxl_rcrb_to_component(): the downstream- or upstream-port registers.
 */
enum cxl_rcrb {
	CXL_RCRB_DOWNSTREAM,
	CXL_RCRB_UPSTREAM,
};
270 | resource_size_t cxl_rcrb_to_component(struct device *dev, | |
271 | resource_size_t rcrb, | |
272 | enum cxl_rcrb which); | |
273 | ||
4812be97 | 274 | #define CXL_RESOURCE_NONE ((resource_size_t) -1) |
7d4b5ca2 | 275 | #define CXL_TARGET_STRLEN 20 |
4812be97 | 276 | |
40ba17af DW |
277 | /* |
278 | * cxl_decoder flags that define the type of memory / devices this | |
279 | * decoder supports as well as configuration lock status See "CXL 2.0 | |
280 | * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details. | |
a32320b7 DW |
281 | * Additionally indicate whether decoder settings were autodetected, |
282 | * user customized. | |
40ba17af DW |
283 | */ |
284 | #define CXL_DECODER_F_RAM BIT(0) | |
285 | #define CXL_DECODER_F_PMEM BIT(1) | |
286 | #define CXL_DECODER_F_TYPE2 BIT(2) | |
287 | #define CXL_DECODER_F_TYPE3 BIT(3) | |
288 | #define CXL_DECODER_F_LOCK BIT(4) | |
d17d0540 DW |
289 | #define CXL_DECODER_F_ENABLE BIT(5) |
290 | #define CXL_DECODER_F_MASK GENMASK(5, 0) | |
40ba17af DW |
291 | |
292 | enum cxl_decoder_type { | |
293 | CXL_DECODER_ACCELERATOR = 2, | |
294 | CXL_DECODER_EXPANDER = 3, | |
295 | }; | |
296 | ||
a5c25802 DW |
297 | /* |
298 | * Current specification goes up to 8, double that seems a reasonable | |
299 | * software max for the foreseeable future | |
300 | */ | |
301 | #define CXL_DECODER_MAX_INTERLEAVE 16 | |
302 | ||
e7748305 | 303 | |
40ba17af | 304 | /** |
e636479e | 305 | * struct cxl_decoder - Common CXL HDM Decoder Attributes |
40ba17af DW |
306 | * @dev: this decoder's device |
307 | * @id: kernel device name id | |
e8b7ea58 | 308 | * @hpa_range: Host physical address range mapped by this decoder |
40ba17af DW |
309 | * @interleave_ways: number of cxl_dports in this decode |
310 | * @interleave_granularity: data stride per dport | |
311 | * @target_type: accelerator vs expander (type2 vs type3) selector | |
b9686e8c | 312 | * @region: currently assigned region for this decoder |
40ba17af | 313 | * @flags: memory type capabilities and locking |
176baefb DW |
314 | * @commit: device/decoder-type specific callback to commit settings to hw |
315 | * @reset: device/decoder-type specific callback to reset hw settings | |
316 | */ | |
40ba17af DW |
317 | struct cxl_decoder { |
318 | struct device dev; | |
319 | int id; | |
e50fe01e | 320 | struct range hpa_range; |
40ba17af DW |
321 | int interleave_ways; |
322 | int interleave_granularity; | |
323 | enum cxl_decoder_type target_type; | |
b9686e8c | 324 | struct cxl_region *region; |
40ba17af | 325 | unsigned long flags; |
176baefb DW |
326 | int (*commit)(struct cxl_decoder *cxld); |
327 | int (*reset)(struct cxl_decoder *cxld); | |
e636479e DW |
328 | }; |
329 | ||
b9686e8c DW |
330 | /* |
331 | * CXL_DECODER_DEAD prevents endpoints from being reattached to regions | |
332 | * while cxld_unregister() is running | |
333 | */ | |
2c866903 DW |
/*
 * Which memory type / access-mode-partition an endpoint decoder targets
 * (see struct cxl_endpoint_decoder::mode): NONE when unassigned, RAM or
 * PMEM for a single partition, MIXED when spanning both.  DEAD marks a
 * decoder whose endpoint is being unregistered (see comment above).
 */
enum cxl_decoder_mode {
	CXL_DECODER_NONE,
	CXL_DECODER_RAM,
	CXL_DECODER_PMEM,
	CXL_DECODER_MIXED,
	CXL_DECODER_DEAD,
};
341 | ||
7d505f98 DW |
342 | static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode) |
343 | { | |
344 | static const char * const names[] = { | |
345 | [CXL_DECODER_NONE] = "none", | |
346 | [CXL_DECODER_RAM] = "ram", | |
347 | [CXL_DECODER_PMEM] = "pmem", | |
348 | [CXL_DECODER_MIXED] = "mixed", | |
349 | }; | |
350 | ||
351 | if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED) | |
352 | return names[mode]; | |
353 | return "mixed"; | |
354 | } | |
355 | ||
a32320b7 DW |
356 | /* |
357 | * Track whether this decoder is reserved for region autodiscovery, or | |
358 | * free for userspace provisioning. | |
359 | */ | |
360 | enum cxl_decoder_state { | |
361 | CXL_DECODER_STATE_MANUAL, | |
362 | CXL_DECODER_STATE_AUTO, | |
363 | }; | |
364 | ||
3bf65915 DW |
365 | /** |
366 | * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder | |
367 | * @cxld: base cxl_decoder_object | |
368 | * @dpa_res: actively claimed DPA span of this decoder | |
369 | * @skip: offset into @dpa_res where @cxld.hpa_range maps | |
2c866903 | 370 | * @mode: which memory type / access-mode-partition this decoder targets |
a32320b7 | 371 | * @state: autodiscovery state |
b9686e8c | 372 | * @pos: interleave position in @cxld.region |
3bf65915 DW |
373 | */ |
374 | struct cxl_endpoint_decoder { | |
375 | struct cxl_decoder cxld; | |
376 | struct resource *dpa_res; | |
377 | resource_size_t skip; | |
2c866903 | 378 | enum cxl_decoder_mode mode; |
a32320b7 | 379 | enum cxl_decoder_state state; |
b9686e8c | 380 | int pos; |
3bf65915 DW |
381 | }; |
382 | ||
e636479e DW |
383 | /** |
384 | * struct cxl_switch_decoder - Switch specific CXL HDM Decoder | |
385 | * @cxld: base cxl_decoder object | |
386 | * @target_lock: coordinate coherent reads of the target list | |
387 | * @nr_targets: number of elements in @target | |
388 | * @target: active ordered target list in current decoder configuration | |
389 | * | |
390 | * The 'switch' decoder type represents the decoder instances of cxl_port's that | |
391 | * route from the root of a CXL memory decode topology to the endpoints. They | |
392 | * come in two flavors, root-level decoders, statically defined by platform | |
393 | * firmware, and mid-level decoders, where interleave-granularity, | |
394 | * interleave-width, and the target list are mutable. | |
395 | */ | |
396 | struct cxl_switch_decoder { | |
397 | struct cxl_decoder cxld; | |
86c8ea0f | 398 | seqlock_t target_lock; |
be185c29 | 399 | int nr_targets; |
40ba17af DW |
400 | struct cxl_dport *target[]; |
401 | }; | |
402 | ||
f9db85bf AS |
403 | struct cxl_root_decoder; |
404 | typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd, | |
405 | int pos); | |
8fdcb170 | 406 | |
0f157c7f DW |
407 | /** |
408 | * struct cxl_root_decoder - Static platform CXL address decoder | |
409 | * @res: host / parent resource for region allocations | |
779dd20c | 410 | * @region_id: region id for next region provisioning event |
6aa41144 | 411 | * @calc_hb: which host bridge covers the n'th position by granularity |
f9db85bf | 412 | * @platform_data: platform specific configuration data |
a32320b7 | 413 | * @range_lock: sync region autodiscovery by address range |
0f157c7f DW |
414 | * @cxlsd: base cxl switch decoder |
415 | */ | |
416 | struct cxl_root_decoder { | |
417 | struct resource *res; | |
779dd20c | 418 | atomic_t region_id; |
f9db85bf AS |
419 | cxl_calc_hb_fn calc_hb; |
420 | void *platform_data; | |
a32320b7 | 421 | struct mutex range_lock; |
0f157c7f DW |
422 | struct cxl_switch_decoder cxlsd; |
423 | }; | |
424 | ||
dd5ba0eb BW |
425 | /* |
426 | * enum cxl_config_state - State machine for region configuration | |
427 | * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely | |
80d10a6c BW |
428 | * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more |
429 | * changes to interleave_ways or interleave_granularity | |
dd5ba0eb BW |
430 | * @CXL_CONFIG_ACTIVE: All targets have been added the region is now |
431 | * active | |
176baefb DW |
432 | * @CXL_CONFIG_RESET_PENDING: see commit_store() |
433 | * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware | |
dd5ba0eb BW |
434 | */ |
435 | enum cxl_config_state { | |
436 | CXL_CONFIG_IDLE, | |
80d10a6c | 437 | CXL_CONFIG_INTERLEAVE_ACTIVE, |
dd5ba0eb | 438 | CXL_CONFIG_ACTIVE, |
176baefb DW |
439 | CXL_CONFIG_RESET_PENDING, |
440 | CXL_CONFIG_COMMIT, | |
dd5ba0eb BW |
441 | }; |
442 | ||
443 | /** | |
444 | * struct cxl_region_params - region settings | |
445 | * @state: allow the driver to lockdown further parameter changes | |
446 | * @uuid: unique id for persistent regions | |
80d10a6c BW |
447 | * @interleave_ways: number of endpoints in the region |
448 | * @interleave_granularity: capacity each endpoint contributes to a stripe | |
23a22cd1 | 449 | * @res: allocated iomem capacity for this region |
038e6eb8 BS |
450 | * @targets: active ordered targets in current decoder configuration |
451 | * @nr_targets: number of targets | |
dd5ba0eb BW |
452 | * |
453 | * State transitions are protected by the cxl_region_rwsem | |
454 | */ | |
455 | struct cxl_region_params { | |
456 | enum cxl_config_state state; | |
457 | uuid_t uuid; | |
80d10a6c BW |
458 | int interleave_ways; |
459 | int interleave_granularity; | |
23a22cd1 | 460 | struct resource *res; |
b9686e8c DW |
461 | struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE]; |
462 | int nr_targets; | |
dd5ba0eb BW |
463 | }; |
464 | ||
d18bc74a DW |
465 | /* |
466 | * Flag whether this region needs to have its HPA span synchronized with | |
467 | * CPU cache state at region activation time. | |
468 | */ | |
469 | #define CXL_REGION_F_INCOHERENT 0 | |
470 | ||
a32320b7 DW |
471 | /* |
472 | * Indicate whether this region has been assembled by autodetection or | |
473 | * userspace assembly. Prevent endpoint decoders outside of automatic | |
474 | * detection from being added to the region. | |
475 | */ | |
476 | #define CXL_REGION_F_AUTO 1 | |
477 | ||
779dd20c BW |
478 | /** |
479 | * struct cxl_region - CXL region | |
480 | * @dev: This region's device | |
481 | * @id: This region's id. Id is globally unique across all regions | |
482 | * @mode: Endpoint decoder allocation / access mode | |
483 | * @type: Endpoint decoder target type | |
f17b558d DW |
484 | * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown |
485 | * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge | |
d18bc74a | 486 | * @flags: Region state flags |
dd5ba0eb | 487 | * @params: active + config params for the region |
779dd20c BW |
488 | */ |
489 | struct cxl_region { | |
490 | struct device dev; | |
491 | int id; | |
492 | enum cxl_decoder_mode mode; | |
493 | enum cxl_decoder_type type; | |
f17b558d DW |
494 | struct cxl_nvdimm_bridge *cxl_nvb; |
495 | struct cxl_pmem_region *cxlr_pmem; | |
d18bc74a | 496 | unsigned long flags; |
dd5ba0eb | 497 | struct cxl_region_params params; |
779dd20c BW |
498 | }; |
499 | ||
/**
 * struct cxl_nvdimm_bridge - anchor for a CXL-backed LIBNVDIMM bus
 * @id: id for bridge device-name
 * @dev: this bridge's device
 * @port: CXL port this bridge is associated with
 * @nvdimm_bus: LIBNVDIMM bus instantiated for this bridge
 * @nd_desc: LIBNVDIMM bus descriptor
 */
struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
};
507 | ||
b5807c80 DJ |
508 | #define CXL_DEV_ID_LEN 19 |
509 | ||
21083f51 DW |
/**
 * struct cxl_nvdimm - CXL memory-device representation on a LIBNVDIMM bus
 * @dev: this nvdimm's device
 * @cxlmd: CXL memory device backing this nvdimm
 * @dev_id: string identifier, up to CXL_DEV_ID_LEN bytes including the
 *          NUL terminator
 */
struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
};
515 | ||
/**
 * struct cxl_pmem_region_mapping - one endpoint's contribution to a pmem region
 * @cxlmd: memory device providing this mapping
 * @cxl_nvd: nvdimm instance associated with @cxlmd
 * @start: start address of the mapping -- NOTE(review): presumably a
 *         device-physical address (DPA); confirm at the region setup site
 * @size: size of the mapping in bytes
 * @position: interleave position of this mapping within the region
 */
struct cxl_pmem_region_mapping {
	struct cxl_memdev *cxlmd;
	struct cxl_nvdimm *cxl_nvd;
	u64 start;
	u64 size;
	int position;
};
523 | ||
/**
 * struct cxl_pmem_region - pmem flavor of a CXL region
 * @dev: this pmem region's device
 * @cxlr: parent CXL region
 * @nd_region: LIBNVDIMM region instantiated for this pmem region
 * @hpa_range: host physical address range covered by the region
 * @nr_mappings: number of entries in @mapping
 * @mapping: per-endpoint mapping records (flexible array)
 */
struct cxl_pmem_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct nd_region *nd_region;
	struct range hpa_range;
	int nr_mappings;
	struct cxl_pmem_region_mapping mapping[];
};
532 | ||
09d09e04 DW |
/**
 * struct cxl_dax_region - dax flavor of a CXL region
 * @dev: this dax region's device
 * @cxlr: parent CXL region
 * @hpa_range: host physical address range covered by the region
 */
struct cxl_dax_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct range hpa_range;
};
538 | ||
4812be97 DW |
539 | /** |
540 | * struct cxl_port - logical collection of upstream port devices and | |
541 | * downstream port devices to construct a CXL memory | |
542 | * decode hierarchy. | |
543 | * @dev: this port's device | |
544 | * @uport: PCI or platform device implementing the upstream port capability | |
ee800010 | 545 | * @host_bridge: Shortcut to the platform attach point for this port |
4812be97 | 546 | * @id: id for port device-name |
7d4b5ca2 | 547 | * @dports: cxl_dport instances referenced by decoders |
2703c16c | 548 | * @endpoints: cxl_ep instances, endpoints that are a descendant of this port |
384e624b | 549 | * @regions: cxl_region_ref instances, regions mapped by this port |
1b58b4ca | 550 | * @parent_dport: dport that points to this port in the parent |
40ba17af | 551 | * @decoder_ida: allocator for decoder ids |
e4f6dfa9 | 552 | * @nr_dports: number of entries in @dports |
0c33b393 | 553 | * @hdm_end: track last allocated HDM decoder instance for allocation ordering |
176baefb | 554 | * @commit_end: cursor to track highest committed decoder for commit ordering |
4812be97 | 555 | * @component_reg_phys: component register capability base address (optional) |
2703c16c | 556 | * @dead: last ep has been removed, force port re-creation |
53fa1bff | 557 | * @depth: How deep this port is relative to the root. depth 0 is the root. |
c9700604 IW |
558 | * @cdat: Cached CDAT data |
559 | * @cdat_available: Should a CDAT attribute be available in sysfs | |
4812be97 DW |
560 | */ |
561 | struct cxl_port { | |
562 | struct device dev; | |
563 | struct device *uport; | |
ee800010 | 564 | struct device *host_bridge; |
4812be97 | 565 | int id; |
39178585 | 566 | struct xarray dports; |
256d0e9e | 567 | struct xarray endpoints; |
384e624b | 568 | struct xarray regions; |
1b58b4ca | 569 | struct cxl_dport *parent_dport; |
40ba17af | 570 | struct ida decoder_ida; |
e4f6dfa9 | 571 | int nr_dports; |
0c33b393 | 572 | int hdm_end; |
176baefb | 573 | int commit_end; |
4812be97 | 574 | resource_size_t component_reg_phys; |
2703c16c | 575 | bool dead; |
53fa1bff | 576 | unsigned int depth; |
c9700604 IW |
577 | struct cxl_cdat { |
578 | void *table; | |
579 | size_t length; | |
580 | } cdat; | |
581 | bool cdat_available; | |
4812be97 DW |
582 | }; |
583 | ||
39178585 DW |
/*
 * Look up the cxl_dport registered against @dport_dev on @port, or NULL
 * if none; @port->dports is keyed by the dport device pointer.
 */
static inline struct cxl_dport *
cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
{
	return xa_load(&port->dports, (unsigned long)dport_dev);
}
589 | ||
7d4b5ca2 DW |
590 | /** |
591 | * struct cxl_dport - CXL downstream port | |
592 | * @dport: PCI bridge or firmware device representing the downstream link | |
593 | * @port_id: unique hardware identifier for dport in decoder target list | |
594 | * @component_reg_phys: downstream port component registers | |
d5b1a271 RR |
595 | * @rcrb: base address for the Root Complex Register Block |
596 | * @rch: Indicate whether this dport was enumerated in RCH or VH mode | |
7d4b5ca2 | 597 | * @port: reference to cxl_port that contains this downstream port |
7d4b5ca2 DW |
598 | */ |
599 | struct cxl_dport { | |
600 | struct device *dport; | |
601 | int port_id; | |
602 | resource_size_t component_reg_phys; | |
d5b1a271 RR |
603 | resource_size_t rcrb; |
604 | bool rch; | |
7d4b5ca2 | 605 | struct cxl_port *port; |
7d4b5ca2 DW |
606 | }; |
607 | ||
2703c16c DW |
608 | /** |
609 | * struct cxl_ep - track an endpoint's interest in a port | |
610 | * @ep: device that hosts a generic CXL endpoint (expander or accelerator) | |
de516b40 | 611 | * @dport: which dport routes to this endpoint on @port |
7f8faf96 DW |
612 | * @next: cxl switch port across the link attached to @dport NULL if |
613 | * attached to an endpoint | |
2703c16c DW |
614 | */ |
615 | struct cxl_ep { | |
616 | struct device *ep; | |
de516b40 | 617 | struct cxl_dport *dport; |
7f8faf96 | 618 | struct cxl_port *next; |
2703c16c DW |
619 | }; |
620 | ||
384e624b DW |
621 | /** |
622 | * struct cxl_region_ref - track a region's interest in a port | |
623 | * @port: point in topology to install this reference | |
624 | * @decoder: decoder assigned for @region in @port | |
625 | * @region: region for this reference | |
626 | * @endpoints: cxl_ep references for region members beneath @port | |
27b3f8d1 | 627 | * @nr_targets_set: track how many targets have been programmed during setup |
384e624b DW |
628 | * @nr_eps: number of endpoints beneath @port |
629 | * @nr_targets: number of distinct targets needed to reach @nr_eps | |
630 | */ | |
631 | struct cxl_region_ref { | |
632 | struct cxl_port *port; | |
633 | struct cxl_decoder *decoder; | |
634 | struct cxl_region *region; | |
635 | struct xarray endpoints; | |
27b3f8d1 | 636 | int nr_targets_set; |
384e624b DW |
637 | int nr_eps; |
638 | int nr_targets; | |
639 | }; | |
640 | ||
d54c1bbe BW |
641 | /* |
642 | * The platform firmware device hosting the root is also the top of the | |
643 | * CXL port topology. All other CXL ports have another CXL port as their | |
644 | * parent and their ->uport / host device is out-of-line of the port | |
645 | * ancestry. | |
646 | */ | |
647 | static inline bool is_cxl_root(struct cxl_port *port) | |
648 | { | |
649 | return port->uport == port->dev.parent; | |
650 | } | |
651 | ||
2a81ada3 GKH |
652 | bool is_cxl_port(const struct device *dev); |
653 | struct cxl_port *to_cxl_port(const struct device *dev); | |
98d2d3a2 | 654 | struct pci_bus; |
5ff7316f DW |
655 | int devm_cxl_register_pci_bus(struct device *host, struct device *uport, |
656 | struct pci_bus *bus); | |
657 | struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port); | |
4812be97 DW |
658 | struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, |
659 | resource_size_t component_reg_phys, | |
1b58b4ca | 660 | struct cxl_dport *parent_dport); |
d35b495d | 661 | struct cxl_port *find_cxl_root(struct cxl_port *port); |
2703c16c | 662 | int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); |
4029c32f DW |
663 | void cxl_bus_rescan(void); |
664 | void cxl_bus_drain(void); | |
1b58b4ca DW |
665 | struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, |
666 | struct cxl_dport **dport); | |
8dd2bc0f | 667 | bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); |
2703c16c | 668 | |
664bf115 | 669 | struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, |
98d2d3a2 DW |
670 | struct device *dport, int port_id, |
671 | resource_size_t component_reg_phys); | |
d5b1a271 RR |
672 | struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, |
673 | struct device *dport_dev, int port_id, | |
674 | resource_size_t component_reg_phys, | |
675 | resource_size_t rcrb); | |
2703c16c | 676 | |
40ba17af | 677 | struct cxl_decoder *to_cxl_decoder(struct device *dev); |
0f157c7f | 678 | struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); |
3d8f7cca | 679 | struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); |
3bf65915 | 680 | struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); |
8fdcb170 | 681 | bool is_root_decoder(struct device *dev); |
3d8f7cca | 682 | bool is_switch_decoder(struct device *dev); |
8ae3cebc | 683 | bool is_endpoint_decoder(struct device *dev); |
0f157c7f | 684 | struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, |
f9db85bf AS |
685 | unsigned int nr_targets, |
686 | cxl_calc_hb_fn calc_hb); | |
687 | struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos); | |
e636479e DW |
688 | struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, |
689 | unsigned int nr_targets); | |
48667f67 | 690 | int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); |
3bf65915 | 691 | struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); |
d17d0540 | 692 | int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); |
48667f67 | 693 | int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); |
8dd2bc0f BW |
694 | int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); |
695 | ||
59c3368b DJ |
/**
 * struct cxl_endpoint_dvsec_info - Cached DVSEC info
 * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
 * @ranges: Number of active HDM ranges this device uses.
 * @port: endpoint port associated with this info instance
 * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
 *               (at most two ranges are cached, matching the array size)
 */
struct cxl_endpoint_dvsec_info {
	bool mem_enabled;
	int ranges;
	struct cxl_port *port;
	struct range dvsec_range[2];
};
709 | ||
d17d0540 | 710 | struct cxl_hdm; |
4474ce56 DJ |
711 | struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, |
712 | struct cxl_endpoint_dvsec_info *info); | |
eb0764b8 | 713 | int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm); |
b777e9be DJ |
714 | int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, |
715 | struct cxl_endpoint_dvsec_info *info); | |
664bf115 | 716 | int devm_cxl_add_passthrough_decoder(struct cxl_port *port); |
59c3368b DJ |
717 | int cxl_dvsec_rr_decode(struct device *dev, int dvsec, |
718 | struct cxl_endpoint_dvsec_info *info); | |
40ba17af | 719 | |
779dd20c BW |
720 | bool is_cxl_region(struct device *dev); |
721 | ||
b39cb105 | 722 | extern struct bus_type cxl_bus_type; |
6af7139c DW |
723 | |
/**
 * struct cxl_driver - driver for a device on the CXL bus (&cxl_bus_type)
 * @name: driver name
 * @probe: bind callback, invoked when a matching device is found
 * @remove: unbind callback
 * @drv: embedded generic driver; recover the container via to_cxl_drv()
 * @id: device-type id to match, presumably one of the CXL_DEVICE_*
 *      identifiers given the "cxl:t%d" modalias format — confirm in the
 *      bus match code
 */
struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};
731 | ||
732 | static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv) | |
733 | { | |
734 | return container_of(drv, struct cxl_driver, drv); | |
735 | } | |
736 | ||
737 | int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, | |
738 | const char *modname); | |
739 | #define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME) | |
740 | void cxl_driver_unregister(struct cxl_driver *cxl_drv); | |
741 | ||
c57cae78 BW |
742 | #define module_cxl_driver(__cxl_driver) \ |
743 | module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister) | |
744 | ||
21083f51 DW |
745 | #define CXL_DEVICE_NVDIMM_BRIDGE 1 |
746 | #define CXL_DEVICE_NVDIMM 2 | |
54cdbf84 BW |
747 | #define CXL_DEVICE_PORT 3 |
748 | #define CXL_DEVICE_ROOT 4 | |
8dd2bc0f | 749 | #define CXL_DEVICE_MEMORY_EXPANDER 5 |
8d48817d | 750 | #define CXL_DEVICE_REGION 6 |
04ad63f0 | 751 | #define CXL_DEVICE_PMEM_REGION 7 |
09d09e04 | 752 | #define CXL_DEVICE_DAX_REGION 8 |
8fdcb170 | 753 | |
6af7139c DW |
754 | #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*") |
755 | #define CXL_MODALIAS_FMT "cxl:t%d" | |
756 | ||
8fdcb170 DW |
757 | struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev); |
758 | struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, | |
759 | struct cxl_port *port); | |
21083f51 DW |
760 | struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); |
761 | bool is_cxl_nvdimm(struct device *dev); | |
53989fad | 762 | bool is_cxl_nvdimm_bridge(struct device *dev); |
f17b558d | 763 | int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); |
d35b495d | 764 | struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd); |
04ad63f0 DW |
765 | |
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
		      struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
#else
/*
 * CONFIG_CXL_REGION=n stubs so callers need not #ifdef every use site:
 * type checks report "not a region", casts return NULL, and
 * cxl_add_to_region() succeeds as a no-op.
 */
static inline bool is_cxl_pmem_region(struct device *dev)
{
	return false;
}
static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	return NULL;
}
static inline int cxl_add_to_region(struct cxl_port *root,
				    struct cxl_endpoint_decoder *cxled)
{
	return 0;
}
static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	return NULL;
}
#endif
67dcdd4d DW |
791 | |
/*
 * Unit test builds override this to __weak; find the 'strong' versions
 * of these symbols in tools/testing/cxl/.
 */
796 | #ifndef __mock | |
797 | #define __mock static | |
798 | #endif | |
3c5b9039 | 799 | |
8adaf747 | 800 | #endif /* __CXL_H__ */ |