Commit | Line | Data |
---|---|---|
8adaf747 BW |
1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* Copyright(c) 2020 Intel Corporation. */ | |
3 | ||
4 | #ifndef __CXL_H__ | |
5 | #define __CXL_H__ | |
6 | ||
8fdcb170 | 7 | #include <linux/libnvdimm.h> |
8adaf747 BW |
8 | #include <linux/bitfield.h> |
9 | #include <linux/bitops.h> | |
80d10a6c | 10 | #include <linux/log2.h> |
8adaf747 BW |
11 | #include <linux/io.h> |
12 | ||
4812be97 DW |
13 | /** |
14 | * DOC: cxl objects | |
15 | * | |
16 | * The CXL core objects like ports, decoders, and regions are shared | |
17 | * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers | |
18 | * (port-driver, region-driver, nvdimm object-drivers... etc). | |
19 | */ | |
20 | ||
d17d0540 DW |
21 | /* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */ |
22 | #define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K | |
23 | ||
08422378 BW |
24 | /* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers*/ |
25 | #define CXL_CM_OFFSET 0x1000 | |
26 | #define CXL_CM_CAP_HDR_OFFSET 0x0 | |
27 | #define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0) | |
28 | #define CM_CAP_HDR_CAP_ID 1 | |
29 | #define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16) | |
30 | #define CM_CAP_HDR_CAP_VERSION 1 | |
31 | #define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20) | |
32 | #define CM_CAP_HDR_CACHE_MEM_VERSION 1 | |
33 | #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24) | |
34 | #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20) | |
35 | ||
bd09626b | 36 | #define CXL_CM_CAP_CAP_ID_RAS 0x2 |
08422378 BW |
37 | #define CXL_CM_CAP_CAP_ID_HDM 0x5 |
38 | #define CXL_CM_CAP_CAP_HDM_VERSION 1 | |
39 | ||
40 | /* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */ | |
41 | #define CXL_HDM_DECODER_CAP_OFFSET 0x0 | |
42 | #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0) | |
43 | #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) | |
d17d0540 DW |
44 | #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) |
45 | #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) | |
46 | #define CXL_HDM_DECODER_CTRL_OFFSET 0x4 | |
47 | #define CXL_HDM_DECODER_ENABLE BIT(1) | |
48 | #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) | |
49 | #define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14) | |
50 | #define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18) | |
51 | #define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c) | |
52 | #define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20) | |
53 | #define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0) | |
54 | #define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4) | |
55 | #define CXL_HDM_DECODER0_CTRL_LOCK BIT(8) | |
56 | #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) | |
57 | #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) | |
176baefb | 58 | #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11) |
d17d0540 DW |
59 | #define CXL_HDM_DECODER0_CTRL_TYPE BIT(12) |
60 | #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) | |
61 | #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) | |
9c57cde0 DW |
62 | #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i) |
63 | #define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i) | |
08422378 | 64 | |
3b39fd6c AM |
65 | /* HDM decoder control register constants CXL 3.0 8.2.5.19.7 */ |
66 | #define CXL_DECODER_MIN_GRANULARITY 256 | |
67 | #define CXL_DECODER_MAX_ENCODED_IG 6 | |
68 | ||
6423035f BW |
69 | static inline int cxl_hdm_decoder_count(u32 cap_hdr) |
70 | { | |
71 | int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr); | |
72 | ||
73 | return val ? val * 2 : 1; | |
74 | } | |
75 | ||
419af595 | 76 | /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */ |
83351ddb | 77 | static inline int eig_to_granularity(u16 eig, unsigned int *granularity) |
419af595 | 78 | { |
83351ddb | 79 | if (eig > CXL_DECODER_MAX_ENCODED_IG) |
419af595 | 80 | return -EINVAL; |
83351ddb | 81 | *granularity = CXL_DECODER_MIN_GRANULARITY << eig; |
419af595 DW |
82 | return 0; |
83 | } | |
84 | ||
85 | /* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */ | |
c99b2e8c | 86 | static inline int eiw_to_ways(u8 eiw, unsigned int *ways) |
419af595 | 87 | { |
c99b2e8c | 88 | switch (eiw) { |
419af595 | 89 | case 0 ... 4: |
c99b2e8c | 90 | *ways = 1 << eiw; |
419af595 DW |
91 | break; |
92 | case 8 ... 10: | |
c99b2e8c | 93 | *ways = 3 << (eiw - 8); |
419af595 DW |
94 | break; |
95 | default: | |
96 | return -EINVAL; | |
97 | } | |
98 | ||
99 | return 0; | |
100 | } | |
101 | ||
83351ddb | 102 | static inline int granularity_to_eig(int granularity, u16 *eig) |
80d10a6c | 103 | { |
83351ddb DJ |
104 | if (granularity > SZ_16K || granularity < CXL_DECODER_MIN_GRANULARITY || |
105 | !is_power_of_2(granularity)) | |
80d10a6c | 106 | return -EINVAL; |
83351ddb | 107 | *eig = ilog2(granularity) - 8; |
80d10a6c BW |
108 | return 0; |
109 | } | |
110 | ||
c99b2e8c | 111 | static inline int ways_to_eiw(unsigned int ways, u8 *eiw) |
80d10a6c BW |
112 | { |
113 | if (ways > 16) | |
114 | return -EINVAL; | |
115 | if (is_power_of_2(ways)) { | |
c99b2e8c | 116 | *eiw = ilog2(ways); |
80d10a6c BW |
117 | return 0; |
118 | } | |
119 | if (ways % 3) | |
120 | return -EINVAL; | |
121 | ways /= 3; | |
122 | if (!is_power_of_2(ways)) | |
123 | return -EINVAL; | |
c99b2e8c | 124 | *eiw = ilog2(ways) + 8; |
80d10a6c BW |
125 | return 0; |
126 | } | |
127 | ||
bd09626b DW |
128 | /* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */ |
129 | #define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0 | |
130 | #define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0)) | |
131 | #define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4 | |
132 | #define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0)) | |
133 | #define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8 | |
134 | #define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0)) | |
135 | #define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC | |
136 | #define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0) | |
137 | #define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10 | |
138 | #define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0) | |
139 | #define CXL_RAS_CAP_CONTROL_OFFSET 0x14 | |
2905cb52 | 140 | #define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0) |
bd09626b DW |
141 | #define CXL_RAS_HEADER_LOG_OFFSET 0x18 |
142 | #define CXL_RAS_CAPABILITY_LENGTH 0x58 | |
4a20bc3e DW |
#define CXL_HEADERLOG_SIZE SZ_512
/* Parenthesized so expansion survives surrounding operators, e.g. x % CXL_HEADERLOG_SIZE_U32 */
#define CXL_HEADERLOG_SIZE_U32 (SZ_512 / sizeof(u32))
bd09626b | 145 | |
8adaf747 BW |
146 | /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */ |
147 | #define CXLDEV_CAP_ARRAY_OFFSET 0x0 | |
148 | #define CXLDEV_CAP_ARRAY_CAP_ID 0 | |
149 | #define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0) | |
150 | #define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32) | |
151 | /* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */ | |
152 | #define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0) | |
153 | /* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */ | |
154 | #define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1 | |
155 | #define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2 | |
156 | #define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3 | |
157 | #define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000 | |
158 | ||
6ebe28f9 IW |
159 | /* CXL 3.0 8.2.8.3.1 Event Status Register */ |
160 | #define CXLDEV_DEV_EVENT_STATUS_OFFSET 0x00 | |
161 | #define CXLDEV_EVENT_STATUS_INFO BIT(0) | |
162 | #define CXLDEV_EVENT_STATUS_WARN BIT(1) | |
163 | #define CXLDEV_EVENT_STATUS_FAIL BIT(2) | |
164 | #define CXLDEV_EVENT_STATUS_FATAL BIT(3) | |
165 | ||
166 | #define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO | \ | |
167 | CXLDEV_EVENT_STATUS_WARN | \ | |
168 | CXLDEV_EVENT_STATUS_FAIL | \ | |
169 | CXLDEV_EVENT_STATUS_FATAL) | |
170 | ||
8adaf747 BW |
171 | /* CXL 2.0 8.2.8.4 Mailbox Registers */ |
172 | #define CXLDEV_MBOX_CAPS_OFFSET 0x00 | |
173 | #define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0) | |
174 | #define CXLDEV_MBOX_CTRL_OFFSET 0x04 | |
175 | #define CXLDEV_MBOX_CTRL_DOORBELL BIT(0) | |
176 | #define CXLDEV_MBOX_CMD_OFFSET 0x08 | |
177 | #define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0) | |
178 | #define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16) | |
179 | #define CXLDEV_MBOX_STATUS_OFFSET 0x10 | |
180 | #define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32) | |
181 | #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18 | |
182 | #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20 | |
183 | ||
8ac75dd6 | 184 | /* |
301e68dd KC |
185 | * Using struct_group() allows for per register-block-type helper routines, |
186 | * without requiring block-type agnostic code to include the prefix. | |
8ac75dd6 DW |
187 | */ |
188 | struct cxl_regs { | |
301e68dd KC |
189 | /* |
190 | * Common set of CXL Component register block base pointers | |
191 | * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure | |
bd09626b | 192 | * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure |
301e68dd KC |
193 | */ |
194 | struct_group_tagged(cxl_component_regs, component, | |
195 | void __iomem *hdm_decoder; | |
bd09626b | 196 | void __iomem *ras; |
301e68dd KC |
197 | ); |
198 | /* | |
199 | * Common set of CXL Device register block base pointers | |
200 | * @status: CXL 2.0 8.2.8.3 Device Status Registers | |
201 | * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers | |
202 | * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers | |
203 | */ | |
204 | struct_group_tagged(cxl_device_regs, device_regs, | |
205 | void __iomem *status, *mbox, *memdev; | |
206 | ); | |
8ac75dd6 DW |
207 | }; |
208 | ||
30af9729 IW |
209 | struct cxl_reg_map { |
210 | bool valid; | |
a1554e9c | 211 | int id; |
30af9729 IW |
212 | unsigned long offset; |
213 | unsigned long size; | |
214 | }; | |
215 | ||
08422378 BW |
216 | struct cxl_component_reg_map { |
217 | struct cxl_reg_map hdm_decoder; | |
bd09626b | 218 | struct cxl_reg_map ras; |
08422378 BW |
219 | }; |
220 | ||
30af9729 IW |
221 | struct cxl_device_reg_map { |
222 | struct cxl_reg_map status; | |
223 | struct cxl_reg_map mbox; | |
224 | struct cxl_reg_map memdev; | |
225 | }; | |
226 | ||
a261e9a1 DW |
227 | /** |
228 | * struct cxl_register_map - DVSEC harvested register block mapping parameters | |
229 | * @base: virtual base of the register-block-BAR + @block_offset | |
6c7f4f1e DW |
230 | * @resource: physical resource base of the register block |
231 | * @max_size: maximum mapping size to perform register search | |
a261e9a1 | 232 | * @reg_type: see enum cxl_regloc_type |
a261e9a1 DW |
233 | * @component_map: cxl_reg_map for component registers |
234 | * @device_map: cxl_reg_maps for device registers | |
235 | */ | |
30af9729 | 236 | struct cxl_register_map { |
a261e9a1 | 237 | void __iomem *base; |
6c7f4f1e DW |
238 | resource_size_t resource; |
239 | resource_size_t max_size; | |
30af9729 | 240 | u8 reg_type; |
30af9729 | 241 | union { |
08422378 | 242 | struct cxl_component_reg_map component_map; |
30af9729 IW |
243 | struct cxl_device_reg_map device_map; |
244 | }; | |
245 | }; | |
246 | ||
08422378 BW |
247 | void cxl_probe_component_regs(struct device *dev, void __iomem *base, |
248 | struct cxl_component_reg_map *map); | |
30af9729 IW |
249 | void cxl_probe_device_regs(struct device *dev, void __iomem *base, |
250 | struct cxl_device_reg_map *map); | |
6c7f4f1e | 251 | int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, |
a1554e9c DW |
252 | struct cxl_register_map *map, |
253 | unsigned long map_mask); | |
6c7f4f1e | 254 | int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs, |
30af9729 | 255 | struct cxl_register_map *map); |
399d34eb | 256 | |
303ebc1b BW |
257 | enum cxl_regloc_type; |
258 | int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, | |
259 | struct cxl_register_map *map); | |
260 | ||
d5b1a271 RR |
261 | enum cxl_rcrb { |
262 | CXL_RCRB_DOWNSTREAM, | |
263 | CXL_RCRB_UPSTREAM, | |
264 | }; | |
265 | resource_size_t cxl_rcrb_to_component(struct device *dev, | |
266 | resource_size_t rcrb, | |
267 | enum cxl_rcrb which); | |
268 | ||
4812be97 | 269 | #define CXL_RESOURCE_NONE ((resource_size_t) -1) |
7d4b5ca2 | 270 | #define CXL_TARGET_STRLEN 20 |
4812be97 | 271 | |
40ba17af DW |
272 | /* |
273 | * cxl_decoder flags that define the type of memory / devices this | |
274 | * decoder supports as well as configuration lock status See "CXL 2.0 | |
275 | * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details. | |
276 | */ | |
277 | #define CXL_DECODER_F_RAM BIT(0) | |
278 | #define CXL_DECODER_F_PMEM BIT(1) | |
279 | #define CXL_DECODER_F_TYPE2 BIT(2) | |
280 | #define CXL_DECODER_F_TYPE3 BIT(3) | |
281 | #define CXL_DECODER_F_LOCK BIT(4) | |
d17d0540 DW |
282 | #define CXL_DECODER_F_ENABLE BIT(5) |
283 | #define CXL_DECODER_F_MASK GENMASK(5, 0) | |
40ba17af DW |
284 | |
285 | enum cxl_decoder_type { | |
286 | CXL_DECODER_ACCELERATOR = 2, | |
287 | CXL_DECODER_EXPANDER = 3, | |
288 | }; | |
289 | ||
a5c25802 DW |
290 | /* |
291 | * Current specification goes up to 8, double that seems a reasonable | |
292 | * software max for the foreseeable future | |
293 | */ | |
294 | #define CXL_DECODER_MAX_INTERLEAVE 16 | |
295 | ||
e7748305 | 296 | |
40ba17af | 297 | /** |
e636479e | 298 | * struct cxl_decoder - Common CXL HDM Decoder Attributes |
40ba17af DW |
299 | * @dev: this decoder's device |
300 | * @id: kernel device name id | |
e8b7ea58 | 301 | * @hpa_range: Host physical address range mapped by this decoder |
40ba17af DW |
302 | * @interleave_ways: number of cxl_dports in this decode |
303 | * @interleave_granularity: data stride per dport | |
304 | * @target_type: accelerator vs expander (type2 vs type3) selector | |
b9686e8c | 305 | * @region: currently assigned region for this decoder |
40ba17af | 306 | * @flags: memory type capabilities and locking |
176baefb DW |
307 | * @commit: device/decoder-type specific callback to commit settings to hw |
308 | * @reset: device/decoder-type specific callback to reset hw settings | |
309 | */ | |
40ba17af DW |
310 | struct cxl_decoder { |
311 | struct device dev; | |
312 | int id; | |
e50fe01e | 313 | struct range hpa_range; |
40ba17af DW |
314 | int interleave_ways; |
315 | int interleave_granularity; | |
316 | enum cxl_decoder_type target_type; | |
b9686e8c | 317 | struct cxl_region *region; |
40ba17af | 318 | unsigned long flags; |
176baefb DW |
319 | int (*commit)(struct cxl_decoder *cxld); |
320 | int (*reset)(struct cxl_decoder *cxld); | |
e636479e DW |
321 | }; |
322 | ||
b9686e8c DW |
323 | /* |
324 | * CXL_DECODER_DEAD prevents endpoints from being reattached to regions | |
325 | * while cxld_unregister() is running | |
326 | */ | |
2c866903 DW |
327 | enum cxl_decoder_mode { |
328 | CXL_DECODER_NONE, | |
329 | CXL_DECODER_RAM, | |
330 | CXL_DECODER_PMEM, | |
331 | CXL_DECODER_MIXED, | |
b9686e8c | 332 | CXL_DECODER_DEAD, |
2c866903 DW |
333 | }; |
334 | ||
3bf65915 DW |
335 | /** |
336 | * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder | |
337 | * @cxld: base cxl_decoder_object | |
338 | * @dpa_res: actively claimed DPA span of this decoder | |
339 | * @skip: offset into @dpa_res where @cxld.hpa_range maps | |
2c866903 | 340 | * @mode: which memory type / access-mode-partition this decoder targets |
b9686e8c | 341 | * @pos: interleave position in @cxld.region |
3bf65915 DW |
342 | */ |
343 | struct cxl_endpoint_decoder { | |
344 | struct cxl_decoder cxld; | |
345 | struct resource *dpa_res; | |
346 | resource_size_t skip; | |
2c866903 | 347 | enum cxl_decoder_mode mode; |
b9686e8c | 348 | int pos; |
3bf65915 DW |
349 | }; |
350 | ||
e636479e DW |
351 | /** |
352 | * struct cxl_switch_decoder - Switch specific CXL HDM Decoder | |
353 | * @cxld: base cxl_decoder object | |
354 | * @target_lock: coordinate coherent reads of the target list | |
355 | * @nr_targets: number of elements in @target | |
356 | * @target: active ordered target list in current decoder configuration | |
357 | * | |
358 | * The 'switch' decoder type represents the decoder instances of cxl_port's that | |
359 | * route from the root of a CXL memory decode topology to the endpoints. They | |
360 | * come in two flavors, root-level decoders, statically defined by platform | |
361 | * firmware, and mid-level decoders, where interleave-granularity, | |
362 | * interleave-width, and the target list are mutable. | |
363 | */ | |
364 | struct cxl_switch_decoder { | |
365 | struct cxl_decoder cxld; | |
86c8ea0f | 366 | seqlock_t target_lock; |
be185c29 | 367 | int nr_targets; |
40ba17af DW |
368 | struct cxl_dport *target[]; |
369 | }; | |
370 | ||
f9db85bf AS |
371 | struct cxl_root_decoder; |
372 | typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd, | |
373 | int pos); | |
8fdcb170 | 374 | |
0f157c7f DW |
375 | /** |
376 | * struct cxl_root_decoder - Static platform CXL address decoder | |
377 | * @res: host / parent resource for region allocations | |
779dd20c | 378 | * @region_id: region id for next region provisioning event |
6aa41144 | 379 | * @calc_hb: which host bridge covers the n'th position by granularity |
f9db85bf | 380 | * @platform_data: platform specific configuration data |
0f157c7f DW |
381 | * @cxlsd: base cxl switch decoder |
382 | */ | |
383 | struct cxl_root_decoder { | |
384 | struct resource *res; | |
779dd20c | 385 | atomic_t region_id; |
f9db85bf AS |
386 | cxl_calc_hb_fn calc_hb; |
387 | void *platform_data; | |
0f157c7f DW |
388 | struct cxl_switch_decoder cxlsd; |
389 | }; | |
390 | ||
dd5ba0eb BW |
391 | /* |
392 | * enum cxl_config_state - State machine for region configuration | |
393 | * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely | |
80d10a6c BW |
394 | * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more |
395 | * changes to interleave_ways or interleave_granularity | |
dd5ba0eb BW |
396 | * @CXL_CONFIG_ACTIVE: All targets have been added the region is now |
397 | * active | |
176baefb DW |
398 | * @CXL_CONFIG_RESET_PENDING: see commit_store() |
399 | * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware | |
dd5ba0eb BW |
400 | */ |
401 | enum cxl_config_state { | |
402 | CXL_CONFIG_IDLE, | |
80d10a6c | 403 | CXL_CONFIG_INTERLEAVE_ACTIVE, |
dd5ba0eb | 404 | CXL_CONFIG_ACTIVE, |
176baefb DW |
405 | CXL_CONFIG_RESET_PENDING, |
406 | CXL_CONFIG_COMMIT, | |
dd5ba0eb BW |
407 | }; |
408 | ||
409 | /** | |
410 | * struct cxl_region_params - region settings | |
411 | * @state: allow the driver to lockdown further parameter changes | |
412 | * @uuid: unique id for persistent regions | |
80d10a6c BW |
413 | * @interleave_ways: number of endpoints in the region |
414 | * @interleave_granularity: capacity each endpoint contributes to a stripe | |
23a22cd1 | 415 | * @res: allocated iomem capacity for this region |
038e6eb8 BS |
416 | * @targets: active ordered targets in current decoder configuration |
417 | * @nr_targets: number of targets | |
dd5ba0eb BW |
418 | * |
419 | * State transitions are protected by the cxl_region_rwsem | |
420 | */ | |
421 | struct cxl_region_params { | |
422 | enum cxl_config_state state; | |
423 | uuid_t uuid; | |
80d10a6c BW |
424 | int interleave_ways; |
425 | int interleave_granularity; | |
23a22cd1 | 426 | struct resource *res; |
b9686e8c DW |
427 | struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE]; |
428 | int nr_targets; | |
dd5ba0eb BW |
429 | }; |
430 | ||
d18bc74a DW |
431 | /* |
432 | * Flag whether this region needs to have its HPA span synchronized with | |
433 | * CPU cache state at region activation time. | |
434 | */ | |
435 | #define CXL_REGION_F_INCOHERENT 0 | |
436 | ||
779dd20c BW |
437 | /** |
438 | * struct cxl_region - CXL region | |
439 | * @dev: This region's device | |
440 | * @id: This region's id. Id is globally unique across all regions | |
441 | * @mode: Endpoint decoder allocation / access mode | |
442 | * @type: Endpoint decoder target type | |
f17b558d DW |
443 | * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown |
444 | * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge | |
d18bc74a | 445 | * @flags: Region state flags |
dd5ba0eb | 446 | * @params: active + config params for the region |
779dd20c BW |
447 | */ |
448 | struct cxl_region { | |
449 | struct device dev; | |
450 | int id; | |
451 | enum cxl_decoder_mode mode; | |
452 | enum cxl_decoder_type type; | |
f17b558d DW |
453 | struct cxl_nvdimm_bridge *cxl_nvb; |
454 | struct cxl_pmem_region *cxlr_pmem; | |
d18bc74a | 455 | unsigned long flags; |
dd5ba0eb | 456 | struct cxl_region_params params; |
779dd20c BW |
457 | }; |
458 | ||
/**
 * struct cxl_nvdimm_bridge - anchor object connecting a cxl_port to LIBNVDIMM
 * @id: id for bridge device-name
 * @dev: this bridge's device
 * @port: cxl_port this bridge is attached to
 * @nvdimm_bus: LIBNVDIMM bus hosted by this bridge
 * @nd_desc: LIBNVDIMM bus descriptor backing @nvdimm_bus
 */
struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
};
466 | ||
b5807c80 DJ |
467 | #define CXL_DEV_ID_LEN 19 |
468 | ||
21083f51 DW |
469 | struct cxl_nvdimm { |
470 | struct device dev; | |
471 | struct cxl_memdev *cxlmd; | |
b5807c80 | 472 | u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */ |
04ad63f0 DW |
473 | }; |
474 | ||
475 | struct cxl_pmem_region_mapping { | |
476 | struct cxl_memdev *cxlmd; | |
477 | struct cxl_nvdimm *cxl_nvd; | |
478 | u64 start; | |
479 | u64 size; | |
480 | int position; | |
481 | }; | |
482 | ||
483 | struct cxl_pmem_region { | |
484 | struct device dev; | |
485 | struct cxl_region *cxlr; | |
486 | struct nd_region *nd_region; | |
04ad63f0 DW |
487 | struct range hpa_range; |
488 | int nr_mappings; | |
489 | struct cxl_pmem_region_mapping mapping[]; | |
21083f51 DW |
490 | }; |
491 | ||
4812be97 DW |
492 | /** |
493 | * struct cxl_port - logical collection of upstream port devices and | |
494 | * downstream port devices to construct a CXL memory | |
495 | * decode hierarchy. | |
496 | * @dev: this port's device | |
497 | * @uport: PCI or platform device implementing the upstream port capability | |
ee800010 | 498 | * @host_bridge: Shortcut to the platform attach point for this port |
4812be97 | 499 | * @id: id for port device-name |
7d4b5ca2 | 500 | * @dports: cxl_dport instances referenced by decoders |
2703c16c | 501 | * @endpoints: cxl_ep instances, endpoints that are a descendant of this port |
384e624b | 502 | * @regions: cxl_region_ref instances, regions mapped by this port |
1b58b4ca | 503 | * @parent_dport: dport that points to this port in the parent |
40ba17af | 504 | * @decoder_ida: allocator for decoder ids |
e4f6dfa9 | 505 | * @nr_dports: number of entries in @dports |
0c33b393 | 506 | * @hdm_end: track last allocated HDM decoder instance for allocation ordering |
176baefb | 507 | * @commit_end: cursor to track highest committed decoder for commit ordering |
4812be97 | 508 | * @component_reg_phys: component register capability base address (optional) |
2703c16c | 509 | * @dead: last ep has been removed, force port re-creation |
53fa1bff | 510 | * @depth: How deep this port is relative to the root. depth 0 is the root. |
c9700604 IW |
511 | * @cdat: Cached CDAT data |
512 | * @cdat_available: Should a CDAT attribute be available in sysfs | |
4812be97 DW |
513 | */ |
514 | struct cxl_port { | |
515 | struct device dev; | |
516 | struct device *uport; | |
ee800010 | 517 | struct device *host_bridge; |
4812be97 | 518 | int id; |
39178585 | 519 | struct xarray dports; |
256d0e9e | 520 | struct xarray endpoints; |
384e624b | 521 | struct xarray regions; |
1b58b4ca | 522 | struct cxl_dport *parent_dport; |
40ba17af | 523 | struct ida decoder_ida; |
e4f6dfa9 | 524 | int nr_dports; |
0c33b393 | 525 | int hdm_end; |
176baefb | 526 | int commit_end; |
4812be97 | 527 | resource_size_t component_reg_phys; |
2703c16c | 528 | bool dead; |
53fa1bff | 529 | unsigned int depth; |
c9700604 IW |
530 | struct cxl_cdat { |
531 | void *table; | |
532 | size_t length; | |
533 | } cdat; | |
534 | bool cdat_available; | |
4812be97 DW |
535 | }; |
536 | ||
39178585 DW |
537 | static inline struct cxl_dport * |
538 | cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev) | |
539 | { | |
540 | return xa_load(&port->dports, (unsigned long)dport_dev); | |
541 | } | |
542 | ||
7d4b5ca2 DW |
543 | /** |
544 | * struct cxl_dport - CXL downstream port | |
545 | * @dport: PCI bridge or firmware device representing the downstream link | |
546 | * @port_id: unique hardware identifier for dport in decoder target list | |
547 | * @component_reg_phys: downstream port component registers | |
d5b1a271 RR |
548 | * @rcrb: base address for the Root Complex Register Block |
549 | * @rch: Indicate whether this dport was enumerated in RCH or VH mode | |
7d4b5ca2 | 550 | * @port: reference to cxl_port that contains this downstream port |
7d4b5ca2 DW |
551 | */ |
552 | struct cxl_dport { | |
553 | struct device *dport; | |
554 | int port_id; | |
555 | resource_size_t component_reg_phys; | |
d5b1a271 RR |
556 | resource_size_t rcrb; |
557 | bool rch; | |
7d4b5ca2 | 558 | struct cxl_port *port; |
7d4b5ca2 DW |
559 | }; |
560 | ||
2703c16c DW |
561 | /** |
562 | * struct cxl_ep - track an endpoint's interest in a port | |
563 | * @ep: device that hosts a generic CXL endpoint (expander or accelerator) | |
de516b40 | 564 | * @dport: which dport routes to this endpoint on @port |
7f8faf96 DW |
565 | * @next: cxl switch port across the link attached to @dport NULL if |
566 | * attached to an endpoint | |
2703c16c DW |
567 | */ |
568 | struct cxl_ep { | |
569 | struct device *ep; | |
de516b40 | 570 | struct cxl_dport *dport; |
7f8faf96 | 571 | struct cxl_port *next; |
2703c16c DW |
572 | }; |
573 | ||
384e624b DW |
574 | /** |
575 | * struct cxl_region_ref - track a region's interest in a port | |
576 | * @port: point in topology to install this reference | |
577 | * @decoder: decoder assigned for @region in @port | |
578 | * @region: region for this reference | |
579 | * @endpoints: cxl_ep references for region members beneath @port | |
27b3f8d1 | 580 | * @nr_targets_set: track how many targets have been programmed during setup |
384e624b DW |
581 | * @nr_eps: number of endpoints beneath @port |
582 | * @nr_targets: number of distinct targets needed to reach @nr_eps | |
583 | */ | |
584 | struct cxl_region_ref { | |
585 | struct cxl_port *port; | |
586 | struct cxl_decoder *decoder; | |
587 | struct cxl_region *region; | |
588 | struct xarray endpoints; | |
27b3f8d1 | 589 | int nr_targets_set; |
384e624b DW |
590 | int nr_eps; |
591 | int nr_targets; | |
592 | }; | |
593 | ||
d54c1bbe BW |
594 | /* |
595 | * The platform firmware device hosting the root is also the top of the | |
596 | * CXL port topology. All other CXL ports have another CXL port as their | |
597 | * parent and their ->uport / host device is out-of-line of the port | |
598 | * ancestry. | |
599 | */ | |
600 | static inline bool is_cxl_root(struct cxl_port *port) | |
601 | { | |
602 | return port->uport == port->dev.parent; | |
603 | } | |
604 | ||
3c5b9039 | 605 | bool is_cxl_port(struct device *dev); |
4812be97 | 606 | struct cxl_port *to_cxl_port(struct device *dev); |
98d2d3a2 | 607 | struct pci_bus; |
5ff7316f DW |
608 | int devm_cxl_register_pci_bus(struct device *host, struct device *uport, |
609 | struct pci_bus *bus); | |
610 | struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port); | |
4812be97 DW |
611 | struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, |
612 | resource_size_t component_reg_phys, | |
1b58b4ca | 613 | struct cxl_dport *parent_dport); |
a46cfc0f | 614 | struct cxl_port *find_cxl_root(struct device *dev); |
2703c16c | 615 | int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); |
4029c32f DW |
616 | void cxl_bus_rescan(void); |
617 | void cxl_bus_drain(void); | |
1b58b4ca DW |
618 | struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, |
619 | struct cxl_dport **dport); | |
8dd2bc0f | 620 | bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); |
2703c16c | 621 | |
664bf115 | 622 | struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, |
98d2d3a2 DW |
623 | struct device *dport, int port_id, |
624 | resource_size_t component_reg_phys); | |
d5b1a271 RR |
625 | struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, |
626 | struct device *dport_dev, int port_id, | |
627 | resource_size_t component_reg_phys, | |
628 | resource_size_t rcrb); | |
2703c16c | 629 | |
40ba17af | 630 | struct cxl_decoder *to_cxl_decoder(struct device *dev); |
0f157c7f | 631 | struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); |
3bf65915 | 632 | struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); |
8fdcb170 | 633 | bool is_root_decoder(struct device *dev); |
8ae3cebc | 634 | bool is_endpoint_decoder(struct device *dev); |
0f157c7f | 635 | struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, |
f9db85bf AS |
636 | unsigned int nr_targets, |
637 | cxl_calc_hb_fn calc_hb); | |
638 | struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos); | |
e636479e DW |
639 | struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, |
640 | unsigned int nr_targets); | |
48667f67 | 641 | int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); |
3bf65915 | 642 | struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); |
d17d0540 | 643 | int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); |
48667f67 | 644 | int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); |
8dd2bc0f BW |
645 | int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); |
646 | ||
d17d0540 | 647 | struct cxl_hdm; |
664bf115 DW |
648 | struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port); |
649 | int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm); | |
650 | int devm_cxl_add_passthrough_decoder(struct cxl_port *port); | |
40ba17af | 651 | |
779dd20c BW |
652 | bool is_cxl_region(struct device *dev); |
653 | ||
b39cb105 | 654 | extern struct bus_type cxl_bus_type; |
6af7139c DW |
655 | |
656 | struct cxl_driver { | |
657 | const char *name; | |
658 | int (*probe)(struct device *dev); | |
659 | void (*remove)(struct device *dev); | |
660 | struct device_driver drv; | |
661 | int id; | |
662 | }; | |
663 | ||
664 | static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv) | |
665 | { | |
666 | return container_of(drv, struct cxl_driver, drv); | |
667 | } | |
668 | ||
669 | int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, | |
670 | const char *modname); | |
671 | #define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME) | |
672 | void cxl_driver_unregister(struct cxl_driver *cxl_drv); | |
673 | ||
c57cae78 BW |
674 | #define module_cxl_driver(__cxl_driver) \ |
675 | module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister) | |
676 | ||
21083f51 DW |
677 | #define CXL_DEVICE_NVDIMM_BRIDGE 1 |
678 | #define CXL_DEVICE_NVDIMM 2 | |
54cdbf84 BW |
679 | #define CXL_DEVICE_PORT 3 |
680 | #define CXL_DEVICE_ROOT 4 | |
8dd2bc0f | 681 | #define CXL_DEVICE_MEMORY_EXPANDER 5 |
8d48817d | 682 | #define CXL_DEVICE_REGION 6 |
04ad63f0 | 683 | #define CXL_DEVICE_PMEM_REGION 7 |
8fdcb170 | 684 | |
6af7139c DW |
685 | #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*") |
686 | #define CXL_MODALIAS_FMT "cxl:t%d" | |
687 | ||
8fdcb170 DW |
688 | struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev); |
689 | struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, | |
690 | struct cxl_port *port); | |
21083f51 DW |
691 | struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); |
692 | bool is_cxl_nvdimm(struct device *dev); | |
53989fad | 693 | bool is_cxl_nvdimm_bridge(struct device *dev); |
f17b558d | 694 | int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); |
04ad63f0 DW |
695 | struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev); |
696 | ||
697 | #ifdef CONFIG_CXL_REGION | |
698 | bool is_cxl_pmem_region(struct device *dev); | |
699 | struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev); | |
700 | #else | |
701 | static inline bool is_cxl_pmem_region(struct device *dev) | |
702 | { | |
703 | return false; | |
704 | } | |
705 | static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev) | |
706 | { | |
707 | return NULL; | |
708 | } | |
709 | #endif | |
67dcdd4d DW |
710 | |
711 | /* | |
712 | * Unit test builds overrides this to __weak, find the 'strong' version | |
713 | * of these symbols in tools/testing/cxl/. | |
714 | */ | |
715 | #ifndef __mock | |
716 | #define __mock static | |
717 | #endif | |
3c5b9039 | 718 | |
8adaf747 | 719 | #endif /* __CXL_H__ */ |