Commit | Line | Data |
---|---|---|
bfe1d560 DJ |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ | |
3 | #ifndef _IDXD_H_ | |
4 | #define _IDXD_H_ | |
5 | ||
6 | #include <linux/sbitmap.h> | |
8f47d1a5 | 7 | #include <linux/dmaengine.h> |
bfe1d560 DJ |
8 | #include <linux/percpu-rwsem.h> |
9 | #include <linux/wait.h> | |
42d279f9 | 10 | #include <linux/cdev.h> |
47c16ac2 | 11 | #include <linux/idr.h> |
81dd4d4d | 12 | #include <linux/pci.h> |
8b67426e | 13 | #include <linux/ioasid.h> |
de5819b9 | 14 | #include <linux/bitmap.h> |
81dd4d4d | 15 | #include <linux/perf_event.h> |
a9c17152 | 16 | #include <uapi/linux/idxd.h> |
bfe1d560 DJ |
17 | #include "registers.h" |
18 | ||
19 | #define IDXD_DRIVER_VERSION "1.00" | |
20 | ||
21 | extern struct kmem_cache *idxd_desc_pool; | |
ade8a86b | 22 | extern bool tc_override; |
bfe1d560 | 23 | |
39786285 | 24 | struct idxd_wq; |
700af3a0 DJ |
25 | struct idxd_dev; |
26 | ||
27 | enum idxd_dev_type { | |
28 | IDXD_DEV_NONE = -1, | |
29 | IDXD_DEV_DSA = 0, | |
30 | IDXD_DEV_IAX, | |
31 | IDXD_DEV_WQ, | |
32 | IDXD_DEV_GROUP, | |
33 | IDXD_DEV_ENGINE, | |
34 | IDXD_DEV_CDEV, | |
35 | IDXD_DEV_MAX_TYPE, | |
36 | }; | |
37 | ||
38 | struct idxd_dev { | |
39 | struct device conf_dev; | |
40 | enum idxd_dev_type type; | |
41 | }; | |
39786285 | 42 | |
bfe1d560 DJ |
43 | #define IDXD_REG_TIMEOUT 50 |
44 | #define IDXD_DRAIN_TIMEOUT 5000 | |
45 | ||
46 | enum idxd_type { | |
47 | IDXD_TYPE_UNKNOWN = -1, | |
48 | IDXD_TYPE_DSA = 0, | |
f25b4638 DJ |
49 | IDXD_TYPE_IAX, |
50 | IDXD_TYPE_MAX, | |
bfe1d560 DJ |
51 | }; |
52 | ||
53 | #define IDXD_NAME_SIZE 128 | |
81dd4d4d | 54 | #define IDXD_PMU_EVENT_MAX 64 |
bfe1d560 | 55 | |
7930d855 DJ |
56 | #define IDXD_ENQCMDS_RETRIES 32 |
57 | #define IDXD_ENQCMDS_MAX_RETRIES 64 | |
58 | ||
bfe1d560 | 59 | struct idxd_device_driver { |
da5a11d7 | 60 | const char *name; |
5fee6567 | 61 | enum idxd_dev_type *type; |
fcc2281b DJ |
62 | int (*probe)(struct idxd_dev *idxd_dev); |
63 | void (*remove)(struct idxd_dev *idxd_dev); | |
bfe1d560 DJ |
64 | struct device_driver drv; |
65 | }; | |
66 | ||
c05257b5 | 67 | extern struct idxd_device_driver dsa_drv; |
034b3290 | 68 | extern struct idxd_device_driver idxd_drv; |
0cda4f69 | 69 | extern struct idxd_device_driver idxd_dmaengine_drv; |
448c3de8 | 70 | extern struct idxd_device_driver idxd_user_drv; |
c05257b5 | 71 | |
8b67426e | 72 | #define INVALID_INT_HANDLE -1 |
/*
 * Per-interrupt-vector bookkeeping. Completed descriptors are first
 * queued on the lockless pending_llist and then migrated to work_list
 * for processing by the irq thread.
 */
struct idxd_irq_entry {
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between irq thread process descriptor
	 * and irq thread processing error descriptor.
	 */
	spinlock_t list_lock;
	int int_handle;		/* device interrupt handle; INVALID_INT_HANDLE when unset */
	ioasid_t pasid;
};
86 | ||
87 | struct idxd_group { | |
700af3a0 | 88 | struct idxd_dev idxd_dev; |
bfe1d560 DJ |
89 | struct idxd_device *idxd; |
90 | struct grpcfg grpcfg; | |
91 | int id; | |
92 | int num_engines; | |
93 | int num_wqs; | |
7ed6f1b8 DJ |
94 | bool use_rdbuf_limit; |
95 | u8 rdbufs_allowed; | |
96 | u8 rdbufs_reserved; | |
bfe1d560 DJ |
97 | int tc_a; |
98 | int tc_b; | |
1f273752 | 99 | int desc_progress_limit; |
7ca68fa3 | 100 | int batch_progress_limit; |
bfe1d560 DJ |
101 | }; |
102 | ||
81dd4d4d TZ |
103 | struct idxd_pmu { |
104 | struct idxd_device *idxd; | |
105 | ||
106 | struct perf_event *event_list[IDXD_PMU_EVENT_MAX]; | |
107 | int n_events; | |
108 | ||
109 | DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX); | |
110 | ||
111 | struct pmu pmu; | |
112 | char name[IDXD_NAME_SIZE]; | |
113 | int cpu; | |
114 | ||
115 | int n_counters; | |
116 | int counter_width; | |
117 | int n_event_categories; | |
118 | ||
119 | bool per_counter_caps_supported; | |
120 | unsigned long supported_event_categories; | |
121 | ||
122 | unsigned long supported_filters; | |
123 | int n_filters; | |
124 | ||
125 | struct hlist_node cpuhp_node; | |
126 | }; | |
127 | ||
bfe1d560 DJ |
128 | #define IDXD_MAX_PRIORITY 0xf |
129 | ||
130 | enum idxd_wq_state { | |
131 | IDXD_WQ_DISABLED = 0, | |
132 | IDXD_WQ_ENABLED, | |
133 | }; | |
134 | ||
135 | enum idxd_wq_flag { | |
136 | WQ_FLAG_DEDICATED = 0, | |
8e50d392 | 137 | WQ_FLAG_BLOCK_ON_FAULT, |
22bd0df8 | 138 | WQ_FLAG_ATS_DISABLE, |
bfe1d560 DJ |
139 | }; |
140 | ||
141 | enum idxd_wq_type { | |
142 | IDXD_WQT_NONE = 0, | |
143 | IDXD_WQT_KERNEL, | |
42d279f9 DJ |
144 | IDXD_WQT_USER, |
145 | }; | |
146 | ||
147 | struct idxd_cdev { | |
04922b74 | 148 | struct idxd_wq *wq; |
42d279f9 | 149 | struct cdev cdev; |
700af3a0 | 150 | struct idxd_dev idxd_dev; |
42d279f9 | 151 | int minor; |
bfe1d560 DJ |
152 | }; |
153 | ||
154 | #define IDXD_ALLOCATED_BATCH_SIZE 128U | |
155 | #define WQ_NAME_SIZE 1024 | |
156 | #define WQ_TYPE_SIZE 10 | |
157 | ||
92452a72 DJ |
158 | #define WQ_DEFAULT_QUEUE_DEPTH 16 |
159 | #define WQ_DEFAULT_MAX_XFER SZ_2M | |
160 | #define WQ_DEFAULT_MAX_BATCH 32 | |
161 | ||
d1dfe5b8 DJ |
162 | enum idxd_op_type { |
163 | IDXD_OP_BLOCK = 0, | |
164 | IDXD_OP_NONBLOCK = 1, | |
165 | }; | |
166 | ||
8f47d1a5 DJ |
167 | enum idxd_complete_type { |
168 | IDXD_COMPLETE_NORMAL = 0, | |
169 | IDXD_COMPLETE_ABORT, | |
8e50d392 | 170 | IDXD_COMPLETE_DEV_FAIL, |
8f47d1a5 DJ |
171 | }; |
172 | ||
39786285 DJ |
173 | struct idxd_dma_chan { |
174 | struct dma_chan chan; | |
175 | struct idxd_wq *wq; | |
176 | }; | |
177 | ||
/*
 * Software state for one work queue (wq) of an IDXD device.
 * Configuration/lifetime changes are serialized by wq_lock; the
 * submission-path fields (portal, portal_offset, sbq) are touched
 * outside that lock (see idxd_wq_portal_addr()).
 */
struct idxd_wq {
	void __iomem *portal;		/* mapped MMIO submission portal */
	u32 portal_offset;		/* rotating offset into the portal page */
	unsigned int enqcmds_retries;	/* retry budget for ENQCMDS submission */
	struct percpu_ref wq_active;	/* references held by in-flight users */
	struct completion wq_dead;	/* teardown rendezvous for wq_active — TODO confirm */
	struct completion wq_resurrect;	/* re-init rendezvous for wq_active — TODO confirm */
	struct idxd_dev idxd_dev;	/* embedded conf device (sysfs) */
	struct idxd_cdev *idxd_cdev;	/* char device, user-type wqs only */
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;	/* owning device */
	int id;
	struct idxd_irq_entry ie;	/* per-wq completion interrupt entry */
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;		/* open clients; see idxd_wq_get()/idxd_wq_put() */
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;		/* WQ_FLAG_* bits */
	union wqcfg *wqcfg;		/* shadow of the wq configuration registers */
	unsigned long *opcap_bmap;	/* per-wq operation capability bitmap */

	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {				/* completion records; member depends on device type */
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	dma_addr_t compls_addr;		/* DMA address of the completion record array */
	int compls_size;
	struct idxd_desc **descs;	/* software descriptors */
	struct sbitmap_queue sbq;	/* descriptor slot allocator */
	struct idxd_dma_chan *idxd_chan; /* dmaengine channel, if registered */
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;		/* forced to 0 on IAX (no batch support) */
};
218 | ||
219 | struct idxd_engine { | |
700af3a0 | 220 | struct idxd_dev idxd_dev; |
bfe1d560 DJ |
221 | int id; |
222 | struct idxd_group *group; | |
223 | struct idxd_device *idxd; | |
224 | }; | |
225 | ||
226 | /* shadow registers */ | |
227 | struct idxd_hw { | |
228 | u32 version; | |
229 | union gen_cap_reg gen_cap; | |
230 | union wq_cap_reg wq_cap; | |
231 | union group_cap_reg group_cap; | |
232 | union engine_cap_reg engine_cap; | |
233 | struct opcap opcap; | |
eb15e715 | 234 | u32 cmd_cap; |
9f0d99b3 | 235 | union iaa_cap_reg iaa_cap; |
bfe1d560 DJ |
236 | }; |
237 | ||
238 | enum idxd_device_state { | |
239 | IDXD_DEV_HALTED = -1, | |
240 | IDXD_DEV_DISABLED = 0, | |
bfe1d560 DJ |
241 | IDXD_DEV_ENABLED, |
242 | }; | |
243 | ||
244 | enum idxd_device_flag { | |
245 | IDXD_FLAG_CONFIGURABLE = 0, | |
0d5c10b4 | 246 | IDXD_FLAG_CMD_RUNNING, |
8e50d392 | 247 | IDXD_FLAG_PASID_ENABLED, |
42a1b738 | 248 | IDXD_FLAG_USER_PASID_ENABLED, |
bfe1d560 DJ |
249 | }; |
250 | ||
39786285 DJ |
251 | struct idxd_dma_dev { |
252 | struct idxd_device *idxd; | |
253 | struct dma_device dma; | |
254 | }; | |
255 | ||
435b512d DJ |
256 | struct idxd_driver_data { |
257 | const char *name_prefix; | |
bfe1d560 | 258 | enum idxd_type type; |
435b512d DJ |
259 | struct device_type *dev_type; |
260 | int compl_size; | |
261 | int align; | |
262 | }; | |
263 | ||
/* Device event log (evl) state; entries are produced by the hardware. */
struct idxd_evl {
	/* Lock to protect event log access. */
	spinlock_t lock;
	void *log;		/* event log buffer */
	dma_addr_t dma;		/* DMA address of the buffer */
	/* Total size of event log = number of entries * entry size. */
	unsigned int log_size;
	/* The number of entries in the event log. */
	u16 size;
	u16 head;		/* presumably next entry for software to consume — confirm */
};
275 | ||
/*
 * Software state for one IDXD (DSA or IAX) PCI device.
 */
struct idxd_device {
	struct idxd_dev idxd_dev;	/* embedded conf device (sysfs) */
	struct idxd_driver_data *data;	/* per-device-type constants */
	struct list_head list;
	struct idxd_hw hw;		/* shadow copies of capability registers */
	enum idxd_device_state state;
	unsigned long flags;		/* IDXD_FLAG_* bits */
	int id;
	int major;			/* char device major number */
	u32 cmd_status;			/* status of the last device command */
	struct idxd_irq_entry ie;	/* misc irq, msix 0 */

	struct pci_dev *pdev;
	void __iomem *reg_base;		/* mapped device MMIO registers */

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;		/* IOMMU SVA handle, paired with pasid */
	unsigned int pasid;

	int num_groups;
	int irq_cnt;
	bool request_int_handles;

	/* Register block offsets within the MMIO space. */
	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;		/* forced to 0 on IAX (no batch support) */
	int max_groups;
	int max_engines;
	int max_rdbufs;
	int max_wqs;
	int max_wq_size;
	int rdbuf_limit;
	int nr_rdbufs;		/* non-reserved read buffers */
	unsigned int wqcfg_size;
	unsigned long *wq_enable_map;	/* bitmap of enabled wqs */

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;

	struct idxd_dma_dev *idxd_dma;	/* dmaengine registration state */
	struct workqueue_struct *wq;	/* workqueue for deferred work */
	struct work_struct work;

	struct idxd_pmu *idxd_pmu;	/* perfmon state */

	unsigned long *opcap_bmap;	/* operation capability bitmap */
	struct idxd_evl *evl;		/* event log */

	struct dentry *dbgfs_dir;	/* debugfs nodes */
	struct dentry *dbgfs_evl_file;
};
337 | ||
244da66c DJ |
338 | static inline unsigned int evl_ent_size(struct idxd_device *idxd) |
339 | { | |
340 | return idxd->hw.gen_cap.evl_support ? | |
341 | (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0; | |
342 | } | |
343 | ||
344 | static inline unsigned int evl_size(struct idxd_device *idxd) | |
345 | { | |
346 | return idxd->evl->size * evl_ent_size(idxd); | |
347 | } | |
348 | ||
bfe1d560 DJ |
349 | /* IDXD software descriptor */ |
350 | struct idxd_desc { | |
f25b4638 DJ |
351 | union { |
352 | struct dsa_hw_desc *hw; | |
353 | struct iax_hw_desc *iax_hw; | |
354 | }; | |
bfe1d560 | 355 | dma_addr_t desc_dma; |
f25b4638 DJ |
356 | union { |
357 | struct dsa_completion_record *completion; | |
358 | struct iax_completion_record *iax_completion; | |
359 | }; | |
bfe1d560 | 360 | dma_addr_t compl_dma; |
8f47d1a5 | 361 | struct dma_async_tx_descriptor txd; |
bfe1d560 DJ |
362 | struct llist_node llnode; |
363 | struct list_head list; | |
364 | int id; | |
0705107f | 365 | int cpu; |
bfe1d560 DJ |
366 | struct idxd_wq *wq; |
367 | }; | |
368 | ||
6b4b87f2 DJ |
369 | /* |
370 | * This is software defined error for the completion status. We overload the error code | |
371 | * that will never appear in completion status and only SWERR register. | |
372 | */ | |
373 | enum idxd_completion_status { | |
374 | IDXD_COMP_DESC_ABORT = 0xff, | |
375 | }; | |
376 | ||
700af3a0 DJ |
/*
 * Accessors for the struct device embedded in each idxd object, and
 * container_of() helpers going the other way. Macro arguments are fully
 * parenthesized so the accessors expand safely when handed non-trivial
 * expressions (the originals applied -> directly to the raw argument).
 */
#define idxd_confdev(idxd) (&(idxd)->idxd_dev.conf_dev)
#define wq_confdev(wq) (&(wq)->idxd_dev.conf_dev)
#define engine_confdev(engine) (&(engine)->idxd_dev.conf_dev)
#define group_confdev(group) (&(group)->idxd_dev.conf_dev)
#define cdev_dev(cdev) (&(cdev)->idxd_dev.conf_dev)

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
700af3a0 DJ |
386 | |
/* Map a sysfs conf device back to its owning struct idxd_device. */
static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	return idxd_dev_to_idxd(confdev_to_idxd_dev(dev));
}
393 | ||
/* Map a sysfs conf device back to its owning struct idxd_wq. */
static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	return idxd_dev_to_wq(confdev_to_idxd_dev(dev));
}
400 | ||
401 | static inline struct idxd_engine *confdev_to_engine(struct device *dev) | |
402 | { | |
403 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); | |
404 | ||
405 | return container_of(idxd_dev, struct idxd_engine, idxd_dev); | |
406 | } | |
407 | ||
408 | static inline struct idxd_group *confdev_to_group(struct device *dev) | |
409 | { | |
410 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); | |
411 | ||
412 | return container_of(idxd_dev, struct idxd_group, idxd_dev); | |
413 | } | |
414 | ||
415 | static inline struct idxd_cdev *dev_to_cdev(struct device *dev) | |
416 | { | |
417 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); | |
418 | ||
419 | return container_of(idxd_dev, struct idxd_cdev, idxd_dev); | |
420 | } | |
421 | ||
422 | static inline void idxd_dev_set_type(struct idxd_dev *idev, int type) | |
423 | { | |
424 | if (type >= IDXD_DEV_MAX_TYPE) { | |
425 | idev->type = IDXD_DEV_NONE; | |
426 | return; | |
427 | } | |
428 | ||
429 | idev->type = type; | |
430 | } | |
bfe1d560 | 431 | |
ec0d6423 DJ |
432 | static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx) |
433 | { | |
434 | return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie; | |
435 | } | |
436 | ||
/*
 * Recover the wq embedding this irq entry. Only valid for per-wq
 * entries (idx > 0 in idxd_get_ie()), not the device misc entry.
 */
static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_wq, ie);
}
441 | ||
/*
 * Recover the device embedding this irq entry. Only valid for the
 * misc interrupt entry (idx == 0 in idxd_get_ie()).
 */
static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_device, ie);
}
446 | ||
42d279f9 DJ |
447 | extern struct bus_type dsa_bus_type; |
448 | ||
8e50d392 | 449 | extern bool support_enqcmd; |
4b73e4eb | 450 | extern struct ida idxd_ida; |
47c16ac2 DJ |
451 | extern struct device_type dsa_device_type; |
452 | extern struct device_type iax_device_type; | |
7c5dd23e | 453 | extern struct device_type idxd_wq_device_type; |
75b91130 | 454 | extern struct device_type idxd_engine_device_type; |
defe49f9 | 455 | extern struct device_type idxd_group_device_type; |
47c16ac2 | 456 | |
/* True when the conf device represents a DSA device. */
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}
461 | ||
/* True when the conf device represents an IAX device. */
static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}
466 | ||
fcc2281b | 467 | static inline bool is_idxd_dev(struct idxd_dev *idxd_dev) |
47c16ac2 | 468 | { |
fcc2281b | 469 | return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev); |
47c16ac2 | 470 | } |
8e50d392 | 471 | |
/* True when the conf device represents a work queue. */
static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}
476 | ||
477 | static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) | |
478 | { | |
479 | if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0) | |
480 | return true; | |
481 | return false; | |
482 | } | |
483 | ||
/* True when the wq is exposed to user space (char device path). */
static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
488 | ||
6e7f3ee9 DJ |
/* True when the wq is used by an in-kernel consumer. */
static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}
493 | ||
bfe1d560 DJ |
/* True when the wq is configured as dedicated (WQ_FLAG_DEDICATED set). */
static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
498 | ||
8e50d392 DJ |
/* True when the wq is configured as shared (inverse of wq_dedicated()). */
static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
503 | ||
/* True when the device-wide (kernel) PASID is enabled. */
static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
508 | ||
/* True when user-space PASID use is enabled on the device. */
static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}
513 | ||
/*
 * True when PASID is in effect for this wq: kernel wqs follow the
 * device-wide PASID enable, user wqs follow the user PASID enable.
 */
static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{
	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
}
519 | ||
/* Shared wq operation needs both ENQCMD CPU support and PASID on this wq. */
static inline bool wq_shared_supported(struct idxd_wq *wq)
{
	return (support_enqcmd && wq_pasid_enabled(wq));
}
524 | ||
42d279f9 DJ |
525 | enum idxd_portal_prot { |
526 | IDXD_PORTAL_UNLIMITED = 0, | |
527 | IDXD_PORTAL_LIMITED, | |
528 | }; | |
529 | ||
eb15e715 DJ |
530 | enum idxd_interrupt_type { |
531 | IDXD_IRQ_MSIX = 0, | |
532 | IDXD_IRQ_IMS, | |
533 | }; | |
534 | ||
42d279f9 DJ |
/* Each portal spans 0x1000 bytes; the limited portal follows the unlimited one. */
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}
539 | ||
/* Byte offset of a wq's portal from the portal BAR base: 4 pages per wq. */
static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
545 | ||
a9c17152 DJ |
#define IDXD_PORTAL_MASK	(PAGE_SIZE - 1)

/*
 * Even though this function can be accessed by multiple threads, it is safe to use.
 * At worst the address gets used more than once before it gets incremented. We don't
 * hit a threshold until iops becomes many million times a second. So the occasional
 * reuse of the same address is tolerable compare to using an atomic variable. This is
 * safe on a system that has atomic load/store for 32bit integers. Given that this is an
 * Intel iEP device, that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	/* Advance by one raw descriptor's width, wrapping within the portal page. */
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}
563 | ||
c52ca478 DJ |
/* Take a client reference; not atomic — presumably serialized by wq->wq_lock, confirm. */
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}
568 | ||
/* Drop a client reference; not atomic — presumably serialized by wq->wq_lock, confirm. */
static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}
573 | ||
574 | static inline int idxd_wq_refcount(struct idxd_wq *wq) | |
575 | { | |
576 | return wq->client_count; | |
577 | }; | |
578 | ||
e8dbd644 XS |
579 | /* |
580 | * Intel IAA does not support batch processing. | |
581 | * The max batch size of device, max batch size of wq and | |
582 | * max batch shift of wqcfg should be always 0 on IAA. | |
583 | */ | |
584 | static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd, | |
585 | u32 max_batch_size) | |
586 | { | |
587 | if (idxd_type == IDXD_TYPE_IAX) | |
588 | idxd->max_batch_size = 0; | |
589 | else | |
590 | idxd->max_batch_size = max_batch_size; | |
591 | } | |
592 | ||
593 | static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, | |
594 | u32 max_batch_size) | |
595 | { | |
596 | if (idxd_type == IDXD_TYPE_IAX) | |
597 | wq->max_batch_size = 0; | |
598 | else | |
599 | wq->max_batch_size = max_batch_size; | |
600 | } | |
601 | ||
602 | static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg, | |
603 | u32 max_batch_shift) | |
604 | { | |
605 | if (idxd_type == IDXD_TYPE_IAX) | |
606 | wqcfg->max_batch_shift = 0; | |
607 | else | |
608 | wqcfg->max_batch_shift = max_batch_shift; | |
609 | } | |
610 | ||
3ecfc913 DJ |
611 | int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, |
612 | struct module *module, const char *mod_name); | |
613 | #define idxd_driver_register(driver) \ | |
614 | __idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) | |
615 | ||
616 | void idxd_driver_unregister(struct idxd_device_driver *idxd_drv); | |
617 | ||
6e7f3ee9 DJ |
618 | #define module_idxd_driver(__idxd_driver) \ |
619 | module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister) | |
620 | ||
c52ca478 DJ |
621 | int idxd_register_bus_type(void); |
622 | void idxd_unregister_bus_type(void); | |
47c16ac2 DJ |
623 | int idxd_register_devices(struct idxd_device *idxd); |
624 | void idxd_unregister_devices(struct idxd_device *idxd); | |
c52ca478 DJ |
625 | int idxd_register_driver(void); |
626 | void idxd_unregister_driver(void); | |
5b0c68c4 | 627 | void idxd_wqs_quiesce(struct idxd_device *idxd); |
f6d442f7 | 628 | bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc); |
34ca0066 | 629 | void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count); |
bfe1d560 DJ |
630 | |
631 | /* device interrupt control */ | |
bfe1d560 DJ |
632 | irqreturn_t idxd_misc_thread(int vec, void *data); |
633 | irqreturn_t idxd_wq_thread(int irq, void *data); | |
634 | void idxd_mask_error_interrupts(struct idxd_device *idxd); | |
635 | void idxd_unmask_error_interrupts(struct idxd_device *idxd); | |
bfe1d560 DJ |
636 | |
637 | /* device control */ | |
034b3290 DJ |
638 | int idxd_register_idxd_drv(void); |
639 | void idxd_unregister_idxd_drv(void); | |
bd42805b | 640 | int idxd_device_drv_probe(struct idxd_dev *idxd_dev); |
745e92a6 | 641 | void idxd_device_drv_remove(struct idxd_dev *idxd_dev); |
1f2bb403 | 642 | int drv_enable_wq(struct idxd_wq *wq); |
69e4f8be | 643 | void drv_disable_wq(struct idxd_wq *wq); |
89e3becd | 644 | int idxd_device_init_reset(struct idxd_device *idxd); |
bfe1d560 DJ |
645 | int idxd_device_enable(struct idxd_device *idxd); |
646 | int idxd_device_disable(struct idxd_device *idxd); | |
0d5c10b4 | 647 | void idxd_device_reset(struct idxd_device *idxd); |
0dcfe41e | 648 | void idxd_device_clear_state(struct idxd_device *idxd); |
bfe1d560 | 649 | int idxd_device_config(struct idxd_device *idxd); |
8e50d392 | 650 | void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid); |
8c66bbdc | 651 | int idxd_device_load_config(struct idxd_device *idxd); |
eb15e715 DJ |
652 | int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle, |
653 | enum idxd_interrupt_type irq_type); | |
654 | int idxd_device_release_int_handle(struct idxd_device *idxd, int handle, | |
655 | enum idxd_interrupt_type irq_type); | |
bfe1d560 DJ |
656 | |
657 | /* work queue control */ | |
5b0c68c4 | 658 | void idxd_wqs_unmap_portal(struct idxd_device *idxd); |
bfe1d560 DJ |
659 | int idxd_wq_alloc_resources(struct idxd_wq *wq); |
660 | void idxd_wq_free_resources(struct idxd_wq *wq); | |
661 | int idxd_wq_enable(struct idxd_wq *wq); | |
0dcfe41e | 662 | int idxd_wq_disable(struct idxd_wq *wq, bool reset_config); |
0d5c10b4 | 663 | void idxd_wq_drain(struct idxd_wq *wq); |
ea9aadc0 | 664 | void idxd_wq_reset(struct idxd_wq *wq); |
c52ca478 DJ |
665 | int idxd_wq_map_portal(struct idxd_wq *wq); |
666 | void idxd_wq_unmap_portal(struct idxd_wq *wq); | |
8e50d392 DJ |
667 | int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid); |
668 | int idxd_wq_disable_pasid(struct idxd_wq *wq); | |
bd5970a0 | 669 | void __idxd_wq_quiesce(struct idxd_wq *wq); |
93a40a6d DJ |
670 | void idxd_wq_quiesce(struct idxd_wq *wq); |
671 | int idxd_wq_init_percpu_ref(struct idxd_wq *wq); | |
403a2e23 DJ |
672 | void idxd_wq_free_irq(struct idxd_wq *wq); |
673 | int idxd_wq_request_irq(struct idxd_wq *wq); | |
bfe1d560 | 674 | |
d1dfe5b8 DJ |
675 | /* submission */ |
676 | int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); | |
677 | struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); | |
678 | void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); | |
7930d855 | 679 | int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); |
d1dfe5b8 | 680 | |
8f47d1a5 DJ |
681 | /* dmaengine */ |
682 | int idxd_register_dma_device(struct idxd_device *idxd); | |
683 | void idxd_unregister_dma_device(struct idxd_device *idxd); | |
8f47d1a5 DJ |
684 | void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); |
685 | void idxd_dma_complete_txd(struct idxd_desc *desc, | |
5d78abb6 | 686 | enum idxd_complete_type comp_type, bool free_desc); |
8f47d1a5 | 687 | |
42d279f9 DJ |
688 | /* cdev */ |
689 | int idxd_cdev_register(void); | |
690 | void idxd_cdev_remove(void); | |
691 | int idxd_cdev_get_major(struct idxd_device *idxd); | |
692 | int idxd_wq_add_cdev(struct idxd_wq *wq); | |
693 | void idxd_wq_del_cdev(struct idxd_wq *wq); | |
694 | ||
81dd4d4d TZ |
695 | /* perfmon */ |
696 | #if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON) | |
697 | int perfmon_pmu_init(struct idxd_device *idxd); | |
698 | void perfmon_pmu_remove(struct idxd_device *idxd); | |
699 | void perfmon_counter_overflow(struct idxd_device *idxd); | |
700 | void perfmon_init(void); | |
701 | void perfmon_exit(void); | |
702 | #else | |
703 | static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; } | |
704 | static inline void perfmon_pmu_remove(struct idxd_device *idxd) {} | |
705 | static inline void perfmon_counter_overflow(struct idxd_device *idxd) {} | |
706 | static inline void perfmon_init(void) {} | |
707 | static inline void perfmon_exit(void) {} | |
708 | #endif | |
709 | ||
5fbe6503 DJ |
710 | /* debugfs */ |
711 | int idxd_device_init_debugfs(struct idxd_device *idxd); | |
712 | void idxd_device_remove_debugfs(struct idxd_device *idxd); | |
713 | int idxd_init_debugfs(void); | |
714 | void idxd_remove_debugfs(void); | |
715 | ||
bfe1d560 | 716 | #endif |