/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages' sg_table, i.e. the mask of the
	 * lengths of the sg entries.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};

/**
 * struct i915_vma_bindinfo - Information needed for async bind
 * only, but that can be dropped after the bind has taken place.
 * Consider making this a separate argument to the bind_vma
 * op, coalescing with other arguments like vm, stash, cache_level
 * and flags.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Refcounted sg-table when delayed object destruction
 * is supported. May be NULL.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 */
struct i915_vma_bindinfo {
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
	struct i915_refct_sgt *pages_rsgt;
	bool readonly:1;
	bool lmem:1;
};

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before scheduled unbind other than
 * for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Pointer to struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @wakeref: Wakeref taken when the unbind is scheduled, if @needs_wakeref
 * is set.
 * @vm: non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start. Note that
 * this is after any padding that might have been allocated.
 * @node_size: Size of the allocated range manager node with padding
 * subtracted.
 * @vma_size: Bind size.
 * @guard: The size of guard area preceding and trailing the bind.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
 * needs to be skipped for unbind.
 * @tlb: Pointer to obj->mm.tlb if the unbind is async; otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource spans from a binding request
 * until the actual, possibly asynchronous, unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * @bi: Information needed for async bind only but that can be dropped
	 * after the bind has taken place.
	 *
	 * Consider making this a separate argument to the bind_vma op,
	 * coalescing with other arguments like vm, stash, cache_level and flags
	 */
	struct i915_vma_bindinfo bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 guard;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;

	u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);

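/*
 * Usage sketch for i915_vma_resource_hold()/unhold(): illustrative only and
 * not lifted verbatim from the driver; @vma_res and the elided access are
 * placeholders. A successful hold means the unbind fence has not signalled
 * yet, so the snapshotted binding may still be accessed until the matching
 * unhold.
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		... access the bound range / snapshotted information ...
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */
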
struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb);

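/*
 * Unbind fence sketch: illustrative only, not the driver's exact code;
 * @vma_res is a placeholder and @tlb is passed as NULL as in the non-async
 * case described above. The unbind fence is only published once unbind is
 * scheduled, so waiting is done on the fence returned here rather than on
 * vma_res->unbind_fence beforehand. The final put assumes this caller owns
 * a reference to the resource and is done with it.
 *
 *	struct dma_fence *fence;
 *
 *	fence = i915_vma_resource_unbind(vma_res, NULL);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */
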
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start after padding.
 * @node_size: Size of the allocated range manager node minus padding.
 * @size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size,
					  u32 guard)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
	vma_res->guard = guard;
}

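/*
 * Allocation/initialization sketch, illustrating the split described in the
 * kernel-doc above: allocate where allocation is allowed, then initialize
 * under the lock. Illustrative only; @vm, @pages, @page_sizes, @ops and
 * @node are placeholders, and the alloc error check assumes the usual
 * ERR_PTR convention.
 *
 *	struct i915_vma_resource *vma_res;
 *
 *	vma_res = i915_vma_resource_alloc();
 *	if (IS_ERR(vma_res))
 *		return PTR_ERR(vma_res);
 *
 *	mutex_lock(&vm->mutex);
 *	i915_vma_resource_init(vma_res, vm, pages, &page_sizes, NULL,
 *			       false, false, NULL, ops, NULL,
 *			       node.start, node.size, node.size, 0);
 *	mutex_unlock(&vm->mutex);
 */
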
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif