/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#include "i915_gem.h"

struct drm_mm_node;
struct ttm_resource;

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise return the pointer to that next element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 * @__step: step size
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
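
/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * walking every DMA address of a mapped sg_table in PAGE_SIZE steps. The
 * sgt pointer and the bind_page() helper are hypothetical names standing in
 * for whatever the real caller does per address.
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	__for_each_sgt_daddr(daddr, iter, sgt, PAGE_SIZE)
 *		bind_page(daddr);
 */
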
/**
 * __for_each_daddr_next - iterates over the device addresses with a pre-initialized iterator.
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, external)
 * @__step: step size
 */
#define __for_each_daddr_next(__dp, __iter, __step)			\
	for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
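
/*
 * Example (editor's illustrative sketch): same walk as above, but with the
 * iterator owned and initialized by the caller via __sgt_iter(..., true),
 * so the state can be prepared outside the loop. sgt and bind_page() are
 * hypothetical names.
 *
 *	struct sgt_iter iter = __sgt_iter(sgt->sgl, true);
 *	dma_addr_t daddr;
 *
 *	__for_each_daddr_next(daddr, iter, PAGE_SIZE)
 *		bind_page(daddr);
 */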

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
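
/*
 * Example (editor's illustrative sketch): touching every backing struct page
 * of an sg_table, e.g. marking pages dirty after a CPU write. The pages
 * pointer is a hypothetical struct sg_table for this sketch.
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, pages)
 *		set_page_dirty(page);
 */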

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg && sg_dma_len(sg)) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}
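
/*
 * Example (editor's illustrative sketch): the returned value is a bitwise or
 * of segment lengths, so a caller can test whether a given hardware page
 * size could apply, e.g. whether any segment carries the 2M bit. st is a
 * hypothetical struct sg_table pointer here.
 *
 *	unsigned int sizes = i915_sg_dma_sizes(st->sgl);
 *	bool has_2m_bit = sizes & SZ_2M;
 */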

static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));

	/*
	 * For Xen PV guests pages aren't contiguous in DMA (machine) address
	 * space. The DMA API takes care of that both in dma_alloc_* (by
	 * calling into the hypervisor to make the pages contiguous) and in
	 * dma_map_* (by bounce buffering). But i915 ignores the coherency
	 * aspects of the DMA API and thus can't cope with bounce buffering
	 * actually happening, so add a hack here to force small allocations
	 * and mappings when running in PV mode on Xen.
	 *
	 * Note this will still break if bounce buffering is required for other
	 * reasons, like confidential computing hypervisors or PCIe root ports
	 * with addressing limitations.
	 */
	if (xen_pv_domain())
		max = PAGE_SIZE;
	return round_down(max, PAGE_SIZE);
}
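
/*
 * Example (editor's illustrative sketch): the value is intended as an upper
 * bound on segment length when building an sg_table, for instance as the
 * max_segment argument of sg_alloc_table_from_pages_segment(). st, pages,
 * n_pages and size are hypothetical variables.
 *
 *	unsigned int max_segment = i915_sg_segment_size(dev);
 *	int err;
 *
 *	err = sg_alloc_table_from_pages_segment(st, pages, n_pages, 0,
 *						size, max_segment, GFP_KERNEL);
 */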

bool i915_sg_trim(struct sg_table *orig_st);

/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt
	 */
	void (*release)(struct kref *ref);
};

/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};

/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (rsgt)
		kref_put(&rsgt->kref, rsgt->ops->release);
}

/**
 * i915_refct_sgt_get - Get a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to get.
 */
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
	kref_get(&rsgt->kref);
	return rsgt;
}

/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-list
 * is embedded into another structure.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	kref_init(&rsgt->kref);
	rsgt->table.sgl = NULL;
	rsgt->size = size;
	rsgt->ops = ops;
}
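
/*
 * Example (editor's illustrative sketch): embedding the refcounted sg-table
 * in a larger object and releasing both together. The my_backing_store type,
 * my_release() and my_ops are hypothetical; the pattern is container_of()
 * from the kref passed to the release callback.
 *
 *	struct my_backing_store {
 *		struct i915_refct_sgt rsgt;
 *	};
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct i915_refct_sgt *rsgt =
 *			container_of(ref, typeof(*rsgt), kref);
 *		struct my_backing_store *bo =
 *			container_of(rsgt, typeof(*bo), rsgt);
 *
 *		if (bo->rsgt.table.sgl)
 *			sg_free_table(&bo->rsgt.table);
 *		kfree(bo);
 *	}
 *
 *	static const struct i915_refct_sgt_ops my_ops = {
 *		.release = my_release,
 *	};
 *
 *	__i915_refct_sgt_init(&bo->rsgt, size, &my_ops);
 */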

void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment);

#endif