Commit | Line | Data |
---|---|---|
2c86e55d MA |
1 | // SPDX-License-Identifier: MIT |
2 | /* | |
3 | * Copyright © 2020 Intel Corporation | |
4 | */ | |
5 | ||
6 | #include <linux/log2.h> | |
7 | ||
b508d01f JN |
8 | #include "gem/i915_gem_internal.h" |
9 | ||
2c86e55d MA |
10 | #include "gen6_ppgtt.h" |
11 | #include "i915_scatterlist.h" | |
12 | #include "i915_trace.h" | |
13 | #include "i915_vgpu.h" | |
0d6419e9 | 14 | #include "intel_gt_regs.h" |
202b1f4c | 15 | #include "intel_engine_regs.h" |
2c86e55d MA |
16 | #include "intel_gt.h" |
17 | ||
18 | /* Write pde (index) from the page directory @pd to the page table @pt */ | |
9834dfef CW |
19 | static void gen6_write_pde(const struct gen6_ppgtt *ppgtt, |
20 | const unsigned int pde, | |
21 | const struct i915_page_table *pt) | |
2c86e55d | 22 | { |
89351925 CW |
23 | dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]); |
24 | ||
2c86e55d | 25 | /* Caller needs to make sure the write completes if necessary */ |
89351925 | 26 | iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID, |
2c86e55d MA |
27 | ppgtt->pd_addr + pde); |
28 | } | |
29 | ||
30 | void gen7_ppgtt_enable(struct intel_gt *gt) | |
31 | { | |
32 | struct drm_i915_private *i915 = gt->i915; | |
33 | struct intel_uncore *uncore = gt->uncore; | |
2c86e55d MA |
34 | u32 ecochk; |
35 | ||
36 | intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); | |
37 | ||
38 | ecochk = intel_uncore_read(uncore, GAM_ECOCHK); | |
39 | if (IS_HASWELL(i915)) { | |
40 | ecochk |= ECOCHK_PPGTT_WB_HSW; | |
41 | } else { | |
42 | ecochk |= ECOCHK_PPGTT_LLC_IVB; | |
43 | ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; | |
44 | } | |
45 | intel_uncore_write(uncore, GAM_ECOCHK, ecochk); | |
2c86e55d MA |
46 | } |
47 | ||
48 | void gen6_ppgtt_enable(struct intel_gt *gt) | |
49 | { | |
50 | struct intel_uncore *uncore = gt->uncore; | |
51 | ||
52 | intel_uncore_rmw(uncore, | |
53 | GAC_ECO_BITS, | |
54 | 0, | |
55 | ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); | |
56 | ||
57 | intel_uncore_rmw(uncore, | |
58 | GAB_CTL, | |
59 | 0, | |
60 | GAB_CTL_CONT_AFTER_PAGEFAULT); | |
61 | ||
62 | intel_uncore_rmw(uncore, | |
63 | GAM_ECOCHK, | |
64 | 0, | |
65 | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); | |
66 | ||
67 | if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */ | |
68 | intel_uncore_write(uncore, | |
69 | GFX_MODE, | |
70 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | |
71 | } | |
72 | ||
73 | /* PPGTT support for Sandybdrige/Gen6 and later */ | |
74 | static void gen6_ppgtt_clear_range(struct i915_address_space *vm, | |
75 | u64 start, u64 length) | |
76 | { | |
77 | struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); | |
78 | const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; | |
89351925 | 79 | const gen6_pte_t scratch_pte = vm->scratch[0]->encode; |
2c86e55d MA |
80 | unsigned int pde = first_entry / GEN6_PTES; |
81 | unsigned int pte = first_entry % GEN6_PTES; | |
82 | unsigned int num_entries = length / I915_GTT_PAGE_SIZE; | |
83 | ||
84 | while (num_entries) { | |
85 | struct i915_page_table * const pt = | |
86 | i915_pt_entry(ppgtt->base.pd, pde++); | |
87 | const unsigned int count = min(num_entries, GEN6_PTES - pte); | |
88 | gen6_pte_t *vaddr; | |
89 | ||
2c86e55d MA |
90 | num_entries -= count; |
91 | ||
92 | GEM_BUG_ON(count > atomic_read(&pt->used)); | |
93 | if (!atomic_sub_return(count, &pt->used)) | |
94 | ppgtt->scan_for_unused_pt = true; | |
95 | ||
96 | /* | |
97 | * Note that the hw doesn't support removing PDE on the fly | |
98 | * (they are cached inside the context with no means to | |
99 | * invalidate the cache), so we can only reset the PTE | |
100 | * entries back to scratch. | |
101 | */ | |
102 | ||
529b9ec8 | 103 | vaddr = px_vaddr(pt); |
2c86e55d | 104 | memset32(vaddr + pte, scratch_pte, count); |
2c86e55d MA |
105 | |
106 | pte = 0; | |
107 | } | |
108 | } | |
109 | ||
/*
 * Write PTEs for the range covered by @vma_res into the gen6 page tables.
 *
 * The dma pages are walked via the scatterlist in @vma_res; each 4K page
 * is encoded with @cache_level/@flags and written into successive PTE
 * slots, advancing across page-table boundaries as needed. The caller must
 * have already populated every page table covering the range (enforced by
 * the GEM_BUG_ON below for the first one).
 */
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct i915_vma_resource *vma_res,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory * const pd = ppgtt->pd;
	unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
	unsigned int act_pt = first_entry / GEN6_PTES;	/* current page table */
	unsigned int act_pte = first_entry % GEN6_PTES;	/* slot within it */
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter = sgt_dma(vma_res);
	gen6_pte_t *vaddr;

	GEM_BUG_ON(!pd->entry[act_pt]);

	vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
	do {
		GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += I915_GTT_PAGE_SIZE;
		if (iter.dma == iter.max) {
			/* Current sg entry exhausted; move to the next. */
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg || sg_dma_len(iter.sg) == 0)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + sg_dma_len(iter.sg);
		}

		/* Crossed into the next page table; remap its kernel vaddr. */
		if (++act_pte == GEN6_PTES) {
			vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
			act_pte = 0;
		}
	} while (1);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
149 | ||
/*
 * Rewrite the PDEs covering [start, end) through the GGTT mapping of the
 * page directory, then make sure the writes have reached the hardware.
 * Serialised against concurrent flushers by ppgtt->flush.
 */
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	/*
	 * Round out to 64K; note that after this, "end" holds the length
	 * of the range, which is what gen6_for_each_pde() consumes.
	 */
	start = round_down(start, SZ_64K);
	end = round_up(end, SZ_64K) - start;

	mutex_lock(&ppgtt->flush);

	gen6_for_each_pde(pt, pd, start, end, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mb();
	/* Read back the last PDE written to post the preceding iowrites. */
	ioread32(ppgtt->pd_addr + pde - 1);
	gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
	mb();

	mutex_unlock(&ppgtt->flush);
}
171 | ||
cd0452aa CW |
/*
 * Ensure page tables exist for every PDE covering [start, start + length).
 *
 * Fresh tables are taken from the preallocated @stash. pd->lock is dropped
 * while initialising a new table (filling it with scratch PTEs), so a
 * parallel allocator may install its own table first; in that case the
 * stashed table is left in place and the winner's table is used instead.
 * If any PDE changed and the PD is resident in the GGTT, the affected
 * PDE range is flushed to the hardware.
 */
static void gen6_alloc_va_range(struct i915_address_space *vm,
				struct i915_vm_pt_stash *stash,
				u64 start, u64 length)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	bool flush = false;
	u64 from = start;	/* gen6_for_each_pde() advances @start */
	unsigned int pde;

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		const unsigned int count = gen6_pte_count(start, length);

		if (!pt) {
			/* Drop the lock to initialise the stashed table. */
			spin_unlock(&pd->lock);

			pt = stash->pt[0];
			__i915_gem_object_pin_pages(pt->base);

			fill32_px(pt, vm->scratch[0]->encode);

			spin_lock(&pd->lock);
			if (!pd->entry[pde]) {
				/* We won: pop the table off the stash. */
				stash->pt[0] = pt->stash;
				atomic_set(&pt->used, 0);
				pd->entry[pde] = pt;
			} else {
				/* Lost the race; use the winner's table. */
				pt = pd->entry[pde];
			}

			flush = true;
		}

		atomic_add(count, &pt->used);
	}
	spin_unlock(&pd->lock);

	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}
}
218 | ||
219 | static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) | |
220 | { | |
221 | struct i915_address_space * const vm = &ppgtt->base.vm; | |
2c86e55d MA |
222 | int ret; |
223 | ||
89351925 | 224 | ret = setup_scratch_page(vm); |
2c86e55d MA |
225 | if (ret) |
226 | return ret; | |
227 | ||
89351925 CW |
228 | vm->scratch[0]->encode = |
229 | vm->pte_encode(px_dma(vm->scratch[0]), | |
2c86e55d MA |
230 | I915_CACHE_NONE, PTE_READ_ONLY); |
231 | ||
89351925 | 232 | vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); |
fa812ce9 CW |
233 | if (IS_ERR(vm->scratch[1])) { |
234 | ret = PTR_ERR(vm->scratch[1]); | |
235 | goto err_scratch0; | |
236 | } | |
89351925 | 237 | |
529b9ec8 | 238 | ret = map_pt_dma(vm, vm->scratch[1]); |
fa812ce9 CW |
239 | if (ret) |
240 | goto err_scratch1; | |
2c86e55d | 241 | |
89351925 | 242 | fill32_px(vm->scratch[1], vm->scratch[0]->encode); |
2c86e55d MA |
243 | |
244 | return 0; | |
fa812ce9 CW |
245 | |
246 | err_scratch1: | |
247 | i915_gem_object_put(vm->scratch[1]); | |
248 | err_scratch0: | |
249 | i915_gem_object_put(vm->scratch[0]); | |
250 | return ret; | |
2c86e55d MA |
251 | } |
252 | ||
253 | static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) | |
254 | { | |
255 | struct i915_page_directory * const pd = ppgtt->base.pd; | |
2c86e55d MA |
256 | struct i915_page_table *pt; |
257 | u32 pde; | |
258 | ||
259 | gen6_for_all_pdes(pt, pd, pde) | |
89351925 | 260 | if (pt) |
82adf901 | 261 | free_pt(&ppgtt->base.vm, pt); |
2c86e55d MA |
262 | } |
263 | ||
/*
 * vm->cleanup callback: tear down the ppgtt — page tables first, then the
 * scratch pages, and finally the top-level page directory itself.
 */
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));

	gen6_ppgtt_free_pd(ppgtt);
	free_scratch(vm);

	mutex_destroy(&ppgtt->flush);

	free_pd(&ppgtt->base.vm, ppgtt->base.pd);
}
275 | ||
cd0452aa CW |
276 | static void pd_vma_bind(struct i915_address_space *vm, |
277 | struct i915_vm_pt_stash *stash, | |
39a2bd34 | 278 | struct i915_vma_resource *vma_res, |
cd0452aa CW |
279 | enum i915_cache_level cache_level, |
280 | u32 unused) | |
2c86e55d | 281 | { |
12b07256 | 282 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
39a2bd34 TH |
283 | struct gen6_ppgtt *ppgtt = vma_res->private; |
284 | u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE; | |
2c86e55d | 285 | |
89351925 | 286 | ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10; |
2c86e55d MA |
287 | ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; |
288 | ||
289 | gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); | |
2c86e55d MA |
290 | } |
291 | ||
39a2bd34 TH |
292 | static void pd_vma_unbind(struct i915_address_space *vm, |
293 | struct i915_vma_resource *vma_res) | |
2c86e55d | 294 | { |
39a2bd34 | 295 | struct gen6_ppgtt *ppgtt = vma_res->private; |
2c86e55d | 296 | struct i915_page_directory * const pd = ppgtt->base.pd; |
2c86e55d MA |
297 | struct i915_page_table *pt; |
298 | unsigned int pde; | |
299 | ||
300 | if (!ppgtt->scan_for_unused_pt) | |
301 | return; | |
302 | ||
303 | /* Free all no longer used page tables */ | |
304 | gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { | |
89351925 | 305 | if (!pt || atomic_read(&pt->used)) |
2c86e55d MA |
306 | continue; |
307 | ||
82adf901 | 308 | free_pt(&ppgtt->base.vm, pt); |
89351925 | 309 | pd->entry[pde] = NULL; |
2c86e55d MA |
310 | } |
311 | ||
312 | ppgtt->scan_for_unused_pt = false; | |
313 | } | |
314 | ||
/* vma ops installed on the GGTT vma that backs the page directory. */
static const struct i915_vma_ops pd_vma_ops = {
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};
319 | ||
47b08693 | 320 | int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) |
2c86e55d MA |
321 | { |
322 | struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); | |
323 | int err; | |
324 | ||
e1a7ab4f | 325 | GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref)); |
2c86e55d MA |
326 | |
327 | /* | |
328 | * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt | |
329 | * which will be pinned into every active context. | |
330 | * (When vma->pin_count becomes atomic, I expect we will naturally | |
331 | * need a larger, unpacked, type and kill this redundancy.) | |
332 | */ | |
333 | if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) | |
334 | return 0; | |
335 | ||
b0b0f2d2 ML |
336 | /* grab the ppgtt resv to pin the object */ |
337 | err = i915_vm_lock_objects(&ppgtt->base.vm, ww); | |
338 | if (err) | |
339 | return err; | |
2c86e55d MA |
340 | |
341 | /* | |
342 | * PPGTT PDEs reside in the GGTT and consists of 512 entries. The | |
343 | * allocator works in address space sizes, so it's multiplied by page | |
344 | * size. We allocate at the top of the GTT to avoid fragmentation. | |
345 | */ | |
b0b0f2d2 | 346 | if (!atomic_read(&ppgtt->pin_count)) { |
47b08693 | 347 | err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH); |
b0b0f2d2 ML |
348 | |
349 | GEM_BUG_ON(ppgtt->vma->fence); | |
350 | clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma)); | |
351 | } | |
2c86e55d MA |
352 | if (!err) |
353 | atomic_inc(&ppgtt->pin_count); | |
2c86e55d MA |
354 | |
355 | return err; | |
356 | } | |
357 | ||
b0b0f2d2 | 358 | static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj) |
2c86e55d | 359 | { |
b0b0f2d2 ML |
360 | obj->mm.pages = ZERO_SIZE_PTR; |
361 | return 0; | |
362 | } | |
2c86e55d | 363 | |
b0b0f2d2 ML |
/* Nothing to release: get_pages never allocated real backing pages. */
static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
				   struct sg_table *pages)
{
}
368 | ||
b0b0f2d2 ML |
/* Minimal object ops for the storage-less page-directory dummy object. */
static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
	.name = "pd_dummy_obj",
	.get_pages = pd_dummy_obj_get_pages,
	.put_pages = pd_dummy_obj_put_pages,
};
374 | ||
/*
 * Allocate the top-level page directory together with the dummy object and
 * GGTT vma it lives behind. On failure every partially-created resource is
 * unwound (free_pd() handles the dummy object if it was created) and an
 * ERR_PTR is returned.
 */
static struct i915_page_directory *
gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
	struct i915_page_directory *pd;
	int err;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
							&pd_dummy_obj_ops,
							I915_PDES * SZ_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;	/* don't free_pd() a stale ERR_PTR */
		goto err_pd;
	}

	/* Share the vm's reservation object with the dummy object. */
	pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
	pd->pt.base->shares_resv_from = &ppgtt->base.vm;

	ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		ppgtt->vma = NULL;
		goto err_pd;
	}

	/* The dummy object we create is special, override ops.. */
	ppgtt->vma->ops = &pd_vma_ops;
	ppgtt->vma->private = ppgtt;
	return pd;

err_pd:
	free_pd(&ppgtt->base.vm, pd);
	return ERR_PTR(err);
}
414 | ||
2c86e55d MA |
/* Drop one pin_count reference; unpin the PD vma on the final release. */
void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
	if (atomic_dec_and_test(&ppgtt->pin_count))
		i915_vma_unpin(ppgtt->vma);
}
423 | ||
/*
 * Create a gen6-style ppgtt: a single-level page directory (vm.top == 1)
 * whose PDEs reside in the GGTT. Returns the embedded i915_ppgtt on
 * success or an ERR_PTR on failure.
 */
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ggtt * const ggtt = gt->ggtt;
	struct gen6_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ppgtt->flush);

	ppgtt_init(&ppgtt->base, gt, 0);
	/* Each PDE maps a 4K table of 4K PTEs: SZ_4K/pte_size * SZ_4K bytes. */
	ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
	ppgtt->base.vm.top = 1;

	/* Address-space operations for this vm. */
	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
	ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_free;

	ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
	if (IS_ERR(ppgtt->base.pd)) {
		err = PTR_ERR(ppgtt->base.pd);
		goto err_scratch;
	}

	return &ppgtt->base;

err_scratch:
	free_scratch(&ppgtt->base.vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}