/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "mean_and_variance.h"

#include "darray.h"

struct closure;

#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

/* type hackery */

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}
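
/*
 * Worked example (annotation, not part of the original file): with 4k
 * pages, a 300-byte buffer starting at page offset 4000 straddles a page
 * boundary, so it occupies two pages even though it's under PAGE_SIZE:
 *
 *	buf_pages(p, 300) == DIV_ROUND_UP(4000 + 300, 4096) == 2
 */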

static inline void vpfree(void *p, size_t size)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long) p, get_order(size));
}

static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
		__vmalloc(size, gfp_mask);
}

static inline void kvpfree(void *p, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(p);
	else
		vpfree(p, size);
}

static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
	return size < PAGE_SIZE
		? kmalloc(size, gfp_mask)
		: vpmalloc(size, gfp_mask);
}
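
/*
 * Usage sketch (illustrative, not part of the original file): sub-page
 * allocations go through kmalloc(), larger ones through the page allocator
 * with a vmalloc fallback; the same size must be passed back when freeing:
 *
 *	void *buf = kvpmalloc(size, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvpfree(buf, size);
 */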

int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);

#define HEAP(type)							\
struct {								\
	size_t size, used;						\
	type *data;							\
}

#define DECLARE_HEAP(type, name) HEAP(type) name

#define init_heap(heap, _size, gfp)					\
({									\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
				 (gfp));				\
})

#define free_heap(heap)							\
do {									\
	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
	(heap)->data = NULL;						\
} while (0)

#define heap_set_backpointer(h, i, _fn)					\
do {									\
	void (*fn)(typeof(h), size_t) = _fn;				\
	if (fn)								\
		fn(h, i);						\
} while (0)

#define heap_swap(h, i, j, set_backpointer)				\
do {									\
	swap((h)->data[i], (h)->data[j]);				\
	heap_set_backpointer(h, i, set_backpointer);			\
	heap_set_backpointer(h, j, set_backpointer);			\
} while (0)

#define heap_peek(h)							\
({									\
	EBUG_ON(!(h)->used);						\
	(h)->data[0];							\
})

#define heap_full(h)	((h)->used == (h)->size)

#define heap_sift_down(h, i, cmp, set_backpointer)			\
do {									\
	size_t _c, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
		_c = _j * 2 + 1;					\
		if (_c + 1 < (h)->used &&				\
		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
			_c++;						\
									\
		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
			break;						\
		heap_swap(h, _c, _j, set_backpointer);			\
	}								\
} while (0)

#define heap_sift_up(h, i, cmp, set_backpointer)			\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
			break;						\
		heap_swap(h, i, p, set_backpointer);			\
		i = p;							\
	}								\
} while (0)

#define __heap_add(h, d, cmp, set_backpointer)				\
({									\
	size_t _i = (h)->used++;					\
	(h)->data[_i] = d;						\
	heap_set_backpointer(h, _i, set_backpointer);			\
									\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	_i;								\
})

#define heap_add(h, d, cmp, set_backpointer)				\
({									\
	bool _r = !heap_full(h);					\
	if (_r)								\
		__heap_add(h, d, cmp, set_backpointer);			\
	_r;								\
})

#define heap_add_or_replace(h, new, cmp, set_backpointer)		\
do {									\
	if (!heap_add(h, new, cmp, set_backpointer) &&			\
	    cmp(h, new, heap_peek(h)) >= 0) {				\
		(h)->data[0] = new;					\
		heap_set_backpointer(h, 0, set_backpointer);		\
		heap_sift_down(h, 0, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_del(h, i, cmp, set_backpointer)				\
do {									\
	size_t _i = (i);						\
									\
	BUG_ON(_i >= (h)->used);					\
	(h)->used--;							\
	if ((_i) < (h)->used) {						\
		heap_swap(h, _i, (h)->used, set_backpointer);		\
		heap_sift_up(h, _i, cmp, set_backpointer);		\
		heap_sift_down(h, _i, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_pop(h, d, cmp, set_backpointer)				\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		heap_del(h, 0, cmp, set_backpointer);			\
	}								\
	_r;								\
})

#define heap_resort(heap, cmp, set_backpointer)				\
do {									\
	ssize_t _i;							\
	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
		heap_sift_down(heap, _i, cmp, set_backpointer);		\
} while (0)
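
/*
 * Usage sketch (illustrative, not part of the original file): a min-heap of
 * u64s.  The cmp argument is invoked as cmp(heap, l, r); pass NULL for
 * set_backpointer when element positions don't need to be tracked:
 *
 *	static int u64_cmp(void *h, u64 l, u64 r)
 *	{
 *		return cmp_int(l, r);
 *	}
 *
 *	DECLARE_HEAP(u64, h);
 *	u64 top;
 *
 *	if (!init_heap(&h, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	heap_add(&h, 42, u64_cmp, NULL);	// false if the heap is full
 *	while (heap_pop(&h, top, u64_cmp, NULL))
 *		pr_info("%llu\n", top);
 *	free_heap(&h);
 */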

#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

#include "printbuf.h"

#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)

#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)

#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)

#define prt_newline(_out)		bch2_prt_newline(_out)
#define prt_tab(_out)			bch2_prt_tab(_out)
#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)

#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
#define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
#define prt_bitflags_vector(...)	bch2_prt_bitflags_vector(__VA_ARGS__)

void bch2_pr_time_units(struct printbuf *, u64);
void bch2_prt_datetime(struct printbuf *, time64_t);

#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
{
	sprintf(out, "%pUb", uuid);
}
#else
#include <uuid/uuid.h>
#endif

static inline void pr_uuid(struct printbuf *out, u8 *uuid)
{
	char uuid_str[40];

	uuid_unparse_lower(uuid, uuid_str);
	prt_printf(out, "%s", uuid_str);
}

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)
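
/*
 * Usage sketch (illustrative, not part of the original file): the _h
 * ("human readable") parsers accept size suffixes such as k, M, G, and
 * strtoi_h() dispatches on the type of the result pointer at compile time:
 *
 *	u64 v;
 *	int ret = strtoi_h("1M", &v);
 *	if (ret)
 *		return ret;
 */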

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define strtoul_safe_restrict(cp, var, min, max)			\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
	else								\
		_r = -EINVAL;						\
	_r;								\
})

#define snprint(out, var)						\
	prt_printf(out,							\
		   type_is(var, int)		? "%i\n"		\
		   : type_is(var, unsigned)	? "%u\n"		\
		   : type_is(var, long)		? "%li\n"		\
		   : type_is(var, unsigned long) ? "%lu\n"		\
		   : type_is(var, s64)		? "%lli\n"		\
		   : type_is(var, u64)		? "%llu\n"		\
		   : type_is(var, char *)	? "%s\n"		\
		   : "%i\n", var)

bool bch2_is_zero(const void *, size_t);

u64 bch2_read_flag_list(char *, const char * const[]);

void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);

void bch2_print_string_as_lines(const char *prefix, const char *lines);

typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);

static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
{
#ifdef __KERNEL__
	prt_printf(out, "%pg", bdev);
#else
	prt_str(out, bdev->name);
#endif
}

#define NR_QUANTILES	15
#define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
#define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
#define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)

struct bch2_quantiles {
	struct bch2_quantile_entry {
		u64	m;
		u64	step;
	} entries[NR_QUANTILES];
};

struct bch2_time_stat_buffer {
	unsigned	nr;
	struct bch2_time_stat_buffer_entry {
		u64	start;
		u64	end;
	} entries[32];
};

struct bch2_time_stats {
	spinlock_t	lock;
	/* all fields are in nanoseconds */
	u64		min_duration;
	u64		max_duration;
	u64		total_duration;
	u64		max_freq;
	u64		min_freq;
	u64		last_event;
	struct bch2_quantiles quantiles;

	struct mean_and_variance	  duration_stats;
	struct mean_and_variance_weighted duration_stats_weighted;
	struct mean_and_variance	  freq_stats;
	struct mean_and_variance_weighted freq_stats_weighted;
	struct bch2_time_stat_buffer __percpu *buffer;
};

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);

static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
{
	__bch2_time_stats_update(stats, start, local_clock());
}

static inline bool track_event_change(struct bch2_time_stats *stats,
				      u64 *start, bool v)
{
	if (v != !!*start) {
		if (!v) {
			bch2_time_stats_update(stats, *start);
			*start = 0;
		} else {
			*start = local_clock() ?: 1;
			return true;
		}
	}

	return false;
}
#else
static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start) {}
static inline bool track_event_change(struct bch2_time_stats *stats,
				      u64 *start, bool v)
{
	bool ret = v && !*start;
	*start = v;
	return ret;
}
#endif
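
/*
 * Usage sketch (illustrative, not part of the original file): time an
 * operation by sampling local_clock() before it and passing the start time
 * back in afterwards; min/max, quantiles, mean and variance are all
 * tracked internally:
 *
 *	u64 start = local_clock();
 *	do_operation();			// hypothetical
 *	bch2_time_stats_update(&stats, start);
 */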

void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);

void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);

#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})
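
/*
 * Worked example (annotation, not part of the original file): ewma_add()
 * computes (ewma * (2^weight - 1) + val) / 2^weight, i.e. an exponentially
 * weighted moving average where each new sample gets weight 1/2^weight:
 *
 *	ewma = ewma_add(ewma, sample, 3);	// new sample weighted 1/8
 */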

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
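
/*
 * Usage sketch (illustrative, not part of the original file; assumes the
 * returned delay is in jiffies): a worker accounts the work it has done
 * and sleeps whenever the ratelimit asks it to back off:
 *
 *	bch2_ratelimit_increment(&d, bytes_done);
 *	u64 delay = bch2_ratelimit_delay(&d);
 *	if (delay)
 *		schedule_timeout_interruptible(delay);
 */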

struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/*
	 * If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough.
	 */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,	(var)->rate.rate);		\
	sysfs_print(name##_rate_bytes,	(var)->rate.rate);		\
	sysfs_print(name##_rate_d_term,	(var)->d_term);			\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		bch2_pd_controller_debug_to_text(out, var);		\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}
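
/*
 * Worked example (annotation, not part of the original file): with
 * fract_bits = 2, x = 0b1101 encodes integer part 3 and fraction 1/4, so
 * the result lies a quarter of the way from 2^3 to 2^4:
 *
 *	fract_exp_two(0b1101, 2) == 8 + ((8 * 1) >> 2) == 10
 */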

void bch2_bio_map(struct bio *bio, void *base, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

#define kthread_wait(cond)						\
({									\
	int _ret = 0;							\
									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})
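
/*
 * Usage sketch (illustrative, not part of the original file;
 * have_work()/do_work() are hypothetical): a kthread main loop that sleeps
 * until there is work, and exits when kthread_should_stop() fires (the
 * macro then returns nonzero):
 *
 *	while (!kthread_wait_freezable(have_work(c)))
 *		do_work(c);
 */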

size_t bch2_rand_range(size_t);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_down_small(void *dst, const void *src,
					     unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}

static inline void memmove_u64s_down_small(void *dst, const void *src,
					   unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down_small(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}
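
/*
 * Usage note (annotation, not part of the original file): these helpers
 * copy whole u64s at a time, matching how bkeys are sized; the _down/_up
 * variants encode the copy direction for overlapping ranges, while
 * memmove_u64s() picks it from the pointer order:
 *
 *	memmove_u64s(dst, src, nr_u64s);
 */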

/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t));

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)
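
/*
 * Usage sketch (illustrative, not part of the original file): insert into
 * or remove from a plain array, shifting the tail; the caller guarantees
 * capacity, and _nr is updated by the macros themselves:
 *
 *	array_insert_item(arr, nr, pos, new_item);
 *	array_remove_item(arr, nr, pos);
 */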

static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

/* Move the gap in a gap buffer: */
#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
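
/*
 * Worked example (annotation, not part of the original file): a gap buffer
 * keeps nr elements in an array of capacity size with the free slots (the
 * "gap") in the middle; moving the gap to the edit point makes repeated
 * nearby inserts cheap:
 *
 *	[A B . . C D]			// nr = 4, size = 6, gap at 2
 *	move_gap(arr, 4, 6, 2, 1);
 *	[A . . B C D]			// gap now at 1
 */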

#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _last;						\
	bool _swapped = true;						\
									\
	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
		_swapped = false;					\
		for (_i = 0; _i < _last; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)
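
/*
 * Usage sketch (illustrative, not part of the original file): in-place sort
 * with any three-way comparator; quadratic, so only meant for the small
 * arrays it's used on:
 *
 *	u8 devs[] = { 3, 1, 2, 0 };
 *	bubble_sort(devs, ARRAY_SIZE(devs), u8_cmp);
 */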

static inline u64 percpu_u64_get(u64 __percpu *src)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(src, cpu);
	return ret;
}

static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, src);
}

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}
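
/*
 * Usage sketch (illustrative, not part of the original file): a percpu u64
 * counter is bumped locally without contention and only summed across all
 * possible CPUs when read:
 *
 *	u64 __percpu *counter = alloc_percpu(u64);
 *
 *	this_cpu_add(*counter, 1);
 *	u64 total = percpu_u64_get(counter);
 */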

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);

#define cmp_int(l, r)		((l > r) - (l < r))

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

static inline int cmp_le32(__le32 l, __le32 r)
{
	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
}

#include <linux/uuid.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
	return l.len == r.len && !memcmp(l.name, r.name, l.len);
}

void bch2_darray_str_exit(darray_str *);
int bch2_split_devs(const char *, darray_str *);

#endif /* _BCACHEFS_UTIL_H */