/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow()
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);

/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);
	return 1U << sb->shift;
}

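/*
 * Worked example (illustrative): with depth = 200 and shift = 6 (64 bits per
 * word), the map uses map_nr = 4 words; __map_depth() returns 64 for indexes
 * 0..2 and 200 - (3 << 6) = 8 for the final word.
 */
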
/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}
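
/*
 * Minimal lifecycle sketch (illustrative only, not part of the API; the
 * function name is hypothetical): a 128-bit map with the default word size,
 * no round-robin, and a percpu allocation hint.
 */
#if 0
static int example_sbitmap_lifecycle(void)
{
	struct sbitmap sb;
	int ret;

	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
				false, true);
	if (ret)
		return ret;

	/* ... allocate and free bits ... */

	sbitmap_free(&sb);
	return 0;
}
#endif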

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);

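/*
 * Sketch of the two-class scheme described above (illustrative; the helpers
 * are hypothetical). The low-priority class is limited to half of each word,
 * so it can never starve the high-priority class.
 */
#if 0
static int example_get_high_prio(struct sbitmap *sb)
{
	return sbitmap_get(sb);
}

static int example_get_low_prio(struct sbitmap *sb)
{
	return sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
}
#endif
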
/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
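
/*
 * Iteration sketch (illustrative; the callback and wrapper are hypothetical):
 * count the set bits via the callback. Returning true keeps the walk going.
 */
#if 0
static bool example_count_fn(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return true;
}

static unsigned int example_count_set_bits(struct sbitmap *sb)
{
	unsigned int count = 0;

	sbitmap_for_each_set(sb, example_count_fn, &count);
	return count;
}
#endif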

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special: rather than actually clearing the bit, it sets the
 * corresponding bit in the ->cleared mask instead. It pairs with the caller
 * doing sbitmap_deferred_clear() if a given index is full, which will clear
 * the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(): this one applies both the cleared bit and the
 * allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

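/*
 * Allocation/free sketch (illustrative; the caller is hypothetical): a bit
 * obtained with sbitmap_get() is returned with sbitmap_put(), which defers
 * the actual clear and refreshes the percpu hint.
 */
#if 0
static void example_get_put(struct sbitmap *sb)
{
	int nr = sbitmap_get(sb);

	if (nr < 0)
		return;		/* map exhausted */

	/* ... use bit 'nr' as a tag ... */

	sbitmap_put(sb, nr);
}
#endif
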
static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}

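/*
 * Worked example (illustrative): on a 64-bit machine the starting shift is
 * ilog2(64) = 6. For depth = 32 the loop lowers it until (4 << shift) <= 32,
 * giving shift = 3, i.e. 8 bits per word spread across 4 cachelines.
 */
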
/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return how many bits are set and not cleared in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: The number of bits that are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

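/*
 * Queue lifecycle sketch (illustrative only; the function name is
 * hypothetical).
 */
#if 0
static int example_sbq_lifecycle(void)
{
	struct sbitmap_queue sbq;
	int ret;

	ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;

	/* ... allocate and clear bits, wait for frees ... */

	sbitmap_queue_free(&sbq);
	return 0;
}
#endif
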
/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
 * @sbq: Bitmap queue to recalculate wake batch.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this will calculate the wake batch
 * based on the depth. This interface is for HCTX shared tags or queue shared
 * tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

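/*
 * Shallow-allocation sketch (illustrative; the depth of 16 and the helpers
 * are hypothetical). The minimum shallow depth is announced once up front so
 * that wake batching stays correct.
 */
#if 0
static void example_shallow_setup(struct sbitmap_queue *sbq)
{
	/* Once, after sbitmap_queue_init_node(): */
	sbitmap_queue_min_shallow_depth(sbq, 16);
}

static int example_shallow_get(struct sbitmap_queue *sbq)
{
	/* Allocate using the announced (or a larger) shallow depth. */
	return sbitmap_queue_get_shallow(sbq, 16);
}
#endif
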
/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

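/*
 * Allocation/free pairing sketch for a queue (illustrative; the caller is
 * hypothetical). The CPU recorded at allocation time is handed back to
 * sbitmap_queue_clear() so the percpu hint is updated for that CPU.
 */
#if 0
static void example_sbq_get_clear(struct sbitmap_queue *sbq)
{
	unsigned int cpu;
	int nr = sbitmap_queue_get(sbq, &cpu);

	if (nr < 0)
		return;		/* no free bits; callers typically wait */

	/* ... use bit 'nr' ... */

	sbitmap_queue_clear(sbq, nr, cpu);
}
#endif
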
/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags);

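/*
 * Batch sketch (illustrative; the caller is hypothetical): decode the mask
 * returned by __sbitmap_queue_get_batch() into absolute tag values, then
 * free them all in one call.
 */
#if 0
static void example_sbq_batch(struct sbitmap_queue *sbq)
{
	int tags[4], nr = 0, bit;
	unsigned int offset;
	unsigned long mask = __sbitmap_queue_get_batch(sbq, 4, &offset);

	for_each_set_bit(bit, &mask, BITS_PER_LONG)
		tags[nr++] = offset + bit;

	/* ... use the tags ... */

	if (nr)
		sbitmap_queue_clear_batch(sbq, offset, tags, nr);
}
#endif
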
static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private = current,				\
			.func = autoremove_wake_function,		\
			.entry = LIST_HEAD_INIT((name).wait.entry),	\
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

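/*
 * Wait-loop sketch (illustrative; mirrors the pattern blk-mq style users
 * build from these helpers; names are hypothetical). Retry the allocation,
 * sleeping on one of the wait queues whenever the map is exhausted.
 */
#if 0
static int example_sbq_get_wait(struct sbitmap_queue *sbq)
{
	static atomic_t example_wait_index = ATOMIC_INIT(0);
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	unsigned int cpu;
	int nr;

	nr = sbitmap_queue_get(sbq, &cpu);
	if (nr >= 0)
		return nr;

	ws = sbq_wait_ptr(sbq, &example_wait_index);
	do {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		nr = sbitmap_queue_get(sbq, &cpu);
		if (nr >= 0)
			break;
		io_schedule();
	} while (1);
	sbitmap_finish_wait(sbq, ws, &wait);

	return nr;
}
#endif
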
#endif /* __LINUX_SCALE_BITMAP_H */