ufs: restore maintaining ->i_blocks
[linux-2.6-block.git] / fs / ufs / balloc.c
CommitLineData
1da177e4
LT
1/*
2 * linux/fs/ufs/balloc.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
54fb996a
ED
7 *
8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
1da177e4
LT
9 */
10
11#include <linux/fs.h>
1da177e4
LT
12#include <linux/stat.h>
13#include <linux/time.h>
14#include <linux/string.h>
1da177e4 15#include <linux/buffer_head.h>
16f7e0fe 16#include <linux/capability.h>
1da177e4 17#include <linux/bitops.h>
2f8b5444 18#include <linux/bio.h>
1da177e4
LT
19#include <asm/byteorder.h>
20
e5420598 21#include "ufs_fs.h"
bcd6d4ec 22#include "ufs.h"
1da177e4
LT
23#include "swab.h"
24#include "util.h"
25
54fb996a
ED
26#define INVBLOCK ((u64)-1L)
27
45641c82 28static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
54fb996a
ED
29static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
1da177e4
LT
32static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
34
/*
 * Free 'count' fragments starting at fragment number 'fragment'.
 *
 * Sets the corresponding bits in the cylinder-group free-fragment bitmap,
 * updates the per-cg and filesystem-wide free-fragment summaries, and, if
 * the whole block containing the fragments becomes free, promotes the
 * accounting from "free fragments" to "free block".  Runs under the
 * per-superblock s_lock.
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	/* a free range must not cross a cylinder-group boundary */
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);	/* cylinder group number */
	bit = ufs_dtogd(uspi, fragment);	/* bit offset within the cg bitmap */
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);	/* first fragment of the containing block */
	/* retract the old per-block fragment-size accounting before editing bits */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	/* keep inode->i_blocks in sync with the freed space */
	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	/* re-add fragment-size accounting for the block's new state */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		/* whole block is free now: move s_fpb fragments worth of
		 * accounting from the fragment counters to the block counters */
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 additionally tracks per-cylinder block counts */
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
130
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks).
 *
 * Both 'fragment' and 'count' must be block-aligned (multiples of the
 * fragments-per-block).  A range that spans more than one cylinder group is
 * processed one group at a time via the do_more/overflow loop.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	/* both start and length must be whole blocks */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		/* range crosses into the next cylinder group: clip it here
		 * and come back for the remainder via 'overflow' */
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* free one whole block per iteration */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		/* keep inode->i_blocks in sync with the freed space */
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 additionally tracks per-cylinder block counts */
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		/* continue with the clipped tail in the next cylinder group */
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
227
/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * also we suppose that oldb...oldb+count-1 blocks
 * situated at the end of file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write,
 * locked_page is argument of these functions, so we already lock it.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	/* walk the affected range one page at a time:
	 * (i | mask) + 1 jumps to the first block of the next page */
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;	/* caller already holds its lock */

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		/* skip to the first buffer of interest within this page */
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;


		/* on the last page only process up to the end of the range */
		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;	/* offset within the moved range */

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				/* data must be read in before it can be moved */
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			/* drop any stale block-device alias of the new location */
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
323
d63b7090
ED
324static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
325 int sync)
326{
327 struct buffer_head *bh;
328 sector_t end = beg + n;
329
330 for (; beg < end; ++beg) {
331 bh = sb_getblk(inode->i_sb, beg);
332 lock_buffer(bh);
333 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
334 set_buffer_uptodate(bh);
335 mark_buffer_dirty(bh);
336 unlock_buffer(bh);
337 if (IS_SYNC(inode) || sync)
338 sync_dirty_buffer(bh);
339 brelse(bh);
340 }
341}
342
54fb996a
ED
343u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
344 u64 goal, unsigned count, int *err,
345 struct page *locked_page)
1da177e4
LT
346{
347 struct super_block * sb;
348 struct ufs_sb_private_info * uspi;
349 struct ufs_super_block_first * usb1;
54fb996a
ED
350 unsigned cgno, oldcount, newcount;
351 u64 tmp, request, result;
1da177e4 352
54fb996a
ED
353 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
354 inode->i_ino, (unsigned long long)fragment,
355 (unsigned long long)goal, count);
1da177e4
LT
356
357 sb = inode->i_sb;
358 uspi = UFS_SB(sb)->s_uspi;
7b4ee73e 359 usb1 = ubh_get_usb_first(uspi);
1da177e4
LT
360 *err = -ENOSPC;
361
cdd9eefd 362 mutex_lock(&UFS_SB(sb)->s_lock);
54fb996a
ED
363 tmp = ufs_data_ptr_to_cpu(sb, p);
364
1da177e4 365 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
54fb996a
ED
366 ufs_warning(sb, "ufs_new_fragments", "internal warning"
367 " fragment %llu, count %u",
368 (unsigned long long)fragment, count);
1da177e4
LT
369 count = uspi->s_fpb - ufs_fragnum(fragment);
370 }
371 oldcount = ufs_fragnum (fragment);
372 newcount = oldcount + count;
373
374 /*
375 * Somebody else has just allocated our fragments
376 */
377 if (oldcount) {
378 if (!tmp) {
54fb996a
ED
379 ufs_error(sb, "ufs_new_fragments", "internal error, "
380 "fragment %llu, tmp %llu\n",
381 (unsigned long long)fragment,
382 (unsigned long long)tmp);
cdd9eefd 383 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 384 return INVBLOCK;
1da177e4
LT
385 }
386 if (fragment < UFS_I(inode)->i_lastfrag) {
abf5d15f 387 UFSD("EXIT (ALREADY ALLOCATED)\n");
cdd9eefd 388 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
389 return 0;
390 }
391 }
392 else {
393 if (tmp) {
abf5d15f 394 UFSD("EXIT (ALREADY ALLOCATED)\n");
cdd9eefd 395 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
396 return 0;
397 }
398 }
399
400 /*
401 * There is not enough space for user on the device
402 */
ee3ffd6c 403 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
cdd9eefd 404 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 405 UFSD("EXIT (FAILED)\n");
1da177e4
LT
406 return 0;
407 }
408
409 if (goal >= uspi->s_size)
410 goal = 0;
411 if (goal == 0)
412 cgno = ufs_inotocg (inode->i_ino);
413 else
54fb996a 414 cgno = ufs_dtog(uspi, goal);
1da177e4
LT
415
416 /*
417 * allocate new fragment
418 */
419 if (oldcount == 0) {
420 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
421 if (result) {
bd2843fe
AV
422 ufs_clear_frags(inode, result + oldcount,
423 newcount - oldcount, locked_page != NULL);
724bb09f 424 write_seqlock(&UFS_I(inode)->meta_lock);
54fb996a 425 ufs_cpu_to_data_ptr(sb, p, result);
724bb09f 426 write_sequnlock(&UFS_I(inode)->meta_lock);
1da177e4 427 *err = 0;
54fb996a 428 UFS_I(inode)->i_lastfrag =
1d582723 429 max(UFS_I(inode)->i_lastfrag, fragment + count);
1da177e4 430 }
cdd9eefd 431 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 432 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
433 return result;
434 }
435
436 /*
437 * resize block
438 */
45641c82 439 result = ufs_add_fragments(inode, tmp, oldcount, newcount);
1da177e4
LT
440 if (result) {
441 *err = 0;
1d582723
DC
442 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
443 fragment + count);
d63b7090
ED
444 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
445 locked_page != NULL);
cdd9eefd 446 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 447 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
448 return result;
449 }
450
451 /*
452 * allocate new block and move data
453 */
454 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
455 case UFS_OPTSPACE:
456 request = newcount;
ee3ffd6c
ED
457 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
458 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
1da177e4
LT
459 break;
460 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
461 break;
462 default:
463 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
464
465 case UFS_OPTTIME:
466 request = uspi->s_fpb;
ee3ffd6c 467 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
1da177e4
LT
468 (uspi->s_minfree - 2) / 100)
469 break;
470 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
471 break;
472 }
473 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
474 if (result) {
efee2b81
ED
475 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
476 locked_page != NULL);
4b25a37e
ED
477 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
478 uspi->s_sbbase + tmp,
479 uspi->s_sbbase + result, locked_page);
724bb09f 480 write_seqlock(&UFS_I(inode)->meta_lock);
54fb996a 481 ufs_cpu_to_data_ptr(sb, p, result);
724bb09f 482 write_sequnlock(&UFS_I(inode)->meta_lock);
1da177e4 483 *err = 0;
1d582723
DC
484 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
485 fragment + count);
cdd9eefd 486 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
487 if (newcount < request)
488 ufs_free_fragments (inode, result + newcount, request - newcount);
489 ufs_free_fragments (inode, tmp, oldcount);
54fb996a 490 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
491 return result;
492 }
493
cdd9eefd 494 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 495 UFSD("EXIT (FAILED)\n");
1da177e4
LT
496 return 0;
497}
498
eb315d2a
AV
499static bool try_add_frags(struct inode *inode, unsigned frags)
500{
501 unsigned size = frags * i_blocksize(inode);
502 spin_lock(&inode->i_lock);
503 __inode_add_bytes(inode, size);
504 if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
505 __inode_sub_bytes(inode, size);
506 spin_unlock(&inode->i_lock);
507 return false;
508 }
509 spin_unlock(&inode->i_lock);
510 return true;
511}
512
/*
 * Try to extend an existing partial block in place from @oldcount to
 * @newcount fragments.
 *
 * Succeeds only if the fragments immediately following the current
 * allocation within the same block are free.  Returns @fragment on
 * success, 0 if the extension is not possible.
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;	/* extra fragments requested */

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;	/* would cross a block boundary */
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);	/* offset of fragment within its block */
	/* all fragments we want to grow into must currently be free */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	/* reserve the space in inode->i_blocks before touching the bitmap */
	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* measure the free run following the current allocation */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* the free run of size 'fragsize' shrinks to 'fragsize - count' */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
583
/*
 * Jump to cg_found if cylinder group 'cgno' has a free block or a free
 * fragment run of at least 'count' fragments.  Uses the locals sb, uspi,
 * ucg, cgno, count and k of the enclosing function.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

/*
 * Allocate 'count' contiguous fragments, preferring cylinder group @cgno
 * and position @goal.  Searches for a suitable cylinder group in three
 * passes (preferred group, quadratic rehash, brute force), then allocates
 * either a whole block or a fragment run inside it.  Returns the absolute
 * fragment number, or 0 on failure.
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		/* request is exactly one block */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* smallest available fragment-run size that fits the request */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		/* no suitable run: take a whole block and give back the tail */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		/* the freed tail is not charged to the inode */
		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	/* leftover of the run becomes a smaller free run */
	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	/* convert cg-relative fragment number to an absolute one */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
704
/*
 * Allocate one whole block in the given cylinder group, preferring the
 * block containing @goal (or the group's rotor when @goal is 0).
 * Returns the cg-relative fragment number of the block, INVBLOCK if the
 * bitmap search fails, or 0 if the inode's block count would overflow.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		/* no preference: continue from where the last search ended */
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);	/* round down to a block boundary */
	goal = ufs_dtogd(uspi, goal);	/* make it cg-relative */

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	/* charge a full block to inode->i_blocks before claiming it */
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		/* UFS1 additionally tracks per-cylinder block counts */
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
764
/*
 * Scan @size bytes of the buffer-head chain @ubh starting at byte offset
 * @begin for the first byte b with (table[b] & mask) != 0.
 *
 * The region may span several underlying buffers of uspi->s_fsize bytes
 * each.  Returns the number of bytes left unscanned after (and including)
 * the match, i.e. non-zero distance from the end on a hit, 0 when no byte
 * matched.  Mirrors the BSD scanc() utility.
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;


	offset = begin & ~uspi->s_fmask;	/* byte offset within a buffer */
	begin >>= uspi->s_fshift;		/* index of the starting buffer */
	for (;;) {
		/* how much of the request fits in the current buffer */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* advance until a byte matches or this buffer is exhausted */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;	/* match found, or nothing left to scan */
		begin++;
		offset = 0;
	}
	return (size + rest);
}
792
/*
 * Find a run of 'count' free fragments in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want find new one
 * @count: specified size in fragments
 *
 * Returns the cg-relative fragment number of the run, or INVBLOCK when
 * none exists or the bitmap is corrupted.
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	/* byte-granular search start: near the goal, or the group's rotor */
	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	/* first pass: from start to the end of the bitmap */
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		/* second pass: wrap around and search from the beginning */
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	/* convert the matched byte position back to a fragment number */
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map; locate the exact run within it
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
875
/*
 * Update the 44BSD cluster accounting after block @blkno is freed
 * (@cnt == 1) or allocated (@cnt == -1).
 *
 * Maintains the cluster bitmap and the per-size cluster summary counts:
 * the old cluster size is adjusted and the sizes of the free clusters
 * immediately before and after @blkno are re-accounted.  No-op when
 * cluster summaries are disabled (s_contigsumsize <= 0).
 */
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
928
929
/*
 * Fragment-size lookup table for filesystems with 8 fragments per block:
 * for each possible bitmap byte, a bit set at position (size-1) means the
 * byte contains a free fragment run of that size.  Consumed by
 * ubh_scanc() via ufs_bitmap_search().
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
948
/*
 * Fragment-size lookup table for filesystems with other than 8 fragments
 * per block; interpreted with a shift of (s_fpb & 7) in the mask passed
 * to ubh_scanc() — see ufs_bitmap_search().
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};