// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/cleanup.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

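/*
 * A map entry is 32 bits wide: per the MAP_TRIM_SHIFT/MAP_ERR_SHIFT
 * definitions in btt.h, bit 31 is the 'Z' (zero/trim) flag, bit 30 is
 * the 'E' (error) flag, and the low 30 bits hold the postmap block
 * number. A 'normal' entry for postmap block 5, for example, is stored
 * as 0xc0000005: both flag bits set, LBA in the low bits.
 */
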
/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If, for some reason, the parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries and uses their sequence
 * numbers to find the 'older' entry. As a special case, a never-used
 * group (sequence number in slot 0 still zero) has that slot's
 * sequence number initialized to 1 and is treated as the old entry.
 * It returns the index of the older entry, or -EINVAL if the
 * sequence numbers are inconsistent.
 *
 * TODO: The logic feels a bit kludgey; make it better.
 */
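/*
 * Sequence numbers cycle 1 -> 2 -> 3 -> 1 (0 means unused), so the
 * newer of two entries is the one whose seq follows the other's in
 * the cycle. E.g. seqs (1, 2) make slot 0 the older entry, while
 * (1, 3) make slot 1 older, since 1 follows 3 across the wraparound.
 * Equal sequence numbers, or a pair summing to more than 5, are
 * inconsistent.
 */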
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
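/*
 * Note the ordering below: 'seq' sits in the second half of struct
 * log_entry, so the slot only carries a valid new sequence number once
 * the second 8B half has landed. Each half is written with a single
 * rw_bytes call, which is relied upon to be atomic and durable.
 */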
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
			(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, indicating an identity mapping
 */
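/*
 * (A zeroed entry has both the Z and E flag bits clear, which
 * btt_map_read treats as the initial state and resolves to
 * postmap == premap.)
 */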
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
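/*
 * (E.g. with external_nlba == N, lane i starts out owning internal
 * block N + i as its free block, i.e. the last nfree internal blocks
 * are the initially-free ones.)
 */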
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_read_map is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
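/*
 * For example (per the log_group description in btt.h), a group
 * written with the older, padded layout has its two valid entries in
 * slots (0, 2), with slots 1 and 3 as padding, whereas the current
 * layout uses slots (0, 1) and pads (2, 3). These are exactly the two
 * permutations accepted at the end of this function.
 */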
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

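/*
 * The resulting on-media layout of an arena, as computed below
 * (offsets are absolute, relative to the start of the storage space):
 *
 *	infooff:  4K info block
 *	dataoff:  data blocks (internal_nlba blocks of internal_lbasize)
 *	mapoff:   map (one 4B entry per external block)
 *	logoff:   flog (one log group per free block, nfree total)
 *	info2off: 4K backup info block
 */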
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(*arena), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena)
			return -ENOMEM;

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	return ret;

out:
	kfree(arena);
	free_arenas(btt);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(*super), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strscpy(super->signature, BTT_SIG, sizeof(super->signature));
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

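/*
 * Per-sector metadata is whatever room the external lbasize provides
 * beyond the data payload, e.g. an lbasize of 520 yields 512B sectors
 * with 8B of integrity metadata each.
 */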
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
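/*
 * (The sector is in 512B units; e.g. with a 4096B sector_size, sector
 * 8 resolves to external LBA 1.)
 */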
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
		struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
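/*
 * The lock index is derived from the map entry's cache line, so all
 * map entries sharing a cache line hash to the same lock.
 */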
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

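/*
 * Read path: resolve the premap LBA to a postmap block, publish that
 * block in this lane's Read Tracking Table (RTT) entry, then re-read
 * the map to close the race window before the RTT store became
 * visible to writers. Writers poll the RTT before reusing a free
 * block, so once the second map read is stable, the data read below
 * cannot be overwritten from under us.
 */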
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

out_rtt:
	arena->rtt[lane] = RTT_INVALID;
out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

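/*
 * Write path: write the data to this lane's current free block, commit
 * a flog entry recording the old -> new map transition, then update
 * the map entry itself. If power is lost between the flog and map
 * writes, btt_freelist_init() replays the flog on the next startup and
 * fixes up the map.
 */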
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

out_map:
	unlock_map(arena, premap);
out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner = THIS_MODULE,
	.submit_bio = btt_submit_bio,
	.getgeo = btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	struct queue_limits lim = {
		.logical_block_size = btt->sector_size,
		.max_hw_sectors = UINT_MAX,
		.max_integrity_segments = 1,
		.features = BLK_FEAT_SYNCHRONOUS,
	};
	int rc;

	if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		lim.integrity.tuple_size = btt_meta_size(btt);
		lim.integrity.tag_size = btt_meta_size(btt);
	}

	btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(btt->btt_disk))
		return PTR_ERR(btt->btt_disk);

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	&struct nd_region for the REGION device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			    u32 lbasize, uuid_t *uuid,
			    struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
		       nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_DESCRIPTION("NVDIMM Block Translation Table");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);