/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/lightnvm.h>
#define MAX_SYSBLKS 3		/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2	/* 2 blks with 256 pages and 3000 erases
				 * enables ~1.5M updates per sysblk unit
				 */
struct sysblk_scan {
        /* A row is a collection of flash blocks for a system block. */
        int nr_rows;
        int row;
        int act_blk[MAX_SYSBLKS];

        int nr_ppas;
        struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK]; /* all sysblks */
};
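/*
 * The flat ppas[] array above is indexed row-major: the candidate block
 * (row, blkid) lives at index row * MAX_BLKS_PR_SYSBLK + blkid, as computed
 * by scan_ppa_idx() below. With MAX_BLKS_PR_SYSBLK == 2, for example, block 1
 * of row 2 is found at ppas[5].
 */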
static inline int scan_ppa_idx(int row, int blkid)
{
        return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
{
        info->seqnr = be32_to_cpu(sb->seqnr);
        info->erase_cnt = be32_to_cpu(sb->erase_cnt);
        info->version = be16_to_cpu(sb->version);
        strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
        info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}
void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
{
        sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
        sb->seqnr = cpu_to_be32(info->seqnr);
        sb->erase_cnt = cpu_to_be32(info->erase_cnt);
        sb->version = cpu_to_be16(info->version);
        strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
        sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
        int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
        int i;

        for (i = 0; i < nr_rows; i++)
                sysblk_ppas[i].ppa = 0;

        /* if possible, place sysblk at first channel, middle channel and last
         * channel of the device. If not, create only one or two sys blocks
         */
        switch (dev->nr_chnls) {
        case 2:
                sysblk_ppas[1].g.ch = 1;
                /* fall through */
        case 1:
                sysblk_ppas[0].g.ch = 0;
                break;
        default:
                sysblk_ppas[0].g.ch = 0;
                sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
                sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
                break;
        }

        return nr_rows;
}
void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
                           struct ppa_addr *sysblk_ppas)
{
        memset(s, 0, sizeof(struct sysblk_scan));
        s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
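/*
 * Walk the (folded) bad block table of one LUN and record up to
 * MAX_BLKS_PR_SYSBLK free blocks in the current scan row as system block
 * candidates. Stops if a block in the LUN is already marked as a host block.
 */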
static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks,
                                struct sysblk_scan *s)
{
        struct ppa_addr *sppa;
        int i, blkid = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] == NVM_BLK_T_HOST)
                        return -EEXIST;

                if (blks[i] != NVM_BLK_T_FREE)
                        continue;

                sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
                sppa->g.ch = ppa.g.ch;
                sppa->g.lun = ppa.g.lun;
                sppa->g.blk = i;
                s->nr_ppas++;
                blkid++;

                pr_debug("nvm: use (%u %u %u) as sysblk\n",
                         sppa->g.ch, sppa->g.lun, sppa->g.blk);
                if (blkid > MAX_BLKS_PR_SYSBLK - 1)
                        return 0;
        }

        pr_err("nvm: sysblk failed get sysblk\n");
        return -EINVAL;
}
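/*
 * Collect the blocks of one LUN that are already marked NVM_BLK_T_HOST into
 * the scan structure. More than MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS host blocks
 * is treated as an error.
 */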
static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks,
                                struct sysblk_scan *s)
{
        int i, nr_sysblk = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] != NVM_BLK_T_HOST)
                        continue;
                if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
                        pr_err("nvm: too many host blks\n");
                        return -EINVAL;
                }
                ppa.g.blk = i;
                s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
                s->nr_ppas++;
                nr_sysblk++;
        }

        return 0;
}
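/*
 * Read the bad block table for each scan row and populate s->ppas with either
 * free block candidates (get_free != 0, used when initializing) or the blocks
 * already reserved for the host (get_free == 0, used when looking up an
 * existing system block).
 */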
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
                               struct ppa_addr *ppas, int get_free)
{
        int i, nr_blks, ret = 0;
        u8 *blks;

        s->nr_ppas = 0;
        nr_blks = dev->blks_per_lun * dev->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (i = 0; i < s->nr_rows; i++) {
                s->row = i;
                ret = nvm_get_bb_tbl(dev, ppas[i], blks);
                if (ret) {
                        pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
                               ppas[i].g.ch, ppas[i].g.lun);
                        break;
                }

                if (get_free)
                        ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks, s);
                else
                        ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks, s);
                if (ret)
                        break;
        }

        kfree(blks);
        return ret;
}
/*
 * scans a block for latest sysblk.
 * Returns:
 *      0 - newer sysblk not found. PPA is updated to latest page.
 *      1 - newer sysblk found and stored in *sblk. PPA is updated to
 *          next valid page.
 *      <0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
                          struct nvm_system_block *sblk)
{
        struct nvm_system_block *cur;
        int pg, ret, found = 0;

        /* the full buffer for a flash page is allocated. Only the first
         * part of it contains the system block information.
         */
        cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
        if (!cur)
                return -ENOMEM;

        /* perform linear scan through the block */
        for (pg = 0; pg < dev->lps_per_blk; pg++) {
                ppa->g.pg = ppa_to_slc(dev, pg);

                ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
                                     cur, dev->pfpg_size);
                if (ret) {
                        if (ret == NVM_RSP_ERR_EMPTYPAGE) {
                                pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
                                         ppa->g.ch, ppa->g.lun, ppa->g.blk,
                                         ppa->g.pg);
                                break;
                        }
                        pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
                               ret, ppa->g.ch, ppa->g.lun, ppa->g.blk,
                               ppa->g.pg);
                        break; /* if we can't read a page, continue to the
                                * next blk
                                */
                }

                if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
                        pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
                                 ppa->g.ch, ppa->g.lun, ppa->g.blk, ppa->g.pg);
                        break; /* last valid page already found */
                }

                if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
                        continue;

                memcpy(sblk, cur, sizeof(struct nvm_system_block));
                found = 1;
        }

        kfree(cur);

        return found;
}
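/*
 * Mark every PPA collected in the scan with the given block type through the
 * device's bad block table. All entries are submitted as a single request so
 * the update is applied atomically (or not at all).
 */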
static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
{
        struct nvm_rq rqd;
        int ret;

        if (s->nr_ppas > dev->ops->max_phys_sect) {
                pr_err("nvm: unable to update all sysblocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));
        nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
        nvm_generic_to_addr_mode(dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(dev, &rqd);
        if (ret) {
                pr_err("nvm: sysblk failed bb mark\n");
                return -EINVAL;
        }

        return 0;
}
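/*
 * Program the system block image to the active block of every row, in SLC
 * mode, and read each page back to verify that the data made it to the media
 * intact.
 */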
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
                                struct sysblk_scan *s)
{
        struct nvm_system_block nvmsb;
        void *buf;
        int i, sect, ret = 0;
        struct ppa_addr *ppas;

        nvm_cpu_to_sysblk(&nvmsb, info);

        buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

        ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas) {
                ret = -ENOMEM;
                goto err;
        }

        /* Write and verify */
        for (i = 0; i < s->nr_rows; i++) {
                ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

                pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
                         ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk,
                         ppas[0].g.pg);

                /* Expand to all sectors within a flash page */
                if (dev->sec_per_pg > 1) {
                        for (sect = 1; sect < dev->sec_per_pg; sect++) {
                                ppas[sect].ppa = ppas[0].ppa;
                                ppas[sect].g.sec = sect;
                        }
                }

                ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
                                     NVM_IO_SLC_MODE, buf, dev->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed program (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        break;
                }

                ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
                                     NVM_IO_SLC_MODE, buf, dev->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed read (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        break;
                }

                if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
                        pr_err("nvm: sysblk failed verify (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        ret = -EINVAL;
                        break;
                }
        }

        kfree(ppas);
err:
        kfree(buf);
        return ret;
}
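/*
 * Advance each row to its next system block in round-robin order and erase
 * it, so the next update starts writing at page 0 of a clean block.
 */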
static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
        int i, ret;
        unsigned long nxt_blk;
        struct ppa_addr *ppa;

        for (i = 0; i < s->nr_rows; i++) {
                nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
                ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
                ppa->g.pg = ppa_to_slc(dev, 0);

                ret = nvm_erase_ppa(dev, ppa, 1);
                if (ret)
                        return ret;

                s->act_blk[i] = nxt_blk;
        }

        return 0;
}
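/*
 * Locate the most recent system block on the device and return its contents
 * in @info. Returns 1 when a system block is found; 0 or a negative error
 * code otherwise.
 */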
int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, found = 0;
        int ret = 0;

        /*
         * 1. setup sysblk locations
         * 2. get bad block list
         * 3. filter on host-specific (type 3)
         * 4. iterate through all and find the highest seq nr.
         * 5. return superblock information
         */

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
        if (ret)
                goto err_sysblk;

        /* no sysblocks initialized */
        if (!s.nr_ppas)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur) {
                ret = -ENOMEM;
                goto err_sysblk;
        }

        /* find the latest block across all sysblocks */
        for (i = 0; i < s.nr_rows; i++) {
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

                        ret = nvm_scan_block(dev, &ppa, cur);
                        if (ret > 0)
                                found = 1;
                        else if (ret < 0)
                                break;
                }
        }

        nvm_sysblk_to_cpu(info, cur);

        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        if (found)
                return 1;
        return ret;
}
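/*
 * Expected update flow for callers (a minimal sketch, not part of this file;
 * error handling omitted and new_fs_ppa is a hypothetical value):
 *
 *      struct nvm_sb_info sb;
 *
 *      nvm_get_sysblock(dev, &sb);
 *      sb.seqnr++;
 *      sb.fs_ppa = new_fs_ppa;
 *      nvm_update_sysblock(dev, &sb);
 *
 * nvm_update_sysblock() below refuses the update if sb.seqnr is not exactly
 * one above the sequence number currently stored on the media.
 */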
int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
        /* 1. for each latest superblock
         * 2. if room
         *    a. write new flash page entry with the updated information
         * 3. if no room
         *    a. find next available block on lun (linear search)
         *       if none, continue to next lun
         *       if none at all, report error. also report that it wasn't
         *       possible to write to all superblocks.
         *    b. write data to block.
         */
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, ppaidx, found = 0;
        int ret;

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
        if (ret)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur) {
                ret = -ENOMEM;
                goto err_sysblk;
        }

        /* Get the latest sysblk for each sysblk row */
        for (i = 0; i < s.nr_rows; i++) {
                found = 0;
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        ppaidx = scan_ppa_idx(i, j);
                        ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
                        if (ret > 0) {
                                s.act_blk[i] = j;
                                found = 1;
                        } else if (ret < 0)
                                break;
                }
        }

        if (!found) {
                pr_err("nvm: no valid sysblks found to update\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * All sysblocks found. Check that they have the same page id in their
         * flash blocks
         */
        for (i = 1; i < s.nr_rows; i++) {
                struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
                struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

                if (l.g.pg != r.g.pg) {
                        pr_err("nvm: sysblks not on same page. Previous update failed.\n");
                        ret = -EINVAL;
                        goto err_cur;
                }
        }

        /*
         * Check that there hasn't been another update to the seqnr since we
         * began
         */
        if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
                pr_err("nvm: seq is not sequential\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * When all pages in a block have been written, a new block is selected
         * and writing is performed on the new block.
         */
        if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
                                                dev->lps_per_blk - 1) {
                ret = nvm_prepare_new_sysblks(dev, &s);
                if (ret)
                        goto err_cur;
        }

        ret = nvm_write_and_verify(dev, new, &s);
err_cur:
        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        return ret;
}
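/*
 * One-time initialization of the system block area: pick free blocks in the
 * chosen channels, reserve them as host blocks in the bad block table and
 * write the first system block image. The device must support SLC access for
 * the system block pages.
 */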
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        int ret;

        /*
         * 1. select master blocks and select first available blks
         * 2. get bad block list
         * 3. mark MAX_SYSBLKS blocks as host-based device allocated.
         * 4. write and verify data to block
         */

        if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
                return -EINVAL;

        if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
                pr_err("nvm: memory does not support SLC access\n");
                return -EINVAL;
        }

        /* Index all sysblocks and mark them as host-driven */
        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
        if (ret)
                goto err_mark;

        ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
        if (ret)
                goto err_mark;

        /* Write to the first block of each row */
        ret = nvm_write_and_verify(dev, info, &s);
err_mark:
        mutex_unlock(&dev->mlock);

        return ret;
}
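/*
 * Factory reset support. A bitmap with one bit per block, grouped per LUN,
 * tracks which blocks must be preserved; every block whose bit is left clear
 * is queued for erase. The per-LUN region is rounded up to whole longs so
 * each LUN starts on its own word boundary.
 */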
static int factory_nblks(int nblks)
{
        /* Round up to nearest BITS_PER_LONG */
        return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
{
        int nblks = factory_nblks(dev->blks_per_lun);

        return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
                                                                BITS_PER_LONG;
}
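/*
 * Mark the blocks of one LUN that must survive the factory reset: free blocks
 * when NVM_FACTORY_ERASE_ONLY_USER is set, host blocks unless
 * NVM_FACTORY_RESET_HOST_BLKS is set, grown bad blocks unless
 * NVM_FACTORY_RESET_GRWN_BBLKS is set; all remaining block types are always
 * preserved.
 */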
static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                            u8 *blks, int nr_blks,
                            unsigned long *blk_bitmap, int flags)
{
        int i, lunoff;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        lunoff = factory_blk_offset(dev, ppa);

        /* non-set bits correspond to blocks that must be erased */
        for (i = 0; i < nr_blks; i++) {
                switch (blks[i]) {
                case NVM_BLK_T_FREE:
                        if (flags & NVM_FACTORY_ERASE_ONLY_USER)
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                case NVM_BLK_T_HOST:
                        if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                case NVM_BLK_T_GRWN_BAD:
                        if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                default:
                        set_bit(i, &blk_bitmap[lunoff]);
                        break;
                }
        }

        return 0;
}
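/*
 * Pop up to max_ppas erase candidates from the bitmap into erase_list. Bits
 * are set as blocks are picked, so repeated calls walk through all remaining
 * candidates until every LUN is exhausted.
 */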
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
                             int max_ppas, unsigned long *blk_bitmap)
{
        struct ppa_addr ppa;
        int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
        unsigned long *offset;

        while (!done) {
                done = 1;
                nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
                        idx = factory_blk_offset(dev, ppa);
                        offset = &blk_bitmap[idx];

                        blkid = find_first_zero_bit(offset,
                                                    dev->blks_per_lun);
                        if (blkid >= dev->blks_per_lun)
                                continue;
                        set_bit(blkid, offset);

                        ppa.g.blk = blkid;
                        pr_debug("nvm: erase ppa (%u %u %u)\n",
                                 ppa.g.ch, ppa.g.lun, ppa.g.blk);

                        erase_list[ppa_cnt] = ppa;
                        ppa_cnt++;
                        done = 0;

                        if (ppa_cnt == max_ppas)
                                return ppa_cnt;
                }
        }

        return ppa_cnt;
}
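/*
 * Build the keep-bitmap for the whole device by reading the bad block table
 * of every LUN and passing it through nvm_factory_blks() with the caller's
 * factory flags.
 */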
static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
                                int flags)
{
        struct ppa_addr ppa;
        int ch, lun, nr_blks, ret = 0;
        u8 *blks;

        nr_blks = dev->blks_per_lun * dev->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
                ret = nvm_get_bb_tbl(dev, ppa, blks);
                if (ret)
                        pr_err("nvm: failed bb tbl for ch%u lun%u\n",
                               ppa.g.ch, ppa.g.lun);

                ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
                                       flags);
                if (ret)
                        break;
        }

        kfree(blks);
        return ret;
}
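/*
 * Factory reset the device: select the blocks to preserve according to
 * @flags, erase everything else in batches limited by the transport's
 * max_phys_sect, and optionally return the blocks previously reserved for the
 * host to the free state.
 */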
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
        struct ppa_addr *ppas;
        int ppa_cnt, ret = -ENOMEM;
        int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        unsigned long *blk_bitmap;

        blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
                             GFP_KERNEL);
        if (!blk_bitmap)
                return ret;

        ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas)
                goto err_blks;

        /* create list of blks to be erased */
        ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
        if (ret)
                goto err_ppas;

        /* continue to erase blocks until the list is empty */
        while ((ppa_cnt =
                        nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
                nvm_erase_ppa(dev, ppas, ppa_cnt);

        /* mark host reserved blocks free */
        if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
                nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
                mutex_lock(&dev->mlock);
                ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
                if (!ret)
                        ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
                mutex_unlock(&dev->mlock);
        }
err_ppas:
        kfree(ppas);
err_blks:
        kfree(blk_bitmap);
        return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);