/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 */

#include "pblk.h"
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->g_rq_pool);
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}
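/* pblk keeps two request mempools: w_rq_pool for write requests and
 * g_rq_pool for general (read/erase) requests. The allocated request is
 * zeroed before it is handed to the caller.
 */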
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->g_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->g_rq_pool;

        mempool_free(rqd, pool);
}
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_bio_pool);
        }
}
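/* Allocate nr_pages from the page pool and add them to the bio; on failure,
 * already-added pages are returned to the pool.
 */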
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_bio_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_bio_pool);
                        goto err;
                }
        }

        return 0;
err:
        pblk_bio_free_pages(pblk, bio, 0, i - 1);
        return -1;
}
static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}
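/* Pick the GC list a line belongs to based on its valid sector count (vsc):
 * full, high, mid, low or empty. A vsc larger than sec_in_line marks the
 * line as corrupt. Called with line->lock held.
 */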
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int vsc = le32_to_cpu(*line->vsc);

        lockdep_assert_held(&line->lock);

        if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}
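/* Single point of I/O submission. Accounts the request in inflight_io and,
 * when debugging is enabled, sanity-checks the ppa list and, for writes,
 * that the target lines are open.
 */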
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif

        atomic_inc(&pblk->inflight_io);

        return nvm_submit_io(dev, rqd);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}
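/* Compute how many sectors to sync: whole multiples of min_write_pgs up to
 * sec_per_write, or a single minimal write when only a flush is pending.
 */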
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}
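/* The helpers below manage the per-line write cursor (line->cur_sec) over
 * map_bitmap: sectors can be deallocated (cursor rolled back), allocated
 * or looked up.
 */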
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        spin_lock(&line->lock);
        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
        spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     void *emeta_buf, u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list, *meta_list;
        struct bio *bio;
        struct nvm_rq rqd;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
                return -EINVAL;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = meta_list + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = cmd_op;
        rqd.nr_ppas = rq_ppas;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.flags = pblk_set_progr_mode(pblk, WRITE);
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++) {
                                meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                        }
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);
                        int read_type = PBLK_READ_RANDOM;

                        if (pblk_io_aligned(pblk, rq_ppas))
                                read_type = PBLK_READ_SEQUENTIAL;
                        rqd.flags = pblk_set_read_mode(pblk, read_type);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: emeta I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);
        reinit_completion(&wait);

        if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
                bio_put(bio);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}
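/* smeta starts at the first sector of the first good block in the line */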
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
        rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                if (dir == WRITE) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        meta_list[i].lba = lba_list[paddr] = addr_empty;
                }
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: smeta I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

        return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
                                                line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret = 0;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));

                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: sync erase timed out\n");
        }

out:
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}
static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                   struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version = cpu_to_le16(1);

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));
        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                return 0;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec[0];
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec[0];
        line->emeta_ssec = off;
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}
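/* Allocate the per-line map/invalid bitmaps and transition the line from
 * FREE to OPEN. Returns -EAGAIN if the line is not in the FREE state.
 */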
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when line is closed. No
         * need to zero it; it will be initialized using bb info from
         * map_bitmap
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                return -EAGAIN;
        }

        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}
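/* Take the first line off the free list and prepare it for use. Bad or
 * corrupt lines are parked on their respective lists and the allocation is
 * retried. Must be called with l_mg->free_lock held.
 */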
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                if (ret == -EAGAIN) {
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                } else {
                        pr_err("pblk: failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}
static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
        lockdep_assert_held(&pblk->l_mg.free_lock);

        pblk_set_space_limit(pblk);
        pblk->state = PBLK_STATE_STOPPING;
}

void pblk_pipeline_stop(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        if (pblk->state == PBLK_STATE_RECOVERING ||
                                        pblk->state == PBLK_STATE_STOPPED) {
                spin_unlock(&l_mg->free_lock);
                return;
        }
        pblk->state = PBLK_STATE_RECOVERING;
        spin_unlock(&l_mg->free_lock);

        pblk_flush_writer(pblk);
        pblk_wait_for_meta(pblk);

        ret = pblk_recov_pad(pblk);
        if (ret) {
                pr_err("pblk: could not close data on teardown(%d)\n", ret);
                return;
        }

        flush_workqueue(pblk->bb_wq);
        pblk_line_close_meta_sync(pblk);

        spin_lock(&l_mg->free_lock);
        pblk->state = PBLK_STATE_STOPPED;
        l_mg->data_line = NULL;
        l_mg->data_next = NULL;
        spin_unlock(&l_mg->free_lock);
}
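/* Promote the prepared next line (data_next) to being the current data
 * line, erasing it first if needed, and allocate a new next line for
 * preparation.
 */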
void pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return;
        l_mg->data_line = new;

        spin_lock(&l_mg->free_lock);
        if (pblk->state != PBLK_STATE_RUNNING) {
                l_mg->data_line = NULL;
                l_mg->data_next = NULL;
                spin_unlock(&l_mg->free_lock);
                return;
        }

        pblk_line_setup_metadata(new, l_mg, &pblk->lm);
        spin_unlock(&l_mg->free_lock);

retry_erase:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                return;
                } else {
                        io_schedule();
                }
                goto retry_erase;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return;

                goto retry_setup;
        }

        /* Allocate next line for preparation */
        spin_lock(&l_mg->free_lock);
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_stop_writes(pblk, new);
                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
}
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}
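/* kref release callback: invoked when the last reference to a line under GC
 * is dropped. The line is freed and returned to the free list.
 */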
void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}
void pblk_line_close_meta_sync(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line, *tline;
        LIST_HEAD(list);

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return;
        }

        list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
        spin_unlock(&l_mg->close_lock);

        list_for_each_entry_safe(line, tline, &list, list) {
                struct pblk_emeta *emeta = line->emeta;

                while (emeta->mem < lm->emeta_len[0]) {
                        int ret;

                        ret = pblk_submit_meta_io(pblk, line);
                        if (ret) {
                                pr_err("pblk: sync meta line %d failed (%d)\n",
                                                        line->id, ret);
                                return;
                        }
                }
        }

        pblk_wait_for_meta(pblk);
        flush_workqueue(pblk->close_wq);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
        if (pblk_rl_is_limit(&pblk->rl))
                pblk_line_close_meta_sync(pblk);
}
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

#ifdef CONFIG_NVM_DEBUG
        struct pblk_line_meta *lm = &pblk->lm;

        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt closed line %d\n", line->id);
#endif

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);

        pblk_gc_should_kick(pblk);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;

        /* No need for exact vsc value; avoid a big line lock and take approx. */
        memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
        memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

        emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
        emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

        spin_lock(&l_mg->close_lock);
        spin_lock(&line->lock);
        list_add_tail(&line->list, &l_mg->emeta_list);
        spin_unlock(&line->lock);
        spin_unlock(&l_mg->close_lock);

        pblk_line_should_sync_meta(pblk);
}
void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;

        pblk_line_close(pblk, line);
        mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
                pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

                pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                      void (*work)(struct work_struct *),
                      struct workqueue_struct *wq)
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
        if (!line_ws)
                return;

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(wq, &line_ws->ws);
}
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
                             int nr_ppas, int pos)
{
        struct pblk_lun *rlun = &pblk->luns[pos];
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                                ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
        if (ret) {
                switch (ret) {
                case -ETIME:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                case -EINTR:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                }
        }
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

        __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

        /* If the LUN has been locked for this same request, do not attempt to
         * lock it again
         */
        if (test_and_set_bit(pos, lun_bitmap))
                return;

        __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                                ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

        rlun = &pblk->luns[pos];
        up(&rlun->wr_sem);
}
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int nr_luns = geo->nr_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }
}
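/* Update the L2P table for a user write, invalidating the previously mapped
 * ppa (if any) under trans_lock.
 */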
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr l2p_ppa;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
                pblk_map_invalidate(pblk, l2p_ppa);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line)
{
        struct ppa_addr l2p_ppa;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        /* Prevent updated entries from being overwritten by GC */
        if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
                        pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                         struct ppa_addr entry_line)
{
        struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                pblk_map_invalidate(pblk, ppa);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_line = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated
         */
        if (l2p_line.ppa != entry_line.ppa) {
                if (!pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);
                goto out;
        }

#ifdef CONFIG_NVM_DEBUG
        WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
}
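/* Bulk L2P lookups; trans_lock is taken once for the whole batch */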
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++)
                ppas[i] = pblk_trans_map_get(pblk, blba + i);
        spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        u64 lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba == ADDR_EMPTY) {
                        ppas[i].ppa = ADDR_EMPTY;
                } else {
                        /* logic error: lba out-of-bounds. Ignore lookup */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}