GNU Linux-libre 4.14.266-gnu1
drivers/lightnvm/pblk-core.c
1 /*
2  * Copyright (C) 2016 CNEX Labs
3  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4  *                  Matias Bjorling <matias@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-core.c - pblk's core functionality
16  *
17  */
18
19 #include "pblk.h"
20
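/*
 * Erase completion path: a failed erase marks the block bad in the line's
 * block bitmap and defers reporting it to the device through the bb_wq
 * workqueue (pblk_line_mark_bb).
 */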
21 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
22                          struct ppa_addr *ppa)
23 {
24         struct nvm_tgt_dev *dev = pblk->dev;
25         struct nvm_geo *geo = &dev->geo;
26         int pos = pblk_dev_ppa_to_pos(geo, *ppa);
27
28         pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
29         atomic_long_inc(&pblk->erase_failed);
30
31         atomic_dec(&line->blk_in_line);
32         if (test_and_set_bit(pos, line->blk_bitmap))
33                 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
34                                                         line->id, pos);
35
36         pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
37 }
38
39 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
40 {
41         struct pblk_line *line;
42
43         line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
44         atomic_dec(&line->left_seblks);
45
46         if (rqd->error) {
47                 struct ppa_addr *ppa;
48
49                 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
50                 if (!ppa)
51                         return;
52
53                 *ppa = rqd->ppa_addr;
54                 pblk_mark_bb(pblk, line, ppa);
55         }
56
57         atomic_dec(&pblk->inflight_io);
58 }
59
60 /* Erase completion assumes that only one block is erased at a time */
61 static void pblk_end_io_erase(struct nvm_rq *rqd)
62 {
63         struct pblk *pblk = rqd->private;
64
65         __pblk_end_io_erase(pblk, rqd);
66         mempool_free(rqd, pblk->g_rq_pool);
67 }
68
69 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
70                            u64 paddr)
71 {
72         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
73         struct list_head *move_list = NULL;
74
75         /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
76          * table is modified with reclaimed sectors, a check is done to ensure
77          * that newer updates are not overwritten.
78          */
79         spin_lock(&line->lock);
80         if (line->state == PBLK_LINESTATE_GC ||
81                                         line->state == PBLK_LINESTATE_FREE) {
82                 spin_unlock(&line->lock);
83                 return;
84         }
85
86         if (test_and_set_bit(paddr, line->invalid_bitmap)) {
87                 WARN_ONCE(1, "pblk: double invalidate\n");
88                 spin_unlock(&line->lock);
89                 return;
90         }
91         le32_add_cpu(line->vsc, -1);
92
93         if (line->state == PBLK_LINESTATE_CLOSED)
94                 move_list = pblk_line_gc_list(pblk, line);
95         spin_unlock(&line->lock);
96
97         if (move_list) {
98                 spin_lock(&l_mg->gc_lock);
99                 spin_lock(&line->lock);
100                 /* Prevent moving a line that has just been chosen for GC */
101                 if (line->state == PBLK_LINESTATE_GC ||
102                                         line->state == PBLK_LINESTATE_FREE) {
103                         spin_unlock(&line->lock);
104                         spin_unlock(&l_mg->gc_lock);
105                         return;
106                 }
107                 spin_unlock(&line->lock);
108
109                 list_move_tail(&line->list, move_list);
110                 spin_unlock(&l_mg->gc_lock);
111         }
112 }
113
114 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
115 {
116         struct pblk_line *line;
117         u64 paddr;
118         int line_id;
119
120 #ifdef CONFIG_NVM_DEBUG
121         /* Callers must ensure that the ppa points to a device address */
122         BUG_ON(pblk_addr_in_cache(ppa));
123         BUG_ON(pblk_ppa_empty(ppa));
124 #endif
125
126         line_id = pblk_tgt_ppa_to_line(ppa);
127         line = &pblk->lines[line_id];
128         paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
129
130         __pblk_map_invalidate(pblk, line, paddr);
131 }
132
133 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
134                                   unsigned int nr_secs)
135 {
136         sector_t lba;
137
138         spin_lock(&pblk->trans_lock);
139         for (lba = slba; lba < slba + nr_secs; lba++) {
140                 struct ppa_addr ppa;
141
142                 ppa = pblk_trans_map_get(pblk, lba);
143
144                 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
145                         pblk_map_invalidate(pblk, ppa);
146
147                 pblk_ppa_set_empty(&ppa);
148                 pblk_trans_map_set(pblk, lba, ppa);
149         }
150         spin_unlock(&pblk->trans_lock);
151 }
152
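/*
 * Request allocation helpers. pblk keeps two nvm_rq mempools: w_rq_pool for
 * writes and g_rq_pool for everything else. pblk_alloc_rqd() and
 * pblk_free_rqd() must be passed the same rw value so the request returns to
 * the pool it was taken from.
 */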
153 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
154 {
155         mempool_t *pool;
156         struct nvm_rq *rqd;
157         int rq_size;
158
159         if (rw == WRITE) {
160                 pool = pblk->w_rq_pool;
161                 rq_size = pblk_w_rq_size;
162         } else {
163                 pool = pblk->g_rq_pool;
164                 rq_size = pblk_g_rq_size;
165         }
166
167         rqd = mempool_alloc(pool, GFP_KERNEL);
168         memset(rqd, 0, rq_size);
169
170         return rqd;
171 }
172
173 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
174 {
175         mempool_t *pool;
176
177         if (rw == WRITE)
178                 pool = pblk->w_rq_pool;
179         else
180                 pool = pblk->g_rq_pool;
181
182         mempool_free(rqd, pool);
183 }
184
185 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
186                          int nr_pages)
187 {
188         struct bio_vec bv;
189         int i;
190
191         WARN_ON(off + nr_pages != bio->bi_vcnt);
192
193         for (i = off; i < nr_pages + off; i++) {
194                 bv = bio->bi_io_vec[i];
195                 mempool_free(bv.bv_page, pblk->page_bio_pool);
196         }
197 }
198
199 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
200                        int nr_pages)
201 {
202         struct request_queue *q = pblk->dev->q;
203         struct page *page;
204         int i, ret;
205
206         for (i = 0; i < nr_pages; i++) {
207                 page = mempool_alloc(pblk->page_bio_pool, flags);
208                 if (!page)
209                         goto err;
210
211                 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
212                 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
213                         pr_err("pblk: could not add page to bio\n");
214                         mempool_free(page, pblk->page_bio_pool);
215                         goto err;
216                 }
217         }
218
219         return 0;
220 err:
221         pblk_bio_free_pages(pblk, bio, 0, i); /* i pages were added before failing */
222         return -1;
223 }
224
225 static void pblk_write_kick(struct pblk *pblk)
226 {
227         wake_up_process(pblk->writer_ts);
228         mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
229 }
230
231 void pblk_write_timer_fn(unsigned long data)
232 {
233         struct pblk *pblk = (struct pblk *)data;
234
235         /* kick the write thread every tick to flush outstanding data */
236         pblk_write_kick(pblk);
237 }
238
239 void pblk_write_should_kick(struct pblk *pblk)
240 {
241         unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
242
243         if (secs_avail >= pblk->min_write_pgs)
244                 pblk_write_kick(pblk);
245 }
246
247 void pblk_end_bio_sync(struct bio *bio)
248 {
249         struct completion *waiting = bio->bi_private;
250
251         complete(waiting);
252 }
253
254 void pblk_end_io_sync(struct nvm_rq *rqd)
255 {
256         struct completion *waiting = rqd->private;
257
258         complete(waiting);
259 }
260
261 void pblk_wait_for_meta(struct pblk *pblk)
262 {
263         do {
264                 if (!atomic_read(&pblk->inflight_io))
265                         break;
266
267                 schedule();
268         } while (1);
269 }
270
271 static void pblk_flush_writer(struct pblk *pblk)
272 {
273         pblk_rb_flush(&pblk->rwb);
274         do {
275                 if (!pblk_rb_sync_count(&pblk->rwb))
276                         break;
277
278                 pblk_write_kick(pblk);
279                 schedule();
280         } while (1);
281 }
282
283 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
284 {
285         struct pblk_line_meta *lm = &pblk->lm;
286         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
287         struct list_head *move_list = NULL;
288         int vsc = le32_to_cpu(*line->vsc);
289
290         lockdep_assert_held(&line->lock);
291
292         if (!vsc) {
293                 if (line->gc_group != PBLK_LINEGC_FULL) {
294                         line->gc_group = PBLK_LINEGC_FULL;
295                         move_list = &l_mg->gc_full_list;
296                 }
297         } else if (vsc < lm->high_thrs) {
298                 if (line->gc_group != PBLK_LINEGC_HIGH) {
299                         line->gc_group = PBLK_LINEGC_HIGH;
300                         move_list = &l_mg->gc_high_list;
301                 }
302         } else if (vsc < lm->mid_thrs) {
303                 if (line->gc_group != PBLK_LINEGC_MID) {
304                         line->gc_group = PBLK_LINEGC_MID;
305                         move_list = &l_mg->gc_mid_list;
306                 }
307         } else if (vsc < line->sec_in_line) {
308                 if (line->gc_group != PBLK_LINEGC_LOW) {
309                         line->gc_group = PBLK_LINEGC_LOW;
310                         move_list = &l_mg->gc_low_list;
311                 }
312         } else if (vsc == line->sec_in_line) {
313                 if (line->gc_group != PBLK_LINEGC_EMPTY) {
314                         line->gc_group = PBLK_LINEGC_EMPTY;
315                         move_list = &l_mg->gc_empty_list;
316                 }
317         } else {
318                 line->state = PBLK_LINESTATE_CORRUPT;
319                 line->gc_group = PBLK_LINEGC_NONE;
320                 move_list =  &l_mg->corrupt_list;
321                 pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
322                                                 line->id, vsc,
323                                                 line->sec_in_line,
324                                                 lm->high_thrs, lm->mid_thrs);
325         }
326
327         return move_list;
328 }
329
330 void pblk_discard(struct pblk *pblk, struct bio *bio)
331 {
332         sector_t slba = pblk_get_lba(bio);
333         sector_t nr_secs = pblk_get_secs(bio);
334
335         pblk_invalidate_range(pblk, slba, nr_secs);
336 }
337
338 struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
339 {
340         struct ppa_addr ppa;
341
342         spin_lock(&pblk->trans_lock);
343         ppa = pblk_trans_map_get(pblk, lba);
344         spin_unlock(&pblk->trans_lock);
345
346         return ppa;
347 }
348
349 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
350 {
351         atomic_long_inc(&pblk->write_failed);
352 #ifdef CONFIG_NVM_DEBUG
353         pblk_print_failed_rqd(pblk, rqd, rqd->error);
354 #endif
355 }
356
357 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
358 {
359         /* Empty page read is not necessarily an error (e.g., L2P recovery) */
360         if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
361                 atomic_long_inc(&pblk->read_empty);
362                 return;
363         }
364
365         switch (rqd->error) {
366         case NVM_RSP_WARN_HIGHECC:
367                 atomic_long_inc(&pblk->read_high_ecc);
368                 break;
369         case NVM_RSP_ERR_FAILECC:
370         case NVM_RSP_ERR_FAILCRC:
371                 atomic_long_inc(&pblk->read_failed);
372                 break;
373         default:
374                 pr_err("pblk: unknown read error:%d\n", rqd->error);
375         }
376 #ifdef CONFIG_NVM_DEBUG
377         pblk_print_failed_rqd(pblk, rqd, rqd->error);
378 #endif
379 }
380
381 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
382 {
383         pblk->sec_per_write = sec_per_write;
384 }
385
386 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
387 {
388         struct nvm_tgt_dev *dev = pblk->dev;
389
390 #ifdef CONFIG_NVM_DEBUG
391         struct ppa_addr *ppa_list;
392
393         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
394         if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
395                 WARN_ON(1);
396                 return -EINVAL;
397         }
398
399         if (rqd->opcode == NVM_OP_PWRITE) {
400                 struct pblk_line *line;
401                 struct ppa_addr ppa;
402                 int i;
403
404                 for (i = 0; i < rqd->nr_ppas; i++) {
405                         ppa = ppa_list[i];
406                         line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
407
408                         spin_lock(&line->lock);
409                         if (line->state != PBLK_LINESTATE_OPEN) {
410                                 pr_err("pblk: bad ppa: line:%d,state:%d\n",
411                                                         line->id, line->state);
412                                 WARN_ON(1);
413                                 spin_unlock(&line->lock);
414                                 return -EINVAL;
415                         }
416                         spin_unlock(&line->lock);
417                 }
418         }
419 #endif
420
421         atomic_inc(&pblk->inflight_io);
422
423         return nvm_submit_io(dev, rqd);
424 }
425
426 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
427                               unsigned int nr_secs, unsigned int len,
428                               int alloc_type, gfp_t gfp_mask)
429 {
430         struct nvm_tgt_dev *dev = pblk->dev;
431         void *kaddr = data;
432         struct page *page;
433         struct bio *bio;
434         int i, ret;
435
436         if (alloc_type == PBLK_KMALLOC_META)
437                 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
438
439         bio = bio_kmalloc(gfp_mask, nr_secs);
440         if (!bio)
441                 return ERR_PTR(-ENOMEM);
442
443         for (i = 0; i < nr_secs; i++) {
444                 page = vmalloc_to_page(kaddr);
445                 if (!page) {
446                         pr_err("pblk: could not map vmalloc bio\n");
447                         bio_put(bio);
448                         bio = ERR_PTR(-ENOMEM);
449                         goto out;
450                 }
451
452                 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
453                 if (ret != PAGE_SIZE) {
454                         pr_err("pblk: could not add page to bio\n");
455                         bio_put(bio);
456                         bio = ERR_PTR(-ENOMEM);
457                         goto out;
458                 }
459
460                 kaddr += PAGE_SIZE;
461         }
462 out:
463         return bio;
464 }
465
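/*
 * Round the sectors available in the write buffer down to a write unit.
 * Illustrative example (numbers are made up, not from any real geometry):
 * with min_write_pgs = 8 and sec_per_write = 64, secs_avail = 70 gives 64,
 * secs_avail = 30 gives 24, and secs_avail = 5 gives 8 only if a flush is
 * pending, otherwise 0.
 */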
466 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
467                    unsigned long secs_to_flush)
468 {
469         int max = pblk->sec_per_write;
470         int min = pblk->min_write_pgs;
471         int secs_to_sync = 0;
472
473         if (secs_avail >= max)
474                 secs_to_sync = max;
475         else if (secs_avail >= min)
476                 secs_to_sync = min * (secs_avail / min);
477         else if (secs_to_flush)
478                 secs_to_sync = min;
479
480         return secs_to_sync;
481 }
482
483 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
484 {
485         u64 addr;
486         int i;
487
488         spin_lock(&line->lock);
489         addr = find_next_zero_bit(line->map_bitmap,
490                                         pblk->lm.sec_per_line, line->cur_sec);
491         line->cur_sec = addr - nr_secs;
492
493         for (i = 0; i < nr_secs; i++, line->cur_sec--)
494                 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
495         spin_unlock(&line->lock);
496 }
497
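/*
 * Sector allocation within a line. __pblk_alloc_page() requires line->lock
 * to be held by the caller (see the lockdep assertion below); the
 * pblk_alloc_page() wrapper takes the lock itself and also charges the
 * allocation against line->left_msecs.
 */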
498 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
499 {
500         u64 addr;
501         int i;
502
503         lockdep_assert_held(&line->lock);
504
505         /* Logic error: ppa out of bounds. Prevent generating a bad address */
506         if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
507                 WARN(1, "pblk: page allocation out of bounds\n");
508                 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
509         }
510
511         line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
512                                         pblk->lm.sec_per_line, line->cur_sec);
513         for (i = 0; i < nr_secs; i++, line->cur_sec++)
514                 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
515
516         return addr;
517 }
518
519 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
520 {
521         u64 addr;
522
523         /* Lock needed in case a write fails and a recovery needs to remap
524          * failed write buffer entries
525          */
526         spin_lock(&line->lock);
527         addr = __pblk_alloc_page(pblk, line, nr_secs);
528         line->left_msecs -= nr_secs;
529         WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
530         spin_unlock(&line->lock);
531
532         return addr;
533 }
534
535 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
536 {
537         u64 paddr;
538
539         spin_lock(&line->lock);
540         paddr = find_next_zero_bit(line->map_bitmap,
541                                         pblk->lm.sec_per_line, line->cur_sec);
542         spin_unlock(&line->lock);
543
544         return paddr;
545 }
546
547 /*
548  * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
549  * taking the per-LUN semaphore.
550  */
551 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
552                                      void *emeta_buf, u64 paddr, int dir)
553 {
554         struct nvm_tgt_dev *dev = pblk->dev;
555         struct nvm_geo *geo = &dev->geo;
556         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
557         struct pblk_line_meta *lm = &pblk->lm;
558         void *ppa_list, *meta_list;
559         struct bio *bio;
560         struct nvm_rq rqd;
561         dma_addr_t dma_ppa_list, dma_meta_list;
562         int min = pblk->min_write_pgs;
563         int left_ppas = lm->emeta_sec[0];
564         int id = line->id;
565         int rq_ppas, rq_len;
566         int cmd_op, bio_op;
567         int i, j;
568         int ret;
569         DECLARE_COMPLETION_ONSTACK(wait);
570
571         if (dir == WRITE) {
572                 bio_op = REQ_OP_WRITE;
573                 cmd_op = NVM_OP_PWRITE;
574         } else if (dir == READ) {
575                 bio_op = REQ_OP_READ;
576                 cmd_op = NVM_OP_PREAD;
577         } else
578                 return -EINVAL;
579
580         meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
581                                                         &dma_meta_list);
582         if (!meta_list)
583                 return -ENOMEM;
584
585         ppa_list = meta_list + pblk_dma_meta_size;
586         dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
587
588 next_rq:
589         memset(&rqd, 0, sizeof(struct nvm_rq));
590
591         rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
592         rq_len = rq_ppas * geo->sec_size;
593
594         bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
595                                         l_mg->emeta_alloc_type, GFP_KERNEL);
596         if (IS_ERR(bio)) {
597                 ret = PTR_ERR(bio);
598                 goto free_rqd_dma;
599         }
600
601         bio->bi_iter.bi_sector = 0; /* internal bio */
602         bio_set_op_attrs(bio, bio_op, 0);
603
604         rqd.bio = bio;
605         rqd.meta_list = meta_list;
606         rqd.ppa_list = ppa_list;
607         rqd.dma_meta_list = dma_meta_list;
608         rqd.dma_ppa_list = dma_ppa_list;
609         rqd.opcode = cmd_op;
610         rqd.nr_ppas = rq_ppas;
611         rqd.end_io = pblk_end_io_sync;
612         rqd.private = &wait;
613
614         if (dir == WRITE) {
615                 struct pblk_sec_meta *meta_list = rqd.meta_list;
616
617                 rqd.flags = pblk_set_progr_mode(pblk, WRITE);
618                 for (i = 0; i < rqd.nr_ppas; ) {
619                         spin_lock(&line->lock);
620                         paddr = __pblk_alloc_page(pblk, line, min);
621                         spin_unlock(&line->lock);
622                         for (j = 0; j < min; j++, i++, paddr++) {
623                                 meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
624                                 rqd.ppa_list[i] =
625                                         addr_to_gen_ppa(pblk, paddr, id);
626                         }
627                 }
628         } else {
629                 for (i = 0; i < rqd.nr_ppas; ) {
630                         struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
631                         int pos = pblk_dev_ppa_to_pos(geo, ppa);
632                         int read_type = PBLK_READ_RANDOM;
633
634                         if (pblk_io_aligned(pblk, rq_ppas))
635                                 read_type = PBLK_READ_SEQUENTIAL;
636                         rqd.flags = pblk_set_read_mode(pblk, read_type);
637
638                         while (test_bit(pos, line->blk_bitmap)) {
639                                 paddr += min;
640                                 if (pblk_boundary_paddr_checks(pblk, paddr)) {
641                                         pr_err("pblk: corrupt emeta line:%d\n",
642                                                                 line->id);
643                                         bio_put(bio);
644                                         ret = -EINTR;
645                                         goto free_rqd_dma;
646                                 }
647
648                                 ppa = addr_to_gen_ppa(pblk, paddr, id);
649                                 pos = pblk_dev_ppa_to_pos(geo, ppa);
650                         }
651
652                         if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
653                                 pr_err("pblk: corrupt emeta line:%d\n",
654                                                                 line->id);
655                                 bio_put(bio);
656                                 ret = -EINTR;
657                                 goto free_rqd_dma;
658                         }
659
660                         for (j = 0; j < min; j++, i++, paddr++)
661                                 rqd.ppa_list[i] =
662                                         addr_to_gen_ppa(pblk, paddr, line->id);
663                 }
664         }
665
666         ret = pblk_submit_io(pblk, &rqd);
667         if (ret) {
668                 pr_err("pblk: emeta I/O submission failed: %d\n", ret);
669                 bio_put(bio);
670                 goto free_rqd_dma;
671         }
672
673         if (!wait_for_completion_io_timeout(&wait,
674                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
675                 pr_err("pblk: emeta I/O timed out\n");
676         }
677         atomic_dec(&pblk->inflight_io);
678         reinit_completion(&wait);
679
680         if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
681                 bio_put(bio);
682
683         if (rqd.error) {
684                 if (dir == WRITE)
685                         pblk_log_write_err(pblk, &rqd);
686                 else
687                         pblk_log_read_err(pblk, &rqd);
688         }
689
690         emeta_buf += rq_len;
691         left_ppas -= rq_ppas;
692         if (left_ppas)
693                 goto next_rq;
694 free_rqd_dma:
695         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
696         return ret;
697 }
698
699 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
700 {
701         struct nvm_tgt_dev *dev = pblk->dev;
702         struct nvm_geo *geo = &dev->geo;
703         struct pblk_line_meta *lm = &pblk->lm;
704         int bit;
705
706         /* This usually only happens on bad lines */
707         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
708         if (bit >= lm->blk_per_line)
709                 return -1;
710
711         return bit * geo->sec_per_pl;
712 }
713
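/*
 * Synchronous read/write of the line's start metadata (smeta). On the write
 * path the matching lba_list entries in emeta are set to ADDR_EMPTY, since
 * smeta sectors carry no user LBAs.
 */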
714 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
715                                      u64 paddr, int dir)
716 {
717         struct nvm_tgt_dev *dev = pblk->dev;
718         struct pblk_line_meta *lm = &pblk->lm;
719         struct bio *bio;
720         struct nvm_rq rqd;
721         __le64 *lba_list = NULL;
722         int i, ret;
723         int cmd_op, bio_op;
724         int flags;
725         DECLARE_COMPLETION_ONSTACK(wait);
726
727         if (dir == WRITE) {
728                 bio_op = REQ_OP_WRITE;
729                 cmd_op = NVM_OP_PWRITE;
730                 flags = pblk_set_progr_mode(pblk, WRITE);
731                 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
732         } else if (dir == READ) {
733                 bio_op = REQ_OP_READ;
734                 cmd_op = NVM_OP_PREAD;
735                 flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
736         } else
737                 return -EINVAL;
738
739         memset(&rqd, 0, sizeof(struct nvm_rq));
740
741         rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
742                                                         &rqd.dma_meta_list);
743         if (!rqd.meta_list)
744                 return -ENOMEM;
745
746         rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
747         rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
748
749         bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
750         if (IS_ERR(bio)) {
751                 ret = PTR_ERR(bio);
752                 goto free_ppa_list;
753         }
754
755         bio->bi_iter.bi_sector = 0; /* internal bio */
756         bio_set_op_attrs(bio, bio_op, 0);
757
758         rqd.bio = bio;
759         rqd.opcode = cmd_op;
760         rqd.flags = flags;
761         rqd.nr_ppas = lm->smeta_sec;
762         rqd.end_io = pblk_end_io_sync;
763         rqd.private = &wait;
764
765         for (i = 0; i < lm->smeta_sec; i++, paddr++) {
766                 struct pblk_sec_meta *meta_list = rqd.meta_list;
767
768                 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
769
770                 if (dir == WRITE) {
771                         __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
772
773                         meta_list[i].lba = lba_list[paddr] = addr_empty;
774                 }
775         }
776
777         /*
778          * This I/O is sent by the write thread when a line is replaced. Since
779          * the write thread is the only one sending write and erase commands,
780          * there is no need to take the LUN semaphore.
781          */
782         ret = pblk_submit_io(pblk, &rqd);
783         if (ret) {
784                 pr_err("pblk: smeta I/O submission failed: %d\n", ret);
785                 bio_put(bio);
786                 goto free_ppa_list;
787         }
788
789         if (!wait_for_completion_io_timeout(&wait,
790                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
791                 pr_err("pblk: smeta I/O timed out\n");
792         }
793         atomic_dec(&pblk->inflight_io);
794
795         if (rqd.error) {
796                 if (dir == WRITE)
797                         pblk_log_write_err(pblk, &rqd);
798                 else
799                         pblk_log_read_err(pblk, &rqd);
800         }
801
802 free_ppa_list:
803         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
804
805         return ret;
806 }
807
808 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
809 {
810         u64 bpaddr = pblk_line_smeta_start(pblk, line);
811
812         return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
813 }
814
815 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
816                          void *emeta_buf)
817 {
818         return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
819                                                 line->emeta_ssec, READ);
820 }
821
822 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
823                             struct ppa_addr ppa)
824 {
825         rqd->opcode = NVM_OP_ERASE;
826         rqd->ppa_addr = ppa;
827         rqd->nr_ppas = 1;
828         rqd->flags = pblk_set_progr_mode(pblk, ERASE);
829         rqd->bio = NULL;
830 }
831
832 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
833 {
834         struct nvm_rq rqd;
835         int ret = 0;
836         DECLARE_COMPLETION_ONSTACK(wait);
837
838         memset(&rqd, 0, sizeof(struct nvm_rq));
839
840         pblk_setup_e_rq(pblk, &rqd, ppa);
841
842         rqd.end_io = pblk_end_io_sync;
843         rqd.private = &wait;
844
845         /* The write thread schedules erases so that it minimizes disturbance
846          * to writes. Thus, there is no need to take the LUN semaphore.
847          */
848         ret = pblk_submit_io(pblk, &rqd);
849         if (ret) {
850                 struct nvm_tgt_dev *dev = pblk->dev;
851                 struct nvm_geo *geo = &dev->geo;
852
853                 pr_err("pblk: could not sync erase line:%d,blk:%d\n",
854                                         pblk_dev_ppa_to_line(ppa),
855                                         pblk_dev_ppa_to_pos(geo, ppa));
856
857                 rqd.error = ret;
858                 goto out;
859         }
860
861         if (!wait_for_completion_io_timeout(&wait,
862                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
863                 pr_err("pblk: sync erase timed out\n");
864         }
865
866 out:
867         rqd.private = pblk;
868         __pblk_end_io_erase(pblk, &rqd);
869
870         return ret;
871 }
872
873 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
874 {
875         struct pblk_line_meta *lm = &pblk->lm;
876         struct ppa_addr ppa;
877         int ret, bit = -1;
878
879         /* Erase only good blocks, one at a time */
880         do {
881                 spin_lock(&line->lock);
882                 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
883                                                                 bit + 1);
884                 if (bit >= lm->blk_per_line) {
885                         spin_unlock(&line->lock);
886                         break;
887                 }
888
889                 ppa = pblk->luns[bit].bppa; /* set ch and lun */
890                 ppa.g.blk = line->id;
891
892                 atomic_dec(&line->left_eblks);
893                 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
894                 spin_unlock(&line->lock);
895
896                 ret = pblk_blk_erase_sync(pblk, ppa);
897                 if (ret) {
898                         pr_err("pblk: failed to erase line %d\n", line->id);
899                         return ret;
900                 }
901         } while (1);
902
903         return 0;
904 }
905
906 static void pblk_line_setup_metadata(struct pblk_line *line,
907                                      struct pblk_line_mgmt *l_mg,
908                                      struct pblk_line_meta *lm)
909 {
910         int meta_line;
911
912         lockdep_assert_held(&l_mg->free_lock);
913
914 retry_meta:
915         meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
916         if (meta_line == PBLK_DATA_LINES) {
917                 spin_unlock(&l_mg->free_lock);
918                 io_schedule();
919                 spin_lock(&l_mg->free_lock);
920                 goto retry_meta;
921         }
922
923         set_bit(meta_line, &l_mg->meta_bitmap);
924         line->meta_line = meta_line;
925
926         line->smeta = l_mg->sline_meta[meta_line];
927         line->emeta = l_mg->eline_meta[meta_line];
928
929         memset(line->smeta, 0, lm->smeta_len);
930         memset(line->emeta->buf, 0, lm->emeta_len[0]);
931
932         line->emeta->mem = 0;
933         atomic_set(&line->emeta->sync, 0);
934 }
935
936 /* For now lines are always assumed full lines. Thus, smeta former and current
937  * lun bitmaps are omitted.
938  */
939 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
940                                   struct pblk_line *cur)
941 {
942         struct nvm_tgt_dev *dev = pblk->dev;
943         struct nvm_geo *geo = &dev->geo;
944         struct pblk_line_meta *lm = &pblk->lm;
945         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
946         struct pblk_emeta *emeta = line->emeta;
947         struct line_emeta *emeta_buf = emeta->buf;
948         struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
949         int nr_blk_line;
950
951         /* After erasing the line, new bad blocks might appear and we risk
952          * having an invalid line
953          */
954         nr_blk_line = lm->blk_per_line -
955                         bitmap_weight(line->blk_bitmap, lm->blk_per_line);
956         if (nr_blk_line < lm->min_blk_line) {
957                 spin_lock(&l_mg->free_lock);
958                 spin_lock(&line->lock);
959                 line->state = PBLK_LINESTATE_BAD;
960                 spin_unlock(&line->lock);
961
962                 list_add_tail(&line->list, &l_mg->bad_list);
963                 spin_unlock(&l_mg->free_lock);
964
965                 pr_debug("pblk: line %d is bad\n", line->id);
966
967                 return 0;
968         }
969
970         /* Run-time metadata */
971         line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
972
973         /* Mark LUNs allocated in this line (all for now) */
974         bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
975
976         smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
977         memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
978         smeta_buf->header.id = cpu_to_le32(line->id);
979         smeta_buf->header.type = cpu_to_le16(line->type);
980         smeta_buf->header.version = cpu_to_le16(1);
981
982         /* Start metadata */
983         smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
984         smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
985
986         /* Fill metadata among lines */
987         if (cur) {
988                 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
989                 smeta_buf->prev_id = cpu_to_le32(cur->id);
990                 cur->emeta->buf->next_id = cpu_to_le32(line->id);
991         } else {
992                 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
993         }
994
995         /* All smeta must be set at this point */
996         smeta_buf->header.crc = cpu_to_le32(
997                         pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
998         smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
999
1000         /* End metadata */
1001         memcpy(&emeta_buf->header, &smeta_buf->header,
1002                                                 sizeof(struct line_header));
1003         emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1004         emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1005         emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1006         emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1007         emeta_buf->crc = cpu_to_le32(0);
1008         emeta_buf->prev_id = smeta_buf->prev_id;
1009
1010         return 1;
1011 }
1012
1013 /* Fold bad block information into the line's map and invalid bitmaps and
1014  * reserve the smeta and emeta regions, adjusting sec_in_line accordingly.
1015  */
1016 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1017                              int init)
1018 {
1019         struct nvm_tgt_dev *dev = pblk->dev;
1020         struct nvm_geo *geo = &dev->geo;
1021         struct pblk_line_meta *lm = &pblk->lm;
1022         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1023         int nr_bb = 0;
1024         u64 off;
1025         int bit = -1;
1026
1027         line->sec_in_line = lm->sec_per_line;
1028
1029         /* Capture bad block information on line mapping bitmaps */
1030         while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1031                                         bit + 1)) < lm->blk_per_line) {
1032                 off = bit * geo->sec_per_pl;
1033                 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1034                                                         lm->sec_per_line);
1035                 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1036                                                         lm->sec_per_line);
1037                 line->sec_in_line -= geo->sec_per_blk;
1038                 if (bit >= lm->emeta_bb)
1039                         nr_bb++;
1040         }
1041
1042         /* Mark smeta metadata sectors as bad sectors */
1043         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1044         off = bit * geo->sec_per_pl;
1045         bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1046         line->sec_in_line -= lm->smeta_sec;
1047         line->smeta_ssec = off;
1048         line->cur_sec = off + lm->smeta_sec;
1049
1050         if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
1051                 pr_debug("pblk: line smeta I/O failed. Retry\n");
1052                 return 1;
1053         }
1054
1055         bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1056
1057         /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1058          * blocks to make sure that there are enough sectors to store emeta
1059          */
1060         bit = lm->sec_per_line;
1061         off = lm->sec_per_line - lm->emeta_sec[0];
1062         bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
1063         while (nr_bb) {
1064                 off -= geo->sec_per_pl;
1065                 if (!test_bit(off, line->invalid_bitmap)) {
1066                         bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
1067                         nr_bb--;
1068                 }
1069         }
1070
1071         line->sec_in_line -= lm->emeta_sec[0];
1072         line->emeta_ssec = off;
1073         line->nr_valid_lbas = 0;
1074         line->left_msecs = line->sec_in_line;
1075         *line->vsc = cpu_to_le32(line->sec_in_line);
1076
1077         if (lm->sec_per_line - line->sec_in_line !=
1078                 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1079                 spin_lock(&line->lock);
1080                 line->state = PBLK_LINESTATE_BAD;
1081                 spin_unlock(&line->lock);
1082
1083                 list_add_tail(&line->list, &l_mg->bad_list);
1084                 pr_err("pblk: unexpected line %d is bad\n", line->id);
1085
1086                 return 0;
1087         }
1088
1089         return 1;
1090 }
1091
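/*
 * Allocate the line's map and invalid bitmaps and transition it from FREE to
 * OPEN. left_eblks and left_seblks are seeded with the number of good blocks
 * so that erase submission and completion can be tracked per block.
 */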
1092 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1093 {
1094         struct pblk_line_meta *lm = &pblk->lm;
1095         int blk_in_line = atomic_read(&line->blk_in_line);
1096
1097         line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1098         if (!line->map_bitmap)
1099                 return -ENOMEM;
1100         memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1101
1102         /* invalid_bitmap is special since it is used when the line is closed. No
1103          * need to zero it; it will be initialized using bad block info from
1104          * map_bitmap
1105          */
1106         line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1107         if (!line->invalid_bitmap) {
1108                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1109                 return -ENOMEM;
1110         }
1111
1112         spin_lock(&line->lock);
1113         if (line->state != PBLK_LINESTATE_FREE) {
1114                 mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
1115                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1116                 spin_unlock(&line->lock);
1117                 WARN(1, "pblk: corrupted line %d, state %d\n",
1118                                                         line->id, line->state);
1119                 return -EAGAIN;
1120         }
1121
1122         line->state = PBLK_LINESTATE_OPEN;
1123
1124         atomic_set(&line->left_eblks, blk_in_line);
1125         atomic_set(&line->left_seblks, blk_in_line);
1126
1127         line->meta_distance = lm->meta_distance;
1128         spin_unlock(&line->lock);
1129
1130         /* Bad blocks do not need to be erased */
1131         bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1132
1133         kref_init(&line->ref);
1134
1135         return 0;
1136 }
1137
1138 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1139 {
1140         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1141         int ret;
1142
1143         spin_lock(&l_mg->free_lock);
1144         l_mg->data_line = line;
1145         list_del(&line->list);
1146
1147         ret = pblk_line_prepare(pblk, line);
1148         if (ret) {
1149                 list_add(&line->list, &l_mg->free_list);
1150                 spin_unlock(&l_mg->free_lock);
1151                 return ret;
1152         }
1153         spin_unlock(&l_mg->free_lock);
1154
1155         pblk_rl_free_lines_dec(&pblk->rl, line);
1156
1157         if (!pblk_line_init_bb(pblk, line, 0)) {
1158                 list_add(&line->list, &l_mg->free_list);
1159                 return -EINTR;
1160         }
1161
1162         return 0;
1163 }
1164
1165 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1166 {
1167         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1168         line->map_bitmap = NULL;
1169         line->smeta = NULL;
1170         line->emeta = NULL;
1171 }
1172
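/*
 * Take the first line off the free list and prepare it for use. Must be
 * called with l_mg->free_lock held. Lines with no good blocks or a corrupt
 * state are parked on bad_list/corrupt_list and the next free line is tried.
 */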
1173 struct pblk_line *pblk_line_get(struct pblk *pblk)
1174 {
1175         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1176         struct pblk_line_meta *lm = &pblk->lm;
1177         struct pblk_line *line;
1178         int ret, bit;
1179
1180         lockdep_assert_held(&l_mg->free_lock);
1181
1182 retry:
1183         if (list_empty(&l_mg->free_list)) {
1184                 pr_err("pblk: no free lines\n");
1185                 return NULL;
1186         }
1187
1188         line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1189         list_del(&line->list);
1190         l_mg->nr_free_lines--;
1191
1192         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1193         if (unlikely(bit >= lm->blk_per_line)) {
1194                 spin_lock(&line->lock);
1195                 line->state = PBLK_LINESTATE_BAD;
1196                 spin_unlock(&line->lock);
1197
1198                 list_add_tail(&line->list, &l_mg->bad_list);
1199
1200                 pr_debug("pblk: line %d is bad\n", line->id);
1201                 goto retry;
1202         }
1203
1204         ret = pblk_line_prepare(pblk, line);
1205         if (ret) {
1206                 if (ret == -EAGAIN) {
1207                         list_add(&line->list, &l_mg->corrupt_list);
1208                         goto retry;
1209                 } else {
1210                         pr_err("pblk: failed to prepare line %d\n", line->id);
1211                         list_add(&line->list, &l_mg->free_list);
1212                         l_mg->nr_free_lines++;
1213                         return NULL;
1214                 }
1215         }
1216
1217         return line;
1218 }
1219
1220 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1221                                          struct pblk_line *line)
1222 {
1223         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1224         struct pblk_line *retry_line;
1225
1226 retry:
1227         spin_lock(&l_mg->free_lock);
1228         retry_line = pblk_line_get(pblk);
1229         if (!retry_line) {
1230                 l_mg->data_line = NULL;
1231                 spin_unlock(&l_mg->free_lock);
1232                 return NULL;
1233         }
1234
1235         retry_line->smeta = line->smeta;
1236         retry_line->emeta = line->emeta;
1237         retry_line->meta_line = line->meta_line;
1238
1239         pblk_line_free(pblk, line);
1240         l_mg->data_line = retry_line;
1241         spin_unlock(&l_mg->free_lock);
1242
1243         pblk_rl_free_lines_dec(&pblk->rl, retry_line);
1244
1245         if (pblk_line_erase(pblk, retry_line))
1246                 goto retry;
1247
1248         return retry_line;
1249 }
1250
1251 static void pblk_set_space_limit(struct pblk *pblk)
1252 {
1253         struct pblk_rl *rl = &pblk->rl;
1254
1255         atomic_set(&rl->rb_space, 0);
1256 }
1257
1258 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1259 {
1260         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1261         struct pblk_line *line;
1262         int is_next = 0;
1263
1264         spin_lock(&l_mg->free_lock);
1265         line = pblk_line_get(pblk);
1266         if (!line) {
1267                 spin_unlock(&l_mg->free_lock);
1268                 return NULL;
1269         }
1270
1271         line->seq_nr = l_mg->d_seq_nr++;
1272         line->type = PBLK_LINETYPE_DATA;
1273         l_mg->data_line = line;
1274
1275         pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1276
1277         /* Allocate next line for preparation */
1278         l_mg->data_next = pblk_line_get(pblk);
1279         if (!l_mg->data_next) {
1280                 /* If we cannot get a new line, we need to stop the pipeline.
1281                  * Only allow as many writes in as we can store safely and then
1282                  * fail gracefully
1283                  */
1284                 pblk_set_space_limit(pblk);
1285
1286                 l_mg->data_next = NULL;
1287         } else {
1288                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1289                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1290                 is_next = 1;
1291         }
1292         spin_unlock(&l_mg->free_lock);
1293
1294         if (pblk_line_erase(pblk, line)) {
1295                 line = pblk_line_retry(pblk, line);
1296                 if (!line)
1297                         return NULL;
1298         }
1299
1300         pblk_rl_free_lines_dec(&pblk->rl, line);
1301         if (is_next)
1302                 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1303
1304 retry_setup:
1305         if (!pblk_line_init_metadata(pblk, line, NULL)) {
1306                 line = pblk_line_retry(pblk, line);
1307                 if (!line)
1308                         return NULL;
1309
1310                 goto retry_setup;
1311         }
1312
1313         if (!pblk_line_init_bb(pblk, line, 1)) {
1314                 line = pblk_line_retry(pblk, line);
1315                 if (!line)
1316                         return NULL;
1317
1318                 goto retry_setup;
1319         }
1320
1321         return line;
1322 }
1323
1324 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1325 {
1326         lockdep_assert_held(&pblk->l_mg.free_lock);
1327
1328         pblk_set_space_limit(pblk);
1329         pblk->state = PBLK_STATE_STOPPING;
1330 }
1331
1332 void pblk_pipeline_stop(struct pblk *pblk)
1333 {
1334         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1335         int ret;
1336
1337         spin_lock(&l_mg->free_lock);
1338         if (pblk->state == PBLK_STATE_RECOVERING ||
1339                                         pblk->state == PBLK_STATE_STOPPED) {
1340                 spin_unlock(&l_mg->free_lock);
1341                 return;
1342         }
1343         pblk->state = PBLK_STATE_RECOVERING;
1344         spin_unlock(&l_mg->free_lock);
1345
1346         pblk_flush_writer(pblk);
1347         pblk_wait_for_meta(pblk);
1348
1349         ret = pblk_recov_pad(pblk);
1350         if (ret) {
1351                 pr_err("pblk: could not close data on teardown(%d)\n", ret);
1352                 return;
1353         }
1354
1355         flush_workqueue(pblk->bb_wq);
1356         pblk_line_close_meta_sync(pblk);
1357
1358         spin_lock(&l_mg->free_lock);
1359         pblk->state = PBLK_STATE_STOPPED;
1360         l_mg->data_line = NULL;
1361         l_mg->data_next = NULL;
1362         spin_unlock(&l_mg->free_lock);
1363 }
1364
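/*
 * Swap the active data line: data_next becomes the current data line, it is
 * erased (if still needed) and initialized, and a new data_next is taken from
 * the free list. If no free line is available, writes are stopped so the
 * pipeline can fail gracefully once the buffer fills up.
 */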
1365 void pblk_line_replace_data(struct pblk *pblk)
1366 {
1367         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1368         struct pblk_line *cur, *new;
1369         unsigned int left_seblks;
1370         int is_next = 0;
1371
1372         cur = l_mg->data_line;
1373         new = l_mg->data_next;
1374         if (!new)
1375                 return;
1376         l_mg->data_line = new;
1377
1378         spin_lock(&l_mg->free_lock);
1379         if (pblk->state != PBLK_STATE_RUNNING) {
1380                 l_mg->data_line = NULL;
1381                 l_mg->data_next = NULL;
1382                 spin_unlock(&l_mg->free_lock);
1383                 return;
1384         }
1385
1386         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1387         spin_unlock(&l_mg->free_lock);
1388
1389 retry_erase:
1390         left_seblks = atomic_read(&new->left_seblks);
1391         if (left_seblks) {
1392                 /* If line is not fully erased, erase it */
1393                 if (atomic_read(&new->left_eblks)) {
1394                         if (pblk_line_erase(pblk, new))
1395                                 return;
1396                 } else {
1397                         io_schedule();
1398                 }
1399                 goto retry_erase;
1400         }
1401
1402 retry_setup:
1403         if (!pblk_line_init_metadata(pblk, new, cur)) {
1404                 new = pblk_line_retry(pblk, new);
1405                 if (!new)
1406                         return;
1407
1408                 goto retry_setup;
1409         }
1410
1411         if (!pblk_line_init_bb(pblk, new, 1)) {
1412                 new = pblk_line_retry(pblk, new);
1413                 if (!new)
1414                         return;
1415
1416                 goto retry_setup;
1417         }
1418
1419         /* Allocate next line for preparation */
1420         spin_lock(&l_mg->free_lock);
1421         l_mg->data_next = pblk_line_get(pblk);
1422         if (!l_mg->data_next) {
1423                 /* If we cannot get a new line, we need to stop the pipeline.
1424                  * Only allow as many writes in as we can store safely and then
1425                  * fail gracefully
1426                  */
1427                 pblk_stop_writes(pblk, new);
1428                 l_mg->data_next = NULL;
1429         } else {
1430                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1431                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1432                 is_next = 1;
1433         }
1434         spin_unlock(&l_mg->free_lock);
1435
1436         if (is_next)
1437                 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1438 }
1439
1440 void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
1441 {
1442         if (line->map_bitmap)
1443                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1444         if (line->invalid_bitmap)
1445                 mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
1446
1447         *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1448
1449         line->map_bitmap = NULL;
1450         line->invalid_bitmap = NULL;
1451         line->smeta = NULL;
1452         line->emeta = NULL;
1453 }
1454
1455 void pblk_line_put(struct kref *ref)
1456 {
1457         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1458         struct pblk *pblk = line->pblk;
1459         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1460
1461         spin_lock(&line->lock);
1462         WARN_ON(line->state != PBLK_LINESTATE_GC);
1463         line->state = PBLK_LINESTATE_FREE;
1464         line->gc_group = PBLK_LINEGC_NONE;
1465         pblk_line_free(pblk, line);
1466         spin_unlock(&line->lock);
1467
1468         spin_lock(&l_mg->free_lock);
1469         list_add_tail(&line->list, &l_mg->free_list);
1470         l_mg->nr_free_lines++;
1471         spin_unlock(&l_mg->free_lock);
1472
1473         pblk_rl_free_lines_inc(&pblk->rl, line);
1474 }
1475
1476 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1477 {
1478         struct nvm_rq *rqd;
1479         int err;
1480
1481         rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
1482         memset(rqd, 0, pblk_g_rq_size);
1483
1484         pblk_setup_e_rq(pblk, rqd, ppa);
1485
1486         rqd->end_io = pblk_end_io_erase;
1487         rqd->private = pblk;
1488
1489         /* The write thread schedules erases so that it minimizes disturbance
1490          * to writes. Thus, there is no need to take the LUN semaphore.
1491          */
1492         err = pblk_submit_io(pblk, rqd);
1493         if (err) {
1494                 struct nvm_tgt_dev *dev = pblk->dev;
1495                 struct nvm_geo *geo = &dev->geo;
1496
1497                 pr_err("pblk: could not async erase line:%d,blk:%d\n",
1498                                         pblk_dev_ppa_to_line(ppa),
1499                                         pblk_dev_ppa_to_pos(geo, ppa));
1500         }
1501
1502         return err;
1503 }
1504
1505 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1506 {
1507         return pblk->l_mg.data_line;
1508 }
1509
1510 /* For now, always erase next line */
1511 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1512 {
1513         return pblk->l_mg.data_next;
1514 }
1515
1516 int pblk_line_is_full(struct pblk_line *line)
1517 {
1518         return (line->left_msecs == 0);
1519 }
1520
1521 void pblk_line_close_meta_sync(struct pblk *pblk)
1522 {
1523         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1524         struct pblk_line_meta *lm = &pblk->lm;
1525         struct pblk_line *line, *tline;
1526         LIST_HEAD(list);
1527
1528         spin_lock(&l_mg->close_lock);
1529         if (list_empty(&l_mg->emeta_list)) {
1530                 spin_unlock(&l_mg->close_lock);
1531                 return;
1532         }
1533
1534         list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1535         spin_unlock(&l_mg->close_lock);
1536
1537         list_for_each_entry_safe(line, tline, &list, list) {
1538                 struct pblk_emeta *emeta = line->emeta;
1539
1540                 while (emeta->mem < lm->emeta_len[0]) {
1541                         int ret;
1542
1543                         ret = pblk_submit_meta_io(pblk, line);
1544                         if (ret) {
1545                                 pr_err("pblk: sync meta line %d failed (%d)\n",
1546                                                         line->id, ret);
1547                                 return;
1548                         }
1549                 }
1550         }
1551
1552         pblk_wait_for_meta(pblk);
1553         flush_workqueue(pblk->close_wq);
1554 }
1555
1556 static void pblk_line_should_sync_meta(struct pblk *pblk)
1557 {
1558         if (pblk_rl_is_limit(&pblk->rl))
1559                 pblk_line_close_meta_sync(pblk);
1560 }
1561
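/*
 * Close a fully mapped line: release its meta slot, move it from OPEN to
 * CLOSED and queue it on the GC list matching its valid sector count. The
 * map bitmap is freed since it is no longer needed once the line is closed.
 */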
1562 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1563 {
1564         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1565         struct list_head *move_list;
1566
1567 #ifdef CONFIG_NVM_DEBUG
1568         struct pblk_line_meta *lm = &pblk->lm;
1569
1570         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1571                                 "pblk: corrupt closed line %d\n", line->id);
1572 #endif
1573
1574         spin_lock(&l_mg->free_lock);
1575         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1576         spin_unlock(&l_mg->free_lock);
1577
1578         spin_lock(&l_mg->gc_lock);
1579         spin_lock(&line->lock);
1580         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1581         line->state = PBLK_LINESTATE_CLOSED;
1582         move_list = pblk_line_gc_list(pblk, line);
1583
1584         list_add_tail(&line->list, move_list);
1585
1586         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1587         line->map_bitmap = NULL;
1588         line->smeta = NULL;
1589         line->emeta = NULL;
1590
1591         spin_unlock(&line->lock);
1592         spin_unlock(&l_mg->gc_lock);
1593
1594         pblk_gc_should_kick(pblk);
1595 }
1596
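/*
 * Finish the emeta of a line that is being closed: copy the valid sector
 * counts and the bad block bitmap into the emeta buffer, stamp the CRC and
 * queue the line on the emeta list for the close path to persist.
 */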
1597 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1598 {
1599         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1600         struct pblk_line_meta *lm = &pblk->lm;
1601         struct pblk_emeta *emeta = line->emeta;
1602         struct line_emeta *emeta_buf = emeta->buf;
1603
1604         /* No need for an exact vsc value; avoid a big line lock, take an approximation */
1605         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1606         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1607
1608         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1609         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1610
1611         spin_lock(&l_mg->close_lock);
1612         spin_lock(&line->lock);
1613         list_add_tail(&line->list, &l_mg->emeta_list);
1614         spin_unlock(&line->lock);
1615         spin_unlock(&l_mg->close_lock);
1616
1617         pblk_line_should_sync_meta(pblk);
1618 }
1619
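/* Workqueue wrapper around pblk_line_close(); returns the context to its pool */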
1620 void pblk_line_close_ws(struct work_struct *work)
1621 {
1622         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1623                                                                         ws);
1624         struct pblk *pblk = line_ws->pblk;
1625         struct pblk_line *line = line_ws->line;
1626
1627         pblk_line_close(pblk, line);
1628         mempool_free(line_ws, pblk->line_ws_pool);
1629 }
1630
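/*
 * Deferred bad block handling: mark the block addressed by the ppa passed
 * in @priv as grown bad in the device bad block table. The ppa is freed
 * here regardless of the outcome.
 */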
1631 void pblk_line_mark_bb(struct work_struct *work)
1632 {
1633         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1634                                                                         ws);
1635         struct pblk *pblk = line_ws->pblk;
1636         struct nvm_tgt_dev *dev = pblk->dev;
1637         struct ppa_addr *ppa = line_ws->priv;
1638         int ret;
1639
1640         ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
1641         if (ret) {
1642                 struct pblk_line *line;
1643                 int pos;
1644
1645                 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
1646                 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
1647
1648                 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
1649                                 line->id, pos);
1650         }
1651
1652         kfree(ppa);
1653         mempool_free(line_ws, pblk->line_ws_pool);
1654 }
1655
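/*
 * Queue @work on @wq with a pblk_line_ws context carrying @line and @priv
 * (e.g. pblk_line_mark_bb() receives a ppa through @priv). The context is
 * allocated atomically from the line_ws mempool; if the allocation fails,
 * the work is silently dropped.
 */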
1656 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1657                       void (*work)(struct work_struct *),
1658                       struct workqueue_struct *wq)
1659 {
1660         struct pblk_line_ws *line_ws;
1661
1662         line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
1663         if (!line_ws)
1664                 return;
1665
1666         line_ws->pblk = pblk;
1667         line_ws->line = line;
1668         line_ws->priv = priv;
1669
1670         INIT_WORK(&line_ws->ws, work);
1671         queue_work(wq, &line_ws->ws);
1672 }
1673
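/*
 * Take the write semaphore of the LUN that @ppa_list addresses. Note that a
 * timeout or an interrupted wait is only logged; the caller proceeds without
 * holding the semaphore in that case.
 */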
1674 static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1675                              int nr_ppas, int pos)
1676 {
1677         struct pblk_lun *rlun = &pblk->luns[pos];
1678         int ret;
1679
1680         /*
1681          * Only send one inflight I/O per LUN. Since we map at a page
1682          * granularity, all ppas in the I/O will map to the same LUN.
1683          */
1684 #ifdef CONFIG_NVM_DEBUG
1685         int i;
1686
1687         for (i = 1; i < nr_ppas; i++)
1688                 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1689                                 ppa_list[0].g.ch != ppa_list[i].g.ch);
1690 #endif
1691
1692         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1693         if (ret) {
1694                 switch (ret) {
1695                 case -ETIME:
1696                         pr_err("pblk: lun semaphore timed out\n");
1697                         break;
1698                 case -EINTR:
1699                         pr_err("pblk: lun semaphore interrupted\n");
1700                         break;
1701                 }
1702         }
1703 }
1704
1705 void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1706 {
1707         struct nvm_tgt_dev *dev = pblk->dev;
1708         struct nvm_geo *geo = &dev->geo;
1709         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1710
1711         __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1712 }
1713
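/*
 * Per-request variant: take the LUN semaphore at most once per request,
 * tracking already locked LUNs in @lun_bitmap so that pblk_up_rq() can
 * release them later.
 */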
1714 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1715                   unsigned long *lun_bitmap)
1716 {
1717         struct nvm_tgt_dev *dev = pblk->dev;
1718         struct nvm_geo *geo = &dev->geo;
1719         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1720
1721         /* If the LUN has been locked for this same request, do not attempt to
1722          * lock it again
1723          */
1724         if (test_and_set_bit(pos, lun_bitmap))
1725                 return;
1726
1727         __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1728 }
1729
1730 void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1731 {
1732         struct nvm_tgt_dev *dev = pblk->dev;
1733         struct nvm_geo *geo = &dev->geo;
1734         struct pblk_lun *rlun;
1735         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1736
1737 #ifdef CONFIG_NVM_DEBUG
1738         int i;
1739
1740         for (i = 1; i < nr_ppas; i++)
1741                 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1742                                 ppa_list[0].g.ch != ppa_list[i].g.ch);
1743 #endif
1744
1745         rlun = &pblk->luns[pos];
1746         up(&rlun->wr_sem);
1747 }
1748
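/* Release every LUN semaphore recorded in @lun_bitmap and free the bitmap */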
1749 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1750                 unsigned long *lun_bitmap)
1751 {
1752         struct nvm_tgt_dev *dev = pblk->dev;
1753         struct nvm_geo *geo = &dev->geo;
1754         struct pblk_lun *rlun;
1755         int nr_luns = geo->nr_luns;
1756         int bit = -1;
1757
1758         while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1759                 rlun = &pblk->luns[bit];
1760                 up(&rlun->wr_sem);
1761         }
1762
1763         kfree(lun_bitmap);
1764 }
1765
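/*
 * Update the L2P entry of @lba to point to @ppa. If the lba was previously
 * mapped to a device address, that address is invalidated on its line.
 */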
1766 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1767 {
1768         struct ppa_addr l2p_ppa;
1769
1770         /* logic error: lba out-of-bounds. Ignore update */
1771         if (!(lba < pblk->rl.nr_secs)) {
1772                 WARN(1, "pblk: corrupted L2P map request\n");
1773                 return;
1774         }
1775
1776         spin_lock(&pblk->trans_lock);
1777         l2p_ppa = pblk_trans_map_get(pblk, lba);
1778
1779         if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
1780                 pblk_map_invalidate(pblk, l2p_ppa);
1781
1782         pblk_trans_map_set(pblk, lba, ppa);
1783         spin_unlock(&pblk->trans_lock);
1784 }
1785
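/* L2P update for user writes: @ppa must point into the write buffer (cache) */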
1786 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1787 {
1788 #ifdef CONFIG_NVM_DEBUG
1789         /* Callers must ensure that the ppa points to a cache address */
1790         BUG_ON(!pblk_addr_in_cache(ppa));
1791         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1792 #endif
1793
1794         pblk_update_map(pblk, lba, ppa);
1795 }
1796
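/*
 * L2P update on the GC path. The mapping is only replaced if the current
 * entry still points to the line being garbage collected; otherwise the
 * GC copy is stale and the update is skipped. Returns 1 if the map was
 * updated, 0 otherwise.
 */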
1797 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1798                        struct pblk_line *gc_line)
1799 {
1800         struct ppa_addr l2p_ppa;
1801         int ret = 1;
1802
1803 #ifdef CONFIG_NVM_DEBUG
1804         /* Callers must ensure that the ppa points to a cache address */
1805         BUG_ON(!pblk_addr_in_cache(ppa));
1806         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1807 #endif
1808
1809         /* logic error: lba out-of-bounds. Ignore update */
1810         if (!(lba < pblk->rl.nr_secs)) {
1811                 WARN(1, "pblk: corrupted L2P map request\n");
1812                 return 0;
1813         }
1814
1815         spin_lock(&pblk->trans_lock);
1816         l2p_ppa = pblk_trans_map_get(pblk, lba);
1817
1818         /* Prevent updated entries from being overwritten by GC */
1819         if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
1820                                 pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
1821                 ret = 0;
1822                 goto out;
1823         }
1824
1825         pblk_trans_map_set(pblk, lba, ppa);
1826 out:
1827         spin_unlock(&pblk->trans_lock);
1828         return ret;
1829 }
1830
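/*
 * L2P update when a cache entry reaches the device. The map is only updated
 * if the entry still points to the cacheline recorded in @entry_line; if it
 * was overwritten while the data was in flight, the freshly written device
 * address is stale and gets invalidated instead.
 */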
1831 void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1832                          struct ppa_addr entry_line)
1833 {
1834         struct ppa_addr l2p_line;
1835
1836 #ifdef CONFIG_NVM_DEBUG
1837         /* Callers must ensure that the ppa points to a device address */
1838         BUG_ON(pblk_addr_in_cache(ppa));
1839 #endif
1840         /* Invalidate and discard padded entries */
1841         if (lba == ADDR_EMPTY) {
1842 #ifdef CONFIG_NVM_DEBUG
1843                 atomic_long_inc(&pblk->padded_wb);
1844 #endif
1845                 pblk_map_invalidate(pblk, ppa);
1846                 return;
1847         }
1848
1849         /* logic error: lba out-of-bounds. Ignore update */
1850         if (!(lba < pblk->rl.nr_secs)) {
1851                 WARN(1, "pblk: corrupted L2P map request\n");
1852                 return;
1853         }
1854
1855         spin_lock(&pblk->trans_lock);
1856         l2p_line = pblk_trans_map_get(pblk, lba);
1857
1858         /* Do not update L2P if the cacheline has been updated. In this case,
1859          * the mapped ppa must be invalidated
1860          */
1861         if (l2p_line.ppa != entry_line.ppa) {
1862                 if (!pblk_ppa_empty(ppa))
1863                         pblk_map_invalidate(pblk, ppa);
1864                 goto out;
1865         }
1866
1867 #ifdef CONFIG_NVM_DEBUG
1868         WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
1869 #endif
1870
1871         pblk_trans_map_set(pblk, lba, ppa);
1872 out:
1873         spin_unlock(&pblk->trans_lock);
1874 }
1875
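/* Look up a sequential lba range starting at @blba under the translation lock */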
1876 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1877                          sector_t blba, int nr_secs)
1878 {
1879         int i;
1880
1881         spin_lock(&pblk->trans_lock);
1882         for (i = 0; i < nr_secs; i++)
1883                 ppas[i] = pblk_trans_map_get(pblk, blba + i);
1884         spin_unlock(&pblk->trans_lock);
1885 }
1886
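/*
 * Look up an arbitrary list of lbas. Entries set to ADDR_EMPTY in @lba_list
 * are passed through as empty ppas; out-of-bounds lbas are warned about and
 * left untouched in @ppas.
 */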
1887 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1888                           u64 *lba_list, int nr_secs)
1889 {
1890         sector_t lba;
1891         int i;
1892
1893         spin_lock(&pblk->trans_lock);
1894         for (i = 0; i < nr_secs; i++) {
1895                 lba = lba_list[i];
1896                 if (lba == ADDR_EMPTY) {
1897                         ppas[i].ppa = ADDR_EMPTY;
1898                 } else {
1899                         /* logic error: lba out-of-bounds. Skip lookup */
1900                         if (!(lba < pblk->rl.nr_secs)) {
1901                                 WARN(1, "pblk: corrupted L2P map request\n");
1902                                 continue;
1903                         }
1904                         ppas[i] = pblk_trans_map_get(pblk, lba);
1905                 }
1906         }
1907         spin_unlock(&pblk->trans_lock);
1908 }