/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

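/* Finish a completed write request: clear the flush flag on every write
 * buffer entry covered by the request, complete the original bios chained
 * to those entries, free any pages added for padding, and advance the
 * write buffer sync pointer. Returns the new sync position.
 */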
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct bio *original_bio;
        struct pblk_rb *rwb = &pblk->rwb;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;
                int pos = c_ctx->sentry + i;
                int flags;

                w_ctx = pblk_rb_w_ctx(rwb, pos);
                flags = READ_ONCE(w_ctx->flags);

                if (flags & PBLK_FLUSH_ENTRY) {
                        flags &= ~PBLK_FLUSH_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_dec(&rwb->inflight_flush_point);
#endif
                }

                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return ret;
}

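/* Complete a write request that was previously queued out of order and has
 * now become contiguous with the sync pointer.
 */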
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}

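/* Process a write completion in write buffer order. Completions must be
 * applied in the order the entries were submitted; if this request is not
 * next in line, park it on compl_list, otherwise complete it and drain any
 * queued completions that follow it.
 */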
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa. After a write error,
 * the untouched sectors are marked as both mapped and invalid so that line
 * accounting stays consistent, and the line is flagged with a write error
 * so it can later be garbage collected.
 */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line *line;
        struct ppa_addr map_ppa = *ppa;
        u64 paddr;
        int done = 0;

        line = &pblk->lines[pblk_ppa_to_line(*ppa)];
        spin_lock(&line->lock);

        while (!done) {
                paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

                if (!test_and_set_bit(paddr, line->map_bitmap))
                        line->left_msecs--;

                if (!test_and_set_bit(paddr, line->invalid_bitmap))
                        le32_add_cpu(line->vsc, -1);

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        map_ppa.ppa++;
                        if (map_ppa.g.pg == geo->num_pg)
                                done = 1;
                } else {
                        map_ppa.m.sec++;
                        if (map_ppa.m.sec == geo->clba)
                                done = 1;
                }
        }

        line->w_err_gc->has_write_err = 1;
        spin_unlock(&line->lock);
}

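/* Make the write buffer entries of a failed write submittable again: drop
 * the entry's lba if it has been overwritten in the meantime, raise
 * PBLK_WRITTEN_DATA so the writer picks the entry up again, and release
 * the line reference taken for the original mapping.
 */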
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
                                  unsigned int nr_entries)
{
        struct pblk_rb *rb = &pblk->rwb;
        struct pblk_rb_entry *entry;
        struct pblk_line *line;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr ppa_l2p;
        int flags;
        unsigned int pos, i;

        spin_lock(&pblk->trans_lock);
        pos = sentry;
        for (i = 0; i < nr_entries; i++) {
                entry = &rb->entries[pos];
                w_ctx = &entry->w_ctx;

                /* Check if the lba has been overwritten */
                if (w_ctx->lba != ADDR_EMPTY) {
                        ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
                        if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
                                w_ctx->lba = ADDR_EMPTY;
                }

                /* Mark the entry as submittable again */
                flags = READ_ONCE(w_ctx->flags);
                flags |= PBLK_WRITTEN_DATA;
                /* Release flags on write context. Protect from writes */
                smp_store_release(&w_ctx->flags, flags);

                /* Decrease the reference count to the line as we will
                 * re-map these entries
                 */
                line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }
        spin_unlock(&pblk->trans_lock);
}

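/* Remember the sectors of a failed write so that the write thread
 * resubmits them before taking on new data.
 */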
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *r_ctx;

        r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
        if (!r_ctx)
                return;

        r_ctx->lun_bitmap = NULL;
        r_ctx->sentry = c_ctx->sentry;
        r_ctx->nr_valid = c_ctx->nr_valid;
        r_ctx->nr_padded = c_ctx->nr_padded;

        spin_lock(&pblk->resubmit_lock);
        list_add_tail(&r_ctx->list, &pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

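/* Workqueue handler for write error recovery: log the error, map out the
 * rest of the failed chunk, queue the affected sectors for resubmission
 * and release the resources held by the failed request.
 */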
static void pblk_submit_rec(struct work_struct *work)
{
        struct pblk_rec_ctx *recovery =
                        container_of(work, struct pblk_rec_ctx, ws_rec);
        struct pblk *pblk = recovery->pblk;
        struct nvm_rq *rqd = recovery->rqd;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct ppa_addr *ppa_list;

        pblk_log_write_err(pblk, rqd);

        if (rqd->nr_ppas == 1)
                ppa_list = &rqd->ppa_addr;
        else
                ppa_list = rqd->ppa_list;

        pblk_map_remaining(pblk, ppa_list);
        pblk_queue_resubmit(pblk, c_ctx);

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);
        mempool_free(recovery, &pblk->rec_pool);

        atomic_dec(&pblk->inflight_io);
}

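/* Defer write error handling to the recovery workqueue, since the
 * completion path may run in interrupt context.
 */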
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_rec_ctx *recovery;

        recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
        if (!recovery) {
                pblk_err(pblk, "could not allocate recovery work\n");
                return;
        }

        recovery->pblk = pblk;
        recovery->rqd = rqd;

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->close_wq, &recovery->ws_rec);
}

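/* Completion handler for user data writes */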
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_end_w_fail(pblk, rqd);
                return;
        }
#ifdef CONFIG_NVM_PBLK_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        pblk_complete_write(pblk, rqd, c_ctx);
        atomic_dec(&pblk->inflight_io);
}

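/* Completion handler for line metadata (emeta) writes. Once all emeta
 * sectors of the line have synced, schedule the line close work.
 */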
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        int sync;

        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
                line->w_err_gc->has_write_err = 1;
        }

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                                GFP_ATOMIC, pblk->close_wq);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
}

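/* Initialize the common fields of a write request and allocate the
 * DMA-coherent buffers holding its metadata and ppa lists.
 */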
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs,
                           nvm_end_io_fn(*end_io))
{
        struct nvm_tgt_dev *dev = pblk->dev;

        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
        rqd->private = pblk;
        rqd->end_io = end_io;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

        return 0;
}

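/* Map the buffered sectors of a write request to physical addresses on the
 * current data line. If the next line still has blocks pending erase, map
 * through the erase-aware path, which returns a ppa to erase.
 */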
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
        else
                pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                                        valid, erase_ppa);

        return 0;
}

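/* Compute how many buffered sectors to write out, given the sectors
 * available and any outstanding flush requirement.
 */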
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}

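/* Write out the next chunk of end-of-line metadata (emeta) for a line:
 * allocate pages on the line, map them into a request and submit it. On
 * submission failure the page allocation is rolled back.
 */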
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct pblk_g_ctx *m_ctx;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->csecs;
        data = ((void *)emeta->buf) + emeta->mem;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pblk_err(pblk, "failed to map emeta io\n");
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_bio;

        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        spin_lock(&l_mg->close_lock);
        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0])
                list_del(&meta_line->list);
        spin_unlock(&l_mg->close_lock);

        pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        list_add(&meta_line->list, &l_mg->emeta_list);
        spin_unlock(&l_mg->close_lock);
fail_free_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        return ret;
}

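/* Decide whether a metadata I/O should be scheduled alongside the current
 * data I/O, keeping the two about half the LUN distance apart to limit
 * contention (see the comment below for the placement policy).
 */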
static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
                                       struct pblk_line *meta_line,
                                       struct nvm_rq *data_rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
        struct pblk_line *data_line = pblk_line_get_data(pblk);
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int pos_opt;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regard to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result in metadata and data I/Os colliding.
         * In this case, modify the distance so that it is no longer optimal,
         * but moves back toward the optimal position.
         */
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

        if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
                                test_bit(pos_opt, data_line->blk_bitmap))
                return true;

        if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
                data_line->meta_distance--;

        return false;
}

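/* Return the line whose emeta should be written alongside this data
 * request, or NULL if none is pending or the placement is unfavourable.
 */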
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
                                                    struct nvm_rq *data_rqd)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (meta_line->emeta->mem >= lm->emeta_len[0]) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
                return NULL;

        return meta_line;
}

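/* Submit a fully formed write request: the data I/O for the current line,
 * an asynchronous erase for the next line if one is due, and a metadata
 * I/O for a previous line when appropriate.
 */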
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr erase_ppa;
        struct pblk_line *meta_line;
        int err;

        pblk_ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
        if (err) {
                pblk_err(pblk, "could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        meta_line = pblk_should_submit_meta_io(pblk, rqd);

        /* Submit data write for current data line */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                pblk_err(pblk, "data I/O submission failed: %d\n", err);
                return NVM_IO_ERR;
        }

        if (!pblk_ppa_empty(erase_ppa)) {
                /* Submit erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        if (meta_line) {
                /* Submit metadata write for previous data line */
                err = pblk_submit_meta_io(pblk, meta_line);
                if (err) {
                        pblk_err(pblk, "metadata I/O submission failed: %d\n",
                                        err);
                        return NVM_IO_ERR;
                }
        }

        return NVM_IO_OK;
}

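/* Free the pages that were added to pad the write request */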
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
}

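/* Main write path: take sectors from the write buffer, resubmitting failed
 * writes first, build a write request and send it down. Returns 0 if work
 * was done and 1 if the write thread should go to sleep.
 */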
static int pblk_submit_write(struct pblk *pblk)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush;
        unsigned long pos;
        unsigned int resubmit;

        spin_lock(&pblk->resubmit_lock);
        resubmit = !list_empty(&pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

        /* Resubmit failed writes first */
        if (resubmit) {
                struct pblk_c_ctx *r_ctx;

                spin_lock(&pblk->resubmit_lock);
                r_ctx = list_first_entry(&pblk->resubmit_list,
                                        struct pblk_c_ctx, list);
                list_del(&r_ctx->list);
                spin_unlock(&pblk->resubmit_lock);

                secs_avail = r_ctx->nr_valid;
                pos = r_ctx->sentry;

                pblk_prepare_resubmit(pblk, pos, secs_avail);
                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                secs_avail);

                kfree(r_ctx);
        } else {
                /* If there are no sectors in the cache,
                 * flushes (bios without data) will be completed
                 * by the cache threads
                 */
                secs_avail = pblk_rb_read_count(&pblk->rwb);
                if (!secs_avail)
                        return 1;

                secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
                if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
                        return 1;

                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                        secs_to_flush);
                if (secs_to_sync > pblk->max_write_pgs) {
                        pblk_err(pblk, "bad buffer sync calculation\n");
                        return 1;
                }

                secs_to_com = (secs_to_sync > secs_avail) ?
                        secs_avail : secs_to_sync;
                pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
        }

        bio = bio_alloc(GFP_KERNEL, secs_to_sync);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rqd->bio = bio;

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
                                                                secs_avail)) {
                pblk_err(pblk, "corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return 1;
}

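/* Write thread: keep draining the write buffer, sleeping whenever there is
 * not enough data to form a write request.
 */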
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_submit_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}