1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
15
16 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
17 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
18
19 void z_erofs_exit_zip_subsystem(void)
20 {
21         destroy_workqueue(z_erofs_workqueue);
22         kmem_cache_destroy(z_erofs_workgroup_cachep);
23 }
24
25 static inline int init_unzip_workqueue(void)
26 {
27         const unsigned onlinecpus = num_possible_cpus();
28
29         /*
30          * we don't need too many threads; limiting the number of
31          * threads can improve scheduling performance.
32          */
33         z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
34                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
35                 onlinecpus + onlinecpus / 4);
36
37         return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
38 }
39
40 int z_erofs_init_zip_subsystem(void)
41 {
42         z_erofs_workgroup_cachep =
43                 kmem_cache_create("erofs_compress",
44                 Z_EROFS_WORKGROUP_SIZE, 0,
45                 SLAB_RECLAIM_ACCOUNT, NULL);
46
47         if (z_erofs_workgroup_cachep != NULL) {
48                 if (!init_unzip_workqueue())
49                         return 0;
50
51                 kmem_cache_destroy(z_erofs_workgroup_cachep);
52         }
53         return -ENOMEM;
54 }
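
/*
 * Illustrative sketch (not part of the original driver): one way the
 * init/exit pair above might be wired into a module init path.  The real
 * call sites live elsewhere in the erofs code, so the function names
 * below are hypothetical and the whole block is compiled out.
 */
#if 0
static int __init z_erofs_demo_init(void)
{
        int err = z_erofs_init_zip_subsystem();

        if (err)
                return err;     /* cache or workqueue allocation failed */

        /* ... register the filesystem itself here ... */
        return 0;
}

static void __exit z_erofs_demo_exit(void)
{
        /* ... unregister the filesystem first ... */
        z_erofs_exit_zip_subsystem();
}
#endif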
55
56 enum z_erofs_vle_work_role {
57         Z_EROFS_VLE_WORK_SECONDARY,
58         Z_EROFS_VLE_WORK_PRIMARY,
59         /*
60          * The current work was the tail of an existing chain, and all
61          * previously processed chained works have been decided to hook up
62          * to it. A new chain should be created for the remaining unprocessed
63          * works; therefore, different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
64          * the next work cannot reuse the whole page in the following scenario:
65          *  ________________________________________________________________
66          * |      tail (partial) page     |       head (partial) page       |
67          * |  (belongs to the next work)  |  (belongs to the current work)  |
68          * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
69          */
70         Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
71         /*
72          * The current work has been linked with the processed chained works,
73          * and can also be linked with the potential remaining works, which
74          * means if the processing page is the tail partial page of the work,
75          * the current work can safely use the whole page (since the next work
76          * is under control) for in-place decompression, as illustrated below:
77          *  ________________________________________________________________
78          * |  tail (partial) page  |          head (partial) page           |
79          * | (of the current work) |         (of the previous work)         |
80          * |  PRIMARY_FOLLOWED or  |                                        |
81          * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
82          *
83          * [  (*) the above page can be used for the current work itself.  ]
84          */
85         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
86         Z_EROFS_VLE_WORK_MAX
87 };
88
89 struct z_erofs_vle_work_builder {
90         enum z_erofs_vle_work_role role;
91         /*
92          * 'hosted = false' means that the current workgroup doesn't belong to
93          * the owned chained workgroups. In other words, it is none of our
94          * business to submit this workgroup.
95          */
96         bool hosted;
97
98         struct z_erofs_vle_workgroup *grp;
99         struct z_erofs_vle_work *work;
100         struct z_erofs_pagevec_ctor vector;
101
102         /* pages used for reading the compressed data */
103         struct page **compressed_pages;
104         unsigned compressed_deficit;
105 };
106
107 #define VLE_WORK_BUILDER_INIT() \
108         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
109
110 #ifdef EROFS_FS_HAS_MANAGED_CACHE
111
112 static bool grab_managed_cache_pages(struct address_space *mapping,
113                                      erofs_blk_t start,
114                                      struct page **compressed_pages,
115                                      int clusterblks,
116                                      bool reserve_allocation)
117 {
118         bool noio = true;
119         unsigned int i;
120
121         /* TODO: optimize by introducing find_get_pages_range */
122         for (i = 0; i < clusterblks; ++i) {
123                 struct page *page, *found;
124
125                 if (READ_ONCE(compressed_pages[i]) != NULL)
126                         continue;
127
128                 page = found = find_get_page(mapping, start + i);
129                 if (found == NULL) {
130                         noio = false;
131                         if (!reserve_allocation)
132                                 continue;
133                         page = EROFS_UNALLOCATED_CACHED_PAGE;
134                 }
135
136                 if (NULL == cmpxchg(compressed_pages + i, NULL, page))
137                         continue;
138
139                 if (found != NULL)
140                         put_page(found);
141         }
142         return noio;
143 }
144
145 /* called by erofs_shrinker to get rid of all compressed_pages */
146 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
147                                        struct erofs_workgroup *egrp)
148 {
149         struct z_erofs_vle_workgroup *const grp =
150                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
151         struct address_space *const mapping = sbi->managed_cache->i_mapping;
152         const int clusterpages = erofs_clusterpages(sbi);
153         int i;
154
155         /*
156          * the refcount of the workgroup is now frozen at 1,
157          * therefore no need to worry about available decompression users.
158          */
159         for (i = 0; i < clusterpages; ++i) {
160                 struct page *page = grp->compressed_pages[i];
161
162                 if (page == NULL || page->mapping != mapping)
163                         continue;
164
165                 /* block other users from reclaiming or migrating the page */
166                 if (!trylock_page(page))
167                         return -EBUSY;
168
169                 /* barrier is implied in the following 'unlock_page' */
170                 WRITE_ONCE(grp->compressed_pages[i], NULL);
171
172                 set_page_private(page, 0);
173                 ClearPagePrivate(page);
174
175                 unlock_page(page);
176                 put_page(page);
177         }
178         return 0;
179 }
180
181 int erofs_try_to_free_cached_page(struct address_space *mapping,
182                                   struct page *page)
183 {
184         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
185         const unsigned int clusterpages = erofs_clusterpages(sbi);
186
187         struct z_erofs_vle_workgroup *grp;
188         int ret = 0;    /* 0 - busy */
189
190         /* prevent the workgroup from being freed */
191         rcu_read_lock();
192         grp = (void *)page_private(page);
193
194         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
195                 unsigned int i;
196
197                 for (i = 0; i < clusterpages; ++i) {
198                         if (grp->compressed_pages[i] == page) {
199                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
200                                 ret = 1;
201                                 break;
202                         }
203                 }
204                 erofs_workgroup_unfreeze(&grp->obj, 1);
205         }
206         rcu_read_unlock();
207
208         if (ret) {
209                 ClearPagePrivate(page);
210                 put_page(page);
211         }
212         return ret;
213 }
214 #endif
215
216 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
217 static inline bool try_to_reuse_as_compressed_page(
218         struct z_erofs_vle_work_builder *b,
219         struct page *page)
220 {
221         while (b->compressed_deficit) {
222                 --b->compressed_deficit;
223                 if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
224                         return true;
225         }
226
227         return false;
228 }
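
/*
 * Minimal restatement (illustrative only, compiled out) of the lock-free
 * claim used above and by grab_managed_cache_pages(): a NULL slot is
 * claimed by publishing a page pointer with cmpxchg(), and the caller
 * that observes NULL as the returned old value is the unique owner.
 */
#if 0
static bool demo_claim_slot(struct page **slots, unsigned int i,
                            struct page *newpage)
{
        /* true only for the one thread that actually filled the slot */
        return cmpxchg(&slots[i], NULL, newpage) == NULL;
}
#endif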
229
230 /* callers must be with work->lock held */
231 static int z_erofs_vle_work_add_page(struct z_erofs_vle_work_builder *builder,
232                                      struct page *page,
233                                      enum z_erofs_page_type type,
234                                      bool pvec_safereuse)
235 {
236         int ret;
237
238         /* give priority for the compressed data storage */
239         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
240                 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
241                 try_to_reuse_as_compressed_page(builder, page))
242                 return 0;
243
244         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector, page, type,
245                                            pvec_safereuse);
246         builder->work->vcnt += (unsigned)ret;
247         return ret ? 0 : -EAGAIN;
248 }
249
250 static enum z_erofs_vle_work_role
251 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
252                        z_erofs_vle_owned_workgrp_t *owned_head,
253                        bool *hosted)
254 {
255         DBG_BUGON(*hosted == true);
256
257         /* let's claim the following types of workgroup */
258 retry:
259         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
260                 /* type 1, nil workgroup */
261                 if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
262                         Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
263                         goto retry;
264
265                 *owned_head = grp;
266                 *hosted = true;
267                 /* lucky, I am the followee :) */
268                 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
269
270         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
271                 /*
272                  * type 2, link to the end of an existing open chain,
273                  * be careful that its submission itself is governed
274                  * by the original owned chain.
275                  */
276                 if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
277                         Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
278                         goto retry;
279                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
280                 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
281         }
282
283         return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
284 }
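
/*
 * Caller-side summary (illustrative only, compiled out) of the three
 * outcomes above: FOLLOWED means this thread now owns the chain and will
 * submit the workgroup itself; HOOKED means the workgroup was appended to
 * an existing open chain whose owner handles submission; plain PRIMARY
 * means the chain could not be claimed this time.
 */
#if 0
static void demo_interpret_claim(enum z_erofs_vle_work_role role, bool hosted)
{
        switch (role) {
        case Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED:
                DBG_BUGON(!hosted);     /* submission is our business */
                break;
        case Z_EROFS_VLE_WORK_PRIMARY_HOOKED:
                DBG_BUGON(hosted);      /* governed by the original chain */
                break;
        default:
                DBG_BUGON(hosted);      /* not claimed at all */
                break;
        }
}
#endif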
285
286 static struct z_erofs_vle_work *
287 z_erofs_vle_work_lookup(struct super_block *sb,
288                         pgoff_t idx, unsigned pageofs,
289                         struct z_erofs_vle_workgroup **grp_ret,
290                         enum z_erofs_vle_work_role *role,
291                         z_erofs_vle_owned_workgrp_t *owned_head,
292                         bool *hosted)
293 {
294         bool tag, primary;
295         struct erofs_workgroup *egrp;
296         struct z_erofs_vle_workgroup *grp;
297         struct z_erofs_vle_work *work;
298
299         egrp = erofs_find_workgroup(sb, idx, &tag);
300         if (egrp == NULL) {
301                 *grp_ret = NULL;
302                 return NULL;
303         }
304
305         *grp_ret = grp = container_of(egrp,
306                 struct z_erofs_vle_workgroup, obj);
307
308         work = z_erofs_vle_grab_work(grp, pageofs);
309         /* if multiref is disabled, `primary' is always true */
310         primary = true;
311
312         if (work->pageofs != pageofs) {
313                 DBG_BUGON(1);
314                 erofs_workgroup_put(egrp);
315                 return ERR_PTR(-EIO);
316         }
317
318         /*
319          * lock must be taken first to avoid grp->next == NIL between
320          * claiming workgroup and adding pages:
321          *                        grp->next != NIL
322          *   grp->next = NIL
323          *   mutex_unlock_all
324          *                        mutex_lock(&work->lock)
325          *                        add all pages to pagevec
326          *
327          * [correct locking case 1]:
328          *   mutex_lock(grp->work[a])
329          *   ...
330          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
331          *   ...                          *role = SECONDARY
332          *                                add all pages to pagevec
333          *                                ...
334          *                                mutex_unlock(grp->work[c])
335          *   mutex_lock(grp->work[c])
336          *   ...
337          *   grp->next = NIL
338          *   mutex_unlock_all
339          *
340          * [correct locking case 2]:
341          *   mutex_lock(grp->work[b])
342          *   ...
343          *   mutex_lock(grp->work[a])
344          *   ...
345          *   mutex_lock(grp->work[c])
346          *   ...
347          *   grp->next = NIL
348          *   mutex_unlock_all
349          *                                mutex_lock(grp->work[a])
350          *                                *role = PRIMARY_OWNER
351          *                                add all pages to pagevec
352          *                                ...
353          */
354         mutex_lock(&work->lock);
355
356         *hosted = false;
357         if (!primary)
358                 *role = Z_EROFS_VLE_WORK_SECONDARY;
359         else    /* claim the workgroup if possible */
360                 *role = try_to_claim_workgroup(grp, owned_head, hosted);
361         return work;
362 }
363
364 static struct z_erofs_vle_work *
365 z_erofs_vle_work_register(struct super_block *sb,
366                           struct z_erofs_vle_workgroup **grp_ret,
367                           struct erofs_map_blocks *map,
368                           pgoff_t index, unsigned pageofs,
369                           enum z_erofs_vle_work_role *role,
370                           z_erofs_vle_owned_workgrp_t *owned_head,
371                           bool *hosted)
372 {
373         bool newgrp = false;
374         struct z_erofs_vle_workgroup *grp = *grp_ret;
375         struct z_erofs_vle_work *work;
376
377         /* if multiref is disabled, grp should never be nullptr */
378         if (unlikely(grp)) {
379                 DBG_BUGON(1);
380                 return ERR_PTR(-EINVAL);
381         }
382
383         /* no available workgroup, let's allocate one */
384         grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
385         if (unlikely(grp == NULL))
386                 return ERR_PTR(-ENOMEM);
387
388         grp->obj.index = index;
389         grp->llen = map->m_llen;
390
391         z_erofs_vle_set_workgrp_fmt(grp,
392                 (map->m_flags & EROFS_MAP_ZIPPED) ?
393                         Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
394                         Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
395         atomic_set(&grp->obj.refcount, 1);
396
397         /* new workgrps have been claimed as type 1 */
398         WRITE_ONCE(grp->next, *owned_head);
399         /* primary and followed work for all new workgrps */
400         *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
401         /* it should be submitted by ourselves */
402         *hosted = true;
403
404         newgrp = true;
405         work = z_erofs_vle_grab_primary_work(grp);
406         work->pageofs = pageofs;
407
408         mutex_init(&work->lock);
409
410         if (newgrp) {
411                 int err = erofs_register_workgroup(sb, &grp->obj, 0);
412
413                 if (err) {
414                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
415                         return ERR_PTR(-EAGAIN);
416                 }
417         }
418
419         *owned_head = *grp_ret = grp;
420
421         mutex_lock(&work->lock);
422         return work;
423 }
424
425 static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
426                                          unsigned int llen)
427 {
428         while (1) {
429                 unsigned int orig_llen = grp->llen;
430
431                 if (orig_llen >= llen || orig_llen ==
432                         cmpxchg(&grp->llen, orig_llen, llen))
433                         break;
434         }
435 }
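
/*
 * Spelled-out equivalent (illustrative only, compiled out) of the update
 * loop above: it ends either because the stored length is already large
 * enough or because this thread's cmpxchg() won; after a lost race the
 * freshly observed value is re-checked instead of rereading blindly.
 */
#if 0
static void demo_update_llen(struct z_erofs_vle_workgroup *grp,
                             unsigned int llen)
{
        unsigned int cur = grp->llen;

        while (cur < llen) {
                unsigned int old = cmpxchg(&grp->llen, cur, llen);

                if (old == cur)
                        break;          /* we published llen */
                cur = old;              /* lost the race, re-check */
        }
}
#endif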
436
437 #define builder_is_hooked(builder) \
438         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
439
440 #define builder_is_followed(builder) \
441         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
442
443 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
444                                        struct super_block *sb,
445                                        struct erofs_map_blocks *map,
446                                        z_erofs_vle_owned_workgrp_t *owned_head)
447 {
448         const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
449         const erofs_blk_t index = erofs_blknr(map->m_pa);
450         const unsigned pageofs = map->m_la & ~PAGE_MASK;
451         struct z_erofs_vle_workgroup *grp;
452         struct z_erofs_vle_work *work;
453
454         DBG_BUGON(builder->work != NULL);
455
456         /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained work */
457         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
458         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
459
460         DBG_BUGON(erofs_blkoff(map->m_pa));
461
462 repeat:
463         work = z_erofs_vle_work_lookup(sb, index,
464                 pageofs, &grp, &builder->role, owned_head, &builder->hosted);
465         if (work != NULL) {
466                 __update_workgrp_llen(grp, map->m_llen);
467                 goto got_it;
468         }
469
470         work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
471                 &builder->role, owned_head, &builder->hosted);
472
473         if (unlikely(work == ERR_PTR(-EAGAIN)))
474                 goto repeat;
475
476         if (unlikely(IS_ERR(work)))
477                 return PTR_ERR(work);
478 got_it:
479         z_erofs_pagevec_ctor_init(&builder->vector,
480                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
481
482         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
483                 /* enable possibly in-place decompression */
484                 builder->compressed_pages = grp->compressed_pages;
485                 builder->compressed_deficit = clusterpages;
486         } else {
487                 builder->compressed_pages = NULL;
488                 builder->compressed_deficit = 0;
489         }
490
491         builder->grp = grp;
492         builder->work = work;
493         return 0;
494 }
495
496 /*
497  * keep in mind that referenced workgroups are freed only after
498  * an RCU grace period, so rcu_read_lock() can prevent a
499  * workgroup from being freed.
500  */
501 static void z_erofs_rcu_callback(struct rcu_head *head)
502 {
503         struct z_erofs_vle_work *work = container_of(head,
504                 struct z_erofs_vle_work, rcu);
505         struct z_erofs_vle_workgroup *grp =
506                 z_erofs_vle_work_workgroup(work, true);
507
508         kmem_cache_free(z_erofs_workgroup_cachep, grp);
509 }
510
511 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
512 {
513         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
514                 struct z_erofs_vle_workgroup, obj);
515         struct z_erofs_vle_work *const work = &vgrp->work;
516
517         call_rcu(&work->rcu, z_erofs_rcu_callback);
518 }
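
/*
 * Generic restatement (illustrative only, compiled out) of the deferral
 * above: the object embeds a struct rcu_head, the final put hands it to
 * call_rcu(), and the real free runs only after every rcu_read_lock()
 * section that might still see the object (such as
 * erofs_try_to_free_cached_page() above) has finished.
 */
#if 0
struct demo_obj {
        struct rcu_head rcu;
        /* payload ... */
};

static void demo_free_cb(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_obj, rcu));
}

static void demo_free(struct demo_obj *obj)
{
        call_rcu(&obj->rcu, demo_free_cb);
}
#endif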
519
520 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
521         struct z_erofs_vle_work *work __maybe_unused)
522 {
523         erofs_workgroup_put(&grp->obj);
524 }
525
526 void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
527 {
528         struct z_erofs_vle_workgroup *grp =
529                 z_erofs_vle_work_workgroup(work, true);
530
531         __z_erofs_vle_work_release(grp, work);
532 }
533
534 static inline bool
535 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
536 {
537         struct z_erofs_vle_work *work = builder->work;
538
539         if (work == NULL)
540                 return false;
541
542         z_erofs_pagevec_ctor_exit(&builder->vector, false);
543         mutex_unlock(&work->lock);
544
545         /*
546          * if all pending pages are added, don't hold the work reference
547          * any longer if the current work isn't hosted by ourselves.
548          */
549         if (!builder->hosted)
550                 __z_erofs_vle_work_release(builder->grp, work);
551
552         builder->work = NULL;
553         builder->grp = NULL;
554         return true;
555 }
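
/*
 * Lifecycle sketch (illustrative only, compiled out) of the builder API
 * above, mirroring how z_erofs_do_read_page() drives it further below:
 * iter_begin() looks up or registers the work and returns with its lock
 * held, add_page() queues a file page, and iter_end() unlocks the work
 * and drops it when it is not hosted by us.  Error handling and the
 * map/owned_head setup are assumed to exist already.
 */
#if 0
static int demo_build_one_page(struct z_erofs_vle_work_builder *builder,
                               struct super_block *sb,
                               struct erofs_map_blocks *map,
                               z_erofs_vle_owned_workgrp_t *owned_head,
                               struct page *page)
{
        int err = z_erofs_vle_work_iter_begin(builder, sb, map, owned_head);

        if (err)
                return err;

        /* page type simplified for brevity */
        err = z_erofs_vle_work_add_page(builder, page,
                                        Z_EROFS_PAGE_TYPE_EXCLUSIVE,
                                        builder_is_followed(builder));

        (void)z_erofs_vle_work_iter_end(builder);
        return err;
}
#endif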
556
557 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
558                                                gfp_t gfp)
559 {
560         struct page *page = erofs_allocpage(pagepool, gfp);
561
562         if (unlikely(page == NULL))
563                 return NULL;
564
565         page->mapping = Z_EROFS_MAPPING_STAGING;
566         return page;
567 }
568
569 struct z_erofs_vle_frontend {
570         struct inode *const inode;
571
572         struct z_erofs_vle_work_builder builder;
573         struct erofs_map_blocks_iter m_iter;
574
575         z_erofs_vle_owned_workgrp_t owned_head;
576
577         bool initial;
578 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
579         erofs_off_t cachedzone_la;
580 #endif
581 };
582
583 #define VLE_FRONTEND_INIT(__i) { \
584         .inode = __i, \
585         .m_iter = { \
586                 { .m_llen = 0, .m_plen = 0 }, \
587                 .mpage = NULL \
588         }, \
589         .builder = VLE_WORK_BUILDER_INIT(), \
590         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
591         .initial = true, }
592
593 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
594                                 struct page *page,
595                                 struct list_head *page_pool)
596 {
597         struct super_block *const sb = fe->inode->i_sb;
598         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
599         struct erofs_map_blocks_iter *const m = &fe->m_iter;
600         struct erofs_map_blocks *const map = &m->map;
601         struct z_erofs_vle_work_builder *const builder = &fe->builder;
602         const loff_t offset = page_offset(page);
603
604         bool tight = builder_is_hooked(builder);
605         struct z_erofs_vle_work *work = builder->work;
606
607 #ifdef EROFS_FS_HAS_MANAGED_CACHE
608         struct address_space *const mngda = sbi->managed_cache->i_mapping;
609         struct z_erofs_vle_workgroup *grp;
610         bool noio_outoforder;
611 #endif
612
613         enum z_erofs_page_type page_type;
614         unsigned cur, end, spiltted, index;
615         int err = 0;
616
617         /* register locked file pages as online pages in pack */
618         z_erofs_onlinepage_init(page);
619
620         spiltted = 0;
621         end = PAGE_SIZE;
622 repeat:
623         cur = end - 1;
624
625         /* lucky, within the range of the current map_blocks */
626         if (offset + cur >= map->m_la &&
627                 offset + cur < map->m_la + map->m_llen) {
628                 /* didn't get a valid unzip work previously (very rare) */
629                 if (!builder->work)
630                         goto restart_now;
631                 goto hitted;
632         }
633
634         /* move on to the next map_blocks */
635         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
636
637         if (z_erofs_vle_work_iter_end(builder))
638                 fe->initial = false;
639
640         map->m_la = offset + cur;
641         map->m_llen = 0;
642         err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
643         if (unlikely(err))
644                 goto err_out;
645
646 restart_now:
647         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
648                 goto hitted;
649
650         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
651         DBG_BUGON(erofs_blkoff(map->m_pa));
652
653         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
654         if (unlikely(err))
655                 goto err_out;
656
657 #ifdef EROFS_FS_HAS_MANAGED_CACHE
658         grp = fe->builder.grp;
659
660         /* let's do out-of-order decompression for noio */
661         noio_outoforder = grab_managed_cache_pages(mngda,
662                 erofs_blknr(map->m_pa),
663                 grp->compressed_pages, erofs_blknr(map->m_plen),
664                 /* compressed page caching selection strategy */
665                 fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
666                         map->m_la < fe->cachedzone_la : 0));
667
668         if (noio_outoforder && builder_is_followed(builder))
669                 builder->role = Z_EROFS_VLE_WORK_PRIMARY;
670 #endif
671
672         tight &= builder_is_hooked(builder);
673         work = builder->work;
674 hitted:
675         cur = end - min_t(unsigned, offset + end - map->m_la, end);
676         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
677                 zero_user_segment(page, cur, end);
678                 goto next_part;
679         }
680
681         /* let's derive page type */
682         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
683                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
684                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
685                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
686
687         if (cur)
688                 tight &= builder_is_followed(builder);
689
690 retry:
691         err = z_erofs_vle_work_add_page(builder, page, page_type,
692                                         builder_is_followed(builder));
693         /* should allocate an additional staging page for pagevec */
694         if (err == -EAGAIN) {
695                 struct page *const newpage =
696                         __stagingpage_alloc(page_pool, GFP_NOFS);
697
698                 err = z_erofs_vle_work_add_page(builder,
699                         newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
700                 if (likely(!err))
701                         goto retry;
702         }
703
704         if (unlikely(err))
705                 goto err_out;
706
707         index = page->index - map->m_la / PAGE_SIZE;
708
709         /* FIXME! avoid the last redundant fixup & endio */
710         z_erofs_onlinepage_fixup(page, index, true);
711
712         /* bump up the number of spiltted parts of a page */
713         ++spiltted;
714         /* also update nr_pages */
715         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
716 next_part:
717         /* can be used for verification */
718         map->m_llen = offset + cur - map->m_la;
719
720         end = cur;
721         if (end > 0)
722                 goto repeat;
723
724 out:
725         /* FIXME! avoid the last redundant fixup & endio */
726         z_erofs_onlinepage_endio(page);
727
728         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
729                 __func__, page, spiltted, map->m_llen);
730         return err;
731
732         /* if some error occurred while processing this page */
733 err_out:
734         SetPageError(page);
735         goto out;
736 }
737
738 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
739 {
740         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
741         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
742         bool background = tagptr_unfold_tags(t);
743
744         if (!background) {
745                 unsigned long flags;
746
747                 spin_lock_irqsave(&io->u.wait.lock, flags);
748                 if (!atomic_add_return(bios, &io->pending_bios))
749                         wake_up_locked(&io->u.wait);
750                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
751                 return;
752         }
753
754         if (!atomic_add_return(bios, &io->pending_bios))
755                 queue_work(z_erofs_workqueue, &io->u.work);
756 }
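
/*
 * Round-trip sketch (illustrative only, compiled out) of the 1-bit tagged
 * pointer decoded above: tagptr_fold() packs the "background" flag into
 * the low bit(s) of the io descriptor pointer, so a single
 * bio->bi_private value names both the descriptor and the completion
 * mode, and tagptr_unfold_*() recovers them on the endio side.
 */
#if 0
static void demo_tagptr_roundtrip(struct z_erofs_vle_unzip_io *io,
                                  bool background)
{
        tagptr1_t t = tagptr_fold(tagptr1_t, io, background);
        void *raw = tagptr_cast_ptr(t); /* what ends up in bi_private */

        t = tagptr_init(tagptr1_t, raw);
        DBG_BUGON(tagptr_unfold_ptr(t) != io);
        DBG_BUGON(tagptr_unfold_tags(t) != background);
}
#endif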
757
758 static inline void z_erofs_vle_read_endio(struct bio *bio)
759 {
760         const blk_status_t err = bio->bi_status;
761         unsigned i;
762         struct bio_vec *bvec;
763 #ifdef EROFS_FS_HAS_MANAGED_CACHE
764         struct address_space *mngda = NULL;
765 #endif
766
767         bio_for_each_segment_all(bvec, bio, i) {
768                 struct page *page = bvec->bv_page;
769                 bool cachemngd = false;
770
771                 DBG_BUGON(PageUptodate(page));
772                 DBG_BUGON(!page->mapping);
773
774 #ifdef EROFS_FS_HAS_MANAGED_CACHE
775                 if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
776                         struct inode *const inode = page->mapping->host;
777                         struct super_block *const sb = inode->i_sb;
778
779                         mngda = EROFS_SB(sb)->managed_cache->i_mapping;
780                 }
781
782                 /*
783                  * If mngda has not been set up yet, it stays NULL; however,
784                  * page->mapping is never NULL if everything works properly.
785                  */
786                 cachemngd = (page->mapping == mngda);
787 #endif
788
789                 if (unlikely(err))
790                         SetPageError(page);
791                 else if (cachemngd)
792                         SetPageUptodate(page);
793
794                 if (cachemngd)
795                         unlock_page(page);
796         }
797
798         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
799         bio_put(bio);
800 }
801
802 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
803 static DEFINE_MUTEX(z_pagemap_global_lock);
804
805 static int z_erofs_vle_unzip(struct super_block *sb,
806         struct z_erofs_vle_workgroup *grp,
807         struct list_head *page_pool)
808 {
809         struct erofs_sb_info *const sbi = EROFS_SB(sb);
810 #ifdef EROFS_FS_HAS_MANAGED_CACHE
811         struct address_space *const mngda = sbi->managed_cache->i_mapping;
812 #endif
813         const unsigned clusterpages = erofs_clusterpages(sbi);
814
815         struct z_erofs_pagevec_ctor ctor;
816         unsigned int nr_pages;
817         unsigned int sparsemem_pages = 0;
818         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
819         struct page **pages, **compressed_pages, *page;
820         unsigned i, llen;
821
822         enum z_erofs_page_type page_type;
823         bool overlapped;
824         struct z_erofs_vle_work *work;
825         void *vout;
826         int err;
827
828         might_sleep();
829         work = z_erofs_vle_grab_primary_work(grp);
830         DBG_BUGON(!READ_ONCE(work->nr_pages));
831
832         mutex_lock(&work->lock);
833         nr_pages = work->nr_pages;
834
835         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
836                 pages = pages_onstack;
837         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
838                 mutex_trylock(&z_pagemap_global_lock))
839                 pages = z_pagemap_global;
840         else {
841 repeat:
842                 pages = kvmalloc_array(nr_pages,
843                         sizeof(struct page *), GFP_KERNEL);
844
845                 /* fallback to global pagemap for the lowmem scenario */
846                 if (unlikely(pages == NULL)) {
847                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
848                                 goto repeat;
849                         else {
850                                 mutex_lock(&z_pagemap_global_lock);
851                                 pages = z_pagemap_global;
852                         }
853                 }
854         }
855
856         for (i = 0; i < nr_pages; ++i)
857                 pages[i] = NULL;
858
859         err = 0;
860         z_erofs_pagevec_ctor_init(&ctor,
861                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
862
863         for (i = 0; i < work->vcnt; ++i) {
864                 unsigned pagenr;
865
866                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
867
868                 /* all pages in pagevec ought to be valid */
869                 DBG_BUGON(page == NULL);
870                 DBG_BUGON(page->mapping == NULL);
871
872                 if (z_erofs_gather_if_stagingpage(page_pool, page))
873                         continue;
874
875                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
876                         pagenr = 0;
877                 else
878                         pagenr = z_erofs_onlinepage_index(page);
879
880                 DBG_BUGON(pagenr >= nr_pages);
881
882                 /*
883                  * currently EROFS doesn't support multiref (dedup),
884                  * so error out here if a multiref page is found.
885                  */
886                 if (pages[pagenr]) {
887                         DBG_BUGON(1);
888                         SetPageError(pages[pagenr]);
889                         z_erofs_onlinepage_endio(pages[pagenr]);
890                         err = -EIO;
891                 }
892                 pages[pagenr] = page;
893         }
894         sparsemem_pages = i;
895
896         z_erofs_pagevec_ctor_exit(&ctor, true);
897
898         overlapped = false;
899         compressed_pages = grp->compressed_pages;
900
901         for (i = 0; i < clusterpages; ++i) {
902                 unsigned pagenr;
903
904                 page = compressed_pages[i];
905
906                 /* all compressed pages ought to be valid */
907                 DBG_BUGON(page == NULL);
908                 DBG_BUGON(page->mapping == NULL);
909
910                 if (!z_erofs_is_stagingpage(page)) {
911 #ifdef EROFS_FS_HAS_MANAGED_CACHE
912                         if (page->mapping == mngda) {
913                                 if (unlikely(!PageUptodate(page)))
914                                         err = -EIO;
915                                 continue;
916                         }
917 #endif
918
919                         /*
920                          * only non-head pages can be selected
921                          * for in-place decompression
922                          */
923                         pagenr = z_erofs_onlinepage_index(page);
924
925                         DBG_BUGON(pagenr >= nr_pages);
926                         if (pages[pagenr]) {
927                                 DBG_BUGON(1);
928                                 SetPageError(pages[pagenr]);
929                                 z_erofs_onlinepage_endio(pages[pagenr]);
930                                 err = -EIO;
931                         }
932                         ++sparsemem_pages;
933                         pages[pagenr] = page;
934
935                         overlapped = true;
936                 }
937
938                 /* PG_error needs checking for in-place and staging pages */
939                 if (unlikely(PageError(page))) {
940                         DBG_BUGON(PageUptodate(page));
941                         err = -EIO;
942                 }
943         }
944
945         if (unlikely(err))
946                 goto out;
947
948         llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
949
950         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
951                 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
952                         pages, nr_pages, work->pageofs);
953                 goto out;
954         }
955
956         if (llen > grp->llen)
957                 llen = grp->llen;
958
959         err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
960                                             pages, llen, work->pageofs);
961         if (err != -ENOTSUPP)
962                 goto out;
963
964         if (sparsemem_pages >= nr_pages)
965                 goto skip_allocpage;
966
967         for (i = 0; i < nr_pages; ++i) {
968                 if (pages[i] != NULL)
969                         continue;
970
971                 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
972         }
973
974 skip_allocpage:
975         vout = erofs_vmap(pages, nr_pages);
976         if (!vout) {
977                 err = -ENOMEM;
978                 goto out;
979         }
980
981         err = z_erofs_vle_unzip_vmap(compressed_pages,
982                 clusterpages, vout, llen, work->pageofs, overlapped);
983
984         erofs_vunmap(vout, nr_pages);
985
986 out:
987         /* must handle all compressed pages before ending pages */
988         for (i = 0; i < clusterpages; ++i) {
989                 page = compressed_pages[i];
990
991 #ifdef EROFS_FS_HAS_MANAGED_CACHE
992                 if (page->mapping == mngda)
993                         continue;
994 #endif
995                 /* recycle all individual staging pages */
996                 (void)z_erofs_gather_if_stagingpage(page_pool, page);
997
998                 WRITE_ONCE(compressed_pages[i], NULL);
999         }
1000
1001         for (i = 0; i < nr_pages; ++i) {
1002                 page = pages[i];
1003                 if (!page)
1004                         continue;
1005
1006                 DBG_BUGON(page->mapping == NULL);
1007
1008                 /* recycle all individual staging pages */
1009                 if (z_erofs_gather_if_stagingpage(page_pool, page))
1010                         continue;
1011
1012                 if (unlikely(err < 0))
1013                         SetPageError(page);
1014
1015                 z_erofs_onlinepage_endio(page);
1016         }
1017
1018         if (pages == z_pagemap_global)
1019                 mutex_unlock(&z_pagemap_global_lock);
1020         else if (unlikely(pages != pages_onstack))
1021                 kvfree(pages);
1022
1023         work->nr_pages = 0;
1024         work->vcnt = 0;
1025
1026         /* all work locks MUST be taken before the following line */
1027
1028         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1029
1030         /* all work locks SHOULD be released right now */
1031         mutex_unlock(&work->lock);
1032
1033         z_erofs_vle_work_release(work);
1034         return err;
1035 }
1036
1037 static void z_erofs_vle_unzip_all(struct super_block *sb,
1038                                   struct z_erofs_vle_unzip_io *io,
1039                                   struct list_head *page_pool)
1040 {
1041         z_erofs_vle_owned_workgrp_t owned = io->head;
1042
1043         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1044                 struct z_erofs_vle_workgroup *grp;
1045
1046                 /* it's impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1047                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1048
1049                 /* it's impossible that 'owned' equals NULL */
1050                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1051
1052                 grp = owned;
1053                 owned = READ_ONCE(grp->next);
1054
1055                 z_erofs_vle_unzip(sb, grp, page_pool);
1056         }
1057 }
1058
1059 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1060 {
1061         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1062                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1063         LIST_HEAD(page_pool);
1064
1065         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1066         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1067
1068         put_pages_list(&page_pool);
1069         kvfree(iosb);
1070 }
1071
1072 static inline struct z_erofs_vle_unzip_io *
1073 prepare_io_handler(struct super_block *sb,
1074                    struct z_erofs_vle_unzip_io *io,
1075                    bool background)
1076 {
1077         struct z_erofs_vle_unzip_io_sb *iosb;
1078
1079         if (!background) {
1080                 /* waitqueue available for foreground io */
1081                 BUG_ON(io == NULL);
1082
1083                 init_waitqueue_head(&io->u.wait);
1084                 atomic_set(&io->pending_bios, 0);
1085                 goto out;
1086         }
1087
1088         if (io != NULL)
1089                 BUG();
1090         else {
1091                 /* allocate extra io descriptor for background io */
1092                 iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1093                         GFP_KERNEL | __GFP_NOFAIL);
1094                 BUG_ON(iosb == NULL);
1095
1096                 io = &iosb->io;
1097         }
1098
1099         iosb->sb = sb;
1100         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1101 out:
1102         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1103         return io;
1104 }
1105
1106 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1107 /* true - unlocked (noio), false - locked (need submit io) */
1108 static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
1109                                         struct page *page)
1110 {
1111         wait_on_page_locked(page);
1112         if (PagePrivate(page) && PageUptodate(page))
1113                 return true;
1114
1115         lock_page(page);
1116         ClearPageError(page);
1117
1118         if (unlikely(!PagePrivate(page))) {
1119                 set_page_private(page, (unsigned long)grp);
1120                 SetPagePrivate(page);
1121         }
1122         if (unlikely(PageUptodate(page))) {
1123                 unlock_page(page);
1124                 return true;
1125         }
1126         return false;
1127 }
1128
1129 #define __FSIO_1 1
1130 #else
1131 #define __FSIO_1 0
1132 #endif
1133
1134 static bool z_erofs_vle_submit_all(struct super_block *sb,
1135                                    z_erofs_vle_owned_workgrp_t owned_head,
1136                                    struct list_head *pagepool,
1137                                    struct z_erofs_vle_unzip_io *fg_io,
1138                                    bool force_fg)
1139 {
1140         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1141         const unsigned clusterpages = erofs_clusterpages(sbi);
1142         const gfp_t gfp = GFP_NOFS;
1143 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1144         struct address_space *const mngda = sbi->managed_cache->i_mapping;
1145         struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
1146 #endif
1147         struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
1148         struct bio *bio;
1149         tagptr1_t bi_private;
1150         /* since bio will be NULL, no need to initialize last_index */
1151         pgoff_t uninitialized_var(last_index);
1152         bool force_submit = false;
1153         unsigned nr_bios;
1154
1155         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1156                 return false;
1157
1158         /*
1159          * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
1160          * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
1161          */
1162 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1163         ios[0] = prepare_io_handler(sb, fg_io + 0, false);
1164 #endif
1165
1166         if (force_fg) {
1167                 ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
1168                 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
1169         } else {
1170                 ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
1171                 bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
1172         }
1173
1174         nr_bios = 0;
1175         force_submit = false;
1176         bio = NULL;
1177
1178         /* by default, all need io submission */
1179         ios[__FSIO_1]->head = owned_head;
1180
1181         do {
1182                 struct z_erofs_vle_workgroup *grp;
1183                 struct page **compressed_pages, *oldpage, *page;
1184                 pgoff_t first_index;
1185                 unsigned i = 0;
1186 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1187                 unsigned int noio = 0;
1188                 bool cachemngd;
1189 #endif
1190                 int err;
1191
1192                 /* it's impossible that 'owned_head' equals the following */
1193                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1194                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1195
1196                 grp = owned_head;
1197
1198                 /* close the main owned chain at first */
1199                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1200                         Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1201
1202                 first_index = grp->obj.index;
1203                 compressed_pages = grp->compressed_pages;
1204
1205                 force_submit |= (first_index != last_index + 1);
1206 repeat:
1207                 /* fulfill all compressed pages */
1208                 oldpage = page = READ_ONCE(compressed_pages[i]);
1209
1210 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1211                 cachemngd = false;
1212
1213                 if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
1214                         cachemngd = true;
1215                         goto do_allocpage;
1216                 } else if (page != NULL) {
1217                         if (page->mapping != mngda)
1218                                 BUG_ON(PageUptodate(page));
1219                         else if (recover_managed_page(grp, page)) {
1220                                 /* page is uptodate, skip io submission */
1221                                 force_submit = true;
1222                                 ++noio;
1223                                 goto skippage;
1224                         }
1225                 } else {
1226 do_allocpage:
1227 #else
1228                 if (page != NULL)
1229                         BUG_ON(PageUptodate(page));
1230                 else {
1231 #endif
1232                         page = __stagingpage_alloc(pagepool, gfp);
1233
1234                         if (oldpage != cmpxchg(compressed_pages + i,
1235                                 oldpage, page)) {
1236                                 list_add(&page->lru, pagepool);
1237                                 goto repeat;
1238 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1239                         } else if (cachemngd && !add_to_page_cache_lru(page,
1240                                 mngda, first_index + i, gfp)) {
1241                                 set_page_private(page, (unsigned long)grp);
1242                                 SetPagePrivate(page);
1243 #endif
1244                         }
1245                 }
1246
1247                 if (bio != NULL && force_submit) {
1248 submit_bio_retry:
1249                         __submit_bio(bio, REQ_OP_READ, 0);
1250                         bio = NULL;
1251                 }
1252
1253                 if (bio == NULL) {
1254                         bio = prepare_bio(sb, first_index + i,
1255                                 BIO_MAX_PAGES, z_erofs_vle_read_endio);
1256                         bio->bi_private = tagptr_cast_ptr(bi_private);
1257
1258                         ++nr_bios;
1259                 }
1260
1261                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1262                 if (err < PAGE_SIZE)
1263                         goto submit_bio_retry;
1264
1265                 force_submit = false;
1266                 last_index = first_index + i;
1267 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1268 skippage:
1269 #endif
1270                 if (++i < clusterpages)
1271                         goto repeat;
1272
1273 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1274                 if (noio < clusterpages) {
1275                         lstgrp_io = grp;
1276                 } else {
1277                         z_erofs_vle_owned_workgrp_t iogrp_next =
1278                                 owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
1279                                 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
1280                                 owned_head;
1281
1282                         if (lstgrp_io == NULL)
1283                                 ios[1]->head = iogrp_next;
1284                         else
1285                                 WRITE_ONCE(lstgrp_io->next, iogrp_next);
1286
1287                         if (lstgrp_noio == NULL)
1288                                 ios[0]->head = grp;
1289                         else
1290                                 WRITE_ONCE(lstgrp_noio->next, grp);
1291
1292                         lstgrp_noio = grp;
1293                 }
1294 #endif
1295         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1296
1297         if (bio != NULL)
1298                 __submit_bio(bio, REQ_OP_READ, 0);
1299
1300 #ifndef EROFS_FS_HAS_MANAGED_CACHE
1301         BUG_ON(!nr_bios);
1302 #else
1303         if (lstgrp_noio != NULL)
1304                 WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1305
1306         if (!force_fg && !nr_bios) {
1307                 kvfree(container_of(ios[1],
1308                         struct z_erofs_vle_unzip_io_sb, io));
1309                 return true;
1310         }
1311 #endif
1312
1313         z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
1314         return true;
1315 }
1316
1317 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1318                                      struct list_head *pagepool,
1319                                      bool force_fg)
1320 {
1321         struct super_block *sb = f->inode->i_sb;
1322         struct z_erofs_vle_unzip_io io[1 + __FSIO_1];
1323
1324         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1325                 return;
1326
1327 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1328         z_erofs_vle_unzip_all(sb, &io[0], pagepool);
1329 #endif
1330         if (!force_fg)
1331                 return;
1332
1333         /* wait until all bios are completed */
1334         wait_event(io[__FSIO_1].u.wait,
1335                 !atomic_read(&io[__FSIO_1].pending_bios));
1336
1337         /* let's do synchronous decompression */
1338         z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
1339 }
1340
1341 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1342                                              struct page *page)
1343 {
1344         struct inode *const inode = page->mapping->host;
1345         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1346         int err;
1347         LIST_HEAD(pagepool);
1348
1349 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1350         f.cachedzone_la = page->index << PAGE_SHIFT;
1351 #endif
1352         err = z_erofs_do_read_page(&f, page, &pagepool);
1353         (void)z_erofs_vle_work_iter_end(&f.builder);
1354
1355         /* if some compressed clusters are ready, submit them anyway */
1356         z_erofs_submit_and_unzip(&f, &pagepool, true);
1357
1358         if (err)
1359                 errln("%s, failed to read, err [%d]", __func__, err);
1360
1361         if (f.m_iter.mpage != NULL)
1362                 put_page(f.m_iter.mpage);
1363
1364         /* clean up the remaining free pages */
1365         put_pages_list(&pagepool);
1366         return err;
1367 }
1368
1369 static inline int __z_erofs_vle_normalaccess_readpages(
1370         struct file *filp,
1371         struct address_space *mapping,
1372         struct list_head *pages, unsigned nr_pages, bool sync)
1373 {
1374         struct inode *const inode = mapping->host;
1375
1376         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1377         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1378         struct page *head = NULL;
1379         LIST_HEAD(pagepool);
1380
1381 #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
1382         f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
1383 #endif
1384         for (; nr_pages; --nr_pages) {
1385                 struct page *page = lru_to_page(pages);
1386
1387                 prefetchw(&page->flags);
1388                 list_del(&page->lru);
1389
1390                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1391                         list_add(&page->lru, &pagepool);
1392                         continue;
1393                 }
1394
1395                 set_page_private(page, (unsigned long)head);
1396                 head = page;
1397         }
1398
1399         while (head != NULL) {
1400                 struct page *page = head;
1401                 int err;
1402
1403                 /* traversal in reverse order */
1404                 head = (void *)page_private(page);
1405
1406                 err = z_erofs_do_read_page(&f, page, &pagepool);
1407                 if (err) {
1408                         struct erofs_vnode *vi = EROFS_V(inode);
1409
1410                         errln("%s, readahead error at page %lu of nid %llu",
1411                                 __func__, page->index, vi->nid);
1412                 }
1413
1414                 put_page(page);
1415         }
1416
1417         (void)z_erofs_vle_work_iter_end(&f.builder);
1418
1419         z_erofs_submit_and_unzip(&f, &pagepool, sync);
1420
1421         if (f.m_iter.mpage != NULL)
1422                 put_page(f.m_iter.mpage);
1423
1424         /* clean up the remaining free pages */
1425         put_pages_list(&pagepool);
1426         return 0;
1427 }
1428
1429 static int z_erofs_vle_normalaccess_readpages(
1430         struct file *filp,
1431         struct address_space *mapping,
1432         struct list_head *pages, unsigned nr_pages)
1433 {
1434         return __z_erofs_vle_normalaccess_readpages(filp,
1435                 mapping, pages, nr_pages,
1436                 nr_pages < 4 /* sync */);
1437 }
1438
1439 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1440         .readpage = z_erofs_vle_normalaccess_readpage,
1441         .readpages = z_erofs_vle_normalaccess_readpages,
1442 };
1443
1444 #define __vle_cluster_advise(x, bit, bits) \
1445         ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
1446
1447 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1448         Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
1449
1450 enum {
1451         Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
1452         Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
1453         Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
1454         Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
1455         Z_EROFS_VLE_CLUSTER_TYPE_MAX
1456 };
1457
1458 #define vle_cluster_type(di)    \
1459         __vle_cluster_type((di)->di_advise)
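
/*
 * Decoding sketch (illustrative only, compiled out): di_advise is a
 * little-endian bitfield and the macros above pull the cluster type out
 * of it; PLAIN and HEAD entries carry di_clusterofs/di_u.blkaddr
 * directly, while NONHEAD entries only record a delta back to their
 * head cluster, as the helpers below rely on.
 */
#if 0
static bool demo_is_head_cluster(struct z_erofs_vle_decompressed_index *di)
{
        const unsigned int type = vle_cluster_type(di);

        return type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
               type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD;
}
#endif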
1460
1461 static inline unsigned
1462 vle_compressed_index_clusterofs(unsigned clustersize,
1463         struct z_erofs_vle_decompressed_index *di)
1464 {
1465         debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
1466                 __func__, di, di->di_advise, vle_cluster_type(di),
1467                 di->di_clusterofs, di->di_u.blkaddr);
1468
1469         switch (vle_cluster_type(di)) {
1470         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1471                 break;
1472         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1473         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1474                 return di->di_clusterofs;
1475         default:
1476                 BUG_ON(1);
1477         }
1478         return clustersize;
1479 }
1480
1481 static inline erofs_blk_t
1482 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1483 {
1484         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1485         struct erofs_vnode *vi = EROFS_V(inode);
1486
1487         unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1488                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1489                 index * sizeof(struct z_erofs_vle_decompressed_index);
1490
1491         return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1492 }
1493
1494 static inline unsigned int
1495 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1496 {
1497         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1498         struct erofs_vnode *vi = EROFS_V(inode);
1499
1500         unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1501                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1502                 index * sizeof(struct z_erofs_vle_decompressed_index);
1503
1504         return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
1505 }
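/*
 * Editor's gloss on the two helpers above: the decompressed index array of
 * an inode starts right after its (extent-aligned) on-disk inode and xattrs
 * plus one "struct erofs_extent_header", so the byte position of index lcn is
 *
 *      iloc(sbi, nid) + Z_EROFS_VLE_EXTENT_ALIGN(inode_isize + xattr_isize)
 *              + sizeof(struct erofs_extent_header)
 *              + lcn * sizeof(struct z_erofs_vle_decompressed_index);
 *
 * vle_extent_blkaddr() returns the meta block containing that byte and
 * vle_extent_blkoff() the offset within that block.
 */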
1506
1507 /*
1508  * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1509  * ---
1510  * VLE compression mode compresses a variable amount of logical data into
1511  * each fixed-size physical cluster.
1512  * On-disk, it is described by "struct z_erofs_vle_decompressed_index".
1513  */
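/*
 * A minimal worked example (editor's sketch, assuming clusterbits == 12,
 * i.e. 4KiB clusters): a HEAD or PLAIN index at logical cluster lcn with
 * di_clusterofs == 0x400 marks a logical extent starting at byte
 *
 *      (lcn << 12) + 0x400
 *
 * whose (possibly compressed) data lives in the single physical block
 * di_u.blkaddr; a NONHEAD index instead stores di_u.delta[0], the distance
 * in clusters back to the HEAD/PLAIN index describing its extent, which is
 * exactly what vle_get_logical_extent_head() below walks back through.
 */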
1514 static erofs_off_t vle_get_logical_extent_head(
1515         struct inode *inode,
1516         struct page **page_iter,
1517         void **kaddr_iter,
1518         unsigned lcn,   /* logical cluster number */
1519         erofs_blk_t *pcn,
1520         unsigned *flags)
1521 {
1522         /* for extent meta */
1523         struct page *page = *page_iter;
1524         erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
1525         struct z_erofs_vle_decompressed_index *di;
1526         unsigned long long ofs;
1527         const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
1528         const unsigned int clustersize = 1 << clusterbits;
1529         unsigned int delta0;
1530
1531         if (page->index != blkaddr) {
1532                 kunmap_atomic(*kaddr_iter);
1533                 unlock_page(page);
1534                 put_page(page);
1535
1536                 *page_iter = page = erofs_get_meta_page(inode->i_sb,
1537                         blkaddr, false);
1538                 *kaddr_iter = kmap_atomic(page);
1539         }
1540
1541         di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
1542         switch (vle_cluster_type(di)) {
1543         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1544                 delta0 = le16_to_cpu(di->di_u.delta[0]);
1545                 DBG_BUGON(!delta0);
1546                 DBG_BUGON(lcn < delta0);
1547
1548                 ofs = vle_get_logical_extent_head(inode,
1549                         page_iter, kaddr_iter,
1550                         lcn - delta0, pcn, flags);
1551                 break;
1552         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1553                 *flags ^= EROFS_MAP_ZIPPED;     /* fallthrough */
1554         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1555                 /* clustersize should be a power of two */
1556                 ofs = ((unsigned long long)lcn << clusterbits) +
1557                         (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
1558                 *pcn = le32_to_cpu(di->di_u.blkaddr);
1559                 break;
1560         default:
1561                 BUG_ON(1);
1562         }
1563         return ofs;
1564 }
1565
1566 int z_erofs_map_blocks_iter(struct inode *inode,
1567         struct erofs_map_blocks *map,
1568         struct page **mpage_ret, int flags)
1569 {
1570         /* logical extent (start, end) offset */
1571         unsigned long long ofs, end;
1572         struct z_erofs_vle_decompressed_index *di;
1573         erofs_blk_t e_blkaddr, pcn;
1574         unsigned lcn, logical_cluster_ofs, cluster_type;
1575         u32 ofs_rem;
1576         struct page *mpage = *mpage_ret;
1577         void *kaddr;
1578         bool initial;
1579         const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
1580         const unsigned int clustersize = 1 << clusterbits;
1581         int err = 0;
1582
1583         /* if m_llen (and m_plen) are still 0, this is the initial call: set up the whole mapping */
1584         initial = !map->m_llen;
1585
1586         /* when trying to read beyond EOF, leave it unmapped */
1587         if (unlikely(map->m_la >= inode->i_size)) {
1588                 BUG_ON(!initial);
1589                 map->m_llen = map->m_la + 1 - inode->i_size;
1590                 map->m_la = inode->i_size - 1;
1591                 map->m_flags = 0;
1592                 goto out;
1593         }
1594
1595         debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1596                 map->m_la, map->m_llen);
1597
1598         ofs = map->m_la + map->m_llen;
1599
1600         /* clustersize should be a power of two */
1601         lcn = ofs >> clusterbits;
1602         ofs_rem = ofs & (clustersize - 1);
1603
1604         e_blkaddr = vle_extent_blkaddr(inode, lcn);
1605
1606         if (mpage == NULL || mpage->index != e_blkaddr) {
1607                 if (mpage != NULL)
1608                         put_page(mpage);
1609
1610                 mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
1611                 *mpage_ret = mpage;
1612         } else {
1613                 lock_page(mpage);
1614                 DBG_BUGON(!PageUptodate(mpage));
1615         }
1616
1617         kaddr = kmap_atomic(mpage);
1618         di = kaddr + vle_extent_blkoff(inode, lcn);
1619
1620         debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
1621                 e_blkaddr, vle_extent_blkoff(inode, lcn));
1622
1623         logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
1624         if (!initial) {
1625                 /* [walking mode] 'map' has been already initialized */
1626                 map->m_llen += logical_cluster_ofs;
1627                 goto unmap_out;
1628         }
1629
1630         /* by default, compressed */
1631         map->m_flags |= EROFS_MAP_ZIPPED;
1632
1633         end = (u64)(lcn + 1) * clustersize;
1634
1635         cluster_type = vle_cluster_type(di);
1636
1637         switch (cluster_type) {
1638         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1639                 if (ofs_rem >= logical_cluster_ofs)
1640                         map->m_flags ^= EROFS_MAP_ZIPPED;
1641                 /* fallthrough */
1642         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1643                 if (ofs_rem == logical_cluster_ofs) {
1644                         pcn = le32_to_cpu(di->di_u.blkaddr);
1645                         goto exact_hitted;
1646                 }
1647
1648                 if (ofs_rem > logical_cluster_ofs) {
1649                         ofs = lcn * clustersize | logical_cluster_ofs;
1650                         pcn = le32_to_cpu(di->di_u.blkaddr);
1651                         break;
1652                 }
1653
1654                 /* logical cluster number should be >= 1 */
1655                 if (unlikely(!lcn)) {
1656                         errln("invalid logical cluster 0 at nid %llu",
1657                                 EROFS_V(inode)->nid);
1658                         err = -EIO;
1659                         goto unmap_out;
1660                 }
1661                 end = (lcn-- * clustersize) | logical_cluster_ofs;
1662                 /* fallthrough */
1663         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1664                 /* get the corresponding first chunk */
1665                 ofs = vle_get_logical_extent_head(inode, mpage_ret,
1666                         &kaddr, lcn, &pcn, &map->m_flags);
1667                 mpage = *mpage_ret;
1668                 break;
1669         default:
1670                 errln("unknown cluster type %u at offset %llu of nid %llu",
1671                         cluster_type, ofs, EROFS_V(inode)->nid);
1672                 err = -EIO;
1673                 goto unmap_out;
1674         }
1675
1676         map->m_la = ofs;
1677 exact_hitted:
1678         map->m_llen = end - ofs;
1679         map->m_plen = clustersize;
1680         map->m_pa = blknr_to_addr(pcn);
1681         map->m_flags |= EROFS_MAP_MAPPED;
1682 unmap_out:
1683         kunmap_atomic(kaddr);
1684         unlock_page(mpage);
1685 out:
1686         debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1687                 __func__, map->m_la, map->m_pa,
1688                 map->m_llen, map->m_plen, map->m_flags);
1689
1690         /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
1691         DBG_BUGON(err < 0);
1692         return err;
1693 }
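/*
 * Usage sketch for z_erofs_map_blocks_iter() (editor's illustration only,
 * not a caller in this file): the iterator is driven by feeding m_la + m_llen
 * back in, e.g.
 *
 *      struct erofs_map_blocks map = { .m_la = offset };
 *      struct page *mpage = NULL;
 *      int err = z_erofs_map_blocks_iter(inode, &map, &mpage, 0);
 *
 * On the initial call (m_llen == 0), m_la is adjusted down to the head of
 * the logical extent and m_llen/m_plen/m_pa/m_flags describe the whole
 * extent; with m_llen left non-zero, a further call runs in "walking mode"
 * and merely extends m_llen by the next cluster's logical cluster offset.
 * The cached meta page, if any, must finally be released with put_page().
 */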
1694