// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"
#include <linux/prefetch.h>

static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(z_erofs_workgroup_cachep);
}

static inline int init_unzip_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * we don't need too many threads, limiting threads
	 * could improve scheduling performance.
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
		WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
		onlinecpus + onlinecpus / 4);

	return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
}

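/*
 * Illustration (not part of the driver): with 8 possible CPUs, the
 * workqueue above is created with max_active = 8 + 8 / 4 = 10, i.e. a
 * modest 25% over-provisioning on top of the CPU count.
 */
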
int z_erofs_init_zip_subsystem(void)
{
	z_erofs_workgroup_cachep =
		kmem_cache_create("erofs_compress",
				  Z_EROFS_WORKGROUP_SIZE, 0,
				  SLAB_RECLAIM_ACCOUNT, NULL);

	if (z_erofs_workgroup_cachep != NULL) {
		if (!init_unzip_workqueue())
			return 0;

		kmem_cache_destroy(z_erofs_workgroup_cachep);
	}
	return -ENOMEM;
}

enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,
	/*
	 * The current work was the tail of an existing chain, and the
	 * previously processed chained works have all been decided to be
	 * hooked up to it.
	 * A new chain should be created for the remaining unprocessed works,
	 * so, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, the next work cannot
	 * reuse the whole page in the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |  (belongs to the next work)  |  (belongs to the current work)  |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
	/*
	 * The current work has been linked with the processed chained works,
	 * and could also be linked with the potential remaining works, which
	 * means if the processing page is the tail partial page of the work,
	 * the current work can safely use the whole page (since the next work
	 * is under control) for in-place decompression, as illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page  |          head (partial) page           |
	 * | (of the current work) |         (of the previous work)         |
	 * |  PRIMARY_FOLLOWED or  |                                        |
	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used for the current work itself.  ]
	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
};

struct z_erofs_vle_work_builder {
	enum z_erofs_vle_work_role role;
	/*
	 * 'hosted = false' means that the current workgroup doesn't belong to
	 * the owned chained workgroups. In other words, it is none of our
	 * business to submit this workgroup.
	 */
	bool hosted;

	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;
	struct z_erofs_pagevec_ctor vector;

	/* pages used for reading the compressed data */
	struct page **compressed_pages;
	unsigned int compressed_deficit;
};

#define VLE_WORK_BUILDER_INIT()	\
	{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }

#ifdef EROFS_FS_HAS_MANAGED_CACHE

static bool grab_managed_cache_pages(struct address_space *mapping,
				     erofs_blk_t start,
				     struct page **compressed_pages,
				     int clusterblks,
				     bool reserve_allocation)
{
	bool noio = true;
	unsigned int i;

	/* TODO: optimize by introducing find_get_pages_range */
	for (i = 0; i < clusterblks; ++i) {
		struct page *page, *found;

		if (READ_ONCE(compressed_pages[i]) != NULL)
			continue;

		page = found = find_get_page(mapping, start + i);
		if (found == NULL) {
			noio = false;
			if (!reserve_allocation)
				continue;
			page = EROFS_UNALLOCATED_CACHED_PAGE;
		}

		if (NULL == cmpxchg(compressed_pages + i, NULL, page))
			continue;

		if (found != NULL)
			put_page(found);
	}
	return noio;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp)
{
	struct z_erofs_vle_workgroup *const grp =
		container_of(egrp, struct z_erofs_vle_workgroup, obj);
	struct address_space *const mapping = sbi->managed_cache->i_mapping;
	const int clusterpages = erofs_clusterpages(sbi);
	int i;

	/*
	 * refcount of the workgroup is now frozen as 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = grp->compressed_pages[i];

		if (page == NULL || page->mapping != mapping)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(grp->compressed_pages[i], NULL);

		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_vle_workgroup *grp;
	int ret = 0;	/* 0 - busy */

	/* prevent the workgroup from being freed */
	rcu_read_lock();
	grp = (void *)page_private(page);

	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (grp->compressed_pages[i] == page) {
				WRITE_ONCE(grp->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&grp->obj, 1);
	}
	rcu_read_unlock();

	if (ret) {
		ClearPagePrivate(page);
		put_page(page);
	}
	return ret;
}
#endif

/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool try_to_reuse_as_compressed_page(
	struct z_erofs_vle_work_builder *b,
	struct page *page)
{
	while (b->compressed_deficit) {
		--b->compressed_deficit;
		if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
			return true;
	}
	return false;
}

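/*
 * Illustration (not part of the driver): the cmpxchg() above claims a
 * still-empty slot of the compressed page array exactly once, even when
 * racing against the other writers of grp->compressed_pages (the cache
 * grabbing and submission paths); a non-NULL return value means someone
 * else took the slot first, so the search simply moves on to the next
 * slot until compressed_deficit is exhausted.
 */
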
/* callers must hold work->lock */
static int z_erofs_vle_work_add_page(struct z_erofs_vle_work_builder *builder,
				     struct page *page,
				     enum z_erofs_page_type type,
				     bool pvec_safereuse)
{
	int ret;

	/* give priority for the compressed data storage */
	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    try_to_reuse_as_compressed_page(builder, page))
		return 0;

	ret = z_erofs_pagevec_ctor_enqueue(&builder->vector, page, type,
					   pvec_safereuse);
	builder->work->vcnt += (unsigned int)ret;
	return ret ? 0 : -EAGAIN;
}

static enum z_erofs_vle_work_role
try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
		       z_erofs_vle_owned_workgrp_t *owned_head,
		       bool *hosted)
{
	DBG_BUGON(*hosted == true);

	/* let's claim these following types of workgroup */
retry:
	if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
		/* type 1, nil workgroup */
		if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
			Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
			goto retry;

		*owned_head = grp;
		*hosted = true;

		/* lucky, I am the followee :) */
		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;

	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
			goto retry;

		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
	}

	return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
}

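/*
 * Illustration (not part of the driver): starting from a private chain
 * head 'owned_head', type 1 claiming links a finished (NIL-terminated)
 * workgroup into our chain, whereas type 2 hooks our chain onto the open
 * tail of another chain:
 *
 *	type 1:  grp->next: NIL  -> *owned_head,  *owned_head -> grp
 *	type 2:  grp->next: TAIL -> *owned_head,  *owned_head -> TAIL
 *
 * In both cases a failed cmpxchg() means another thread raced on
 * grp->next first, so the claim is retried from the updated state.
 */
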
static struct z_erofs_vle_work *
z_erofs_vle_work_lookup(struct super_block *sb,
			pgoff_t idx, unsigned int pageofs,
			struct z_erofs_vle_workgroup **grp_ret,
			enum z_erofs_vle_work_role *role,
			z_erofs_vle_owned_workgrp_t *owned_head,
			bool *hosted)
{
	bool tag, primary;
	struct erofs_workgroup *egrp;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	egrp = erofs_find_workgroup(sb, idx, &tag);
	if (egrp == NULL) {
		*grp_ret = NULL;
		return NULL;
	}

	*grp_ret = grp = container_of(egrp,
		struct z_erofs_vle_workgroup, obj);

	work = z_erofs_vle_grab_work(grp, pageofs);
	/* if multiref is disabled, `primary' is always true */
	primary = true;

	if (work->pageofs != pageofs) {
		DBG_BUGON(1);
		erofs_workgroup_put(egrp);
		return ERR_PTR(-EIO);
	}

	/*
	 * lock must be taken first to avoid grp->next == NIL between
	 * claiming workgroup and adding pages:
	 *
	 *	mutex_lock(&work->lock)
	 *	add all pages to pagevec
	 *
	 * [correct locking case 1]:
	 *	mutex_lock(grp->work[a])
	 *	...
	 *	mutex_lock(grp->work[b])	mutex_lock(grp->work[c])
	 *	...				*role = SECONDARY
	 *					add all pages to pagevec
	 *					...
	 *					mutex_unlock(grp->work[c])
	 *	mutex_lock(grp->work[c])
	 *	...
	 *
	 * [correct locking case 2]:
	 *	mutex_lock(grp->work[b])
	 *	...
	 *	mutex_lock(grp->work[a])
	 *	...
	 *	mutex_lock(grp->work[c])
	 *	...
	 *	mutex_lock(grp->work[a])
	 *	*role = PRIMARY_OWNER
	 *	add all pages to pagevec
	 *	...
	 */
	mutex_lock(&work->lock);

	if (!primary)
		*role = Z_EROFS_VLE_WORK_SECONDARY;
	else	/* claim the workgroup if possible */
		*role = try_to_claim_workgroup(grp, owned_head, hosted);

	return work;
}

static struct z_erofs_vle_work *
z_erofs_vle_work_register(struct super_block *sb,
			  struct z_erofs_vle_workgroup **grp_ret,
			  struct erofs_map_blocks *map,
			  pgoff_t index, unsigned int pageofs,
			  enum z_erofs_vle_work_role *role,
			  z_erofs_vle_owned_workgrp_t *owned_head,
			  bool *hosted)
{
	struct z_erofs_vle_workgroup *grp = *grp_ret;
	struct z_erofs_vle_work *work;
	int err;

	/* if multiref is disabled, grp should never be nullptr */
	if (unlikely(grp != NULL)) {
		DBG_BUGON(1);
		return ERR_PTR(-EINVAL);
	}

	/* no available workgroup, let's allocate one */
	grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
	if (unlikely(grp == NULL))
		return ERR_PTR(-ENOMEM);

	grp->obj.index = index;
	grp->llen = map->m_llen;

	z_erofs_vle_set_workgrp_fmt(grp,
		(map->m_flags & EROFS_MAP_ZIPPED) ?
			Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
			Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
	atomic_set(&grp->obj.refcount, 1);

	/* new workgrps have been claimed as type 1 */
	WRITE_ONCE(grp->next, *owned_head);
	/* primary and followed work for all new workgrps */
	*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	/* it should be submitted by ourselves */
	*hosted = true;

	work = z_erofs_vle_grab_primary_work(grp);
	work->pageofs = pageofs;

	mutex_init(&work->lock);

	err = erofs_register_workgroup(sb, &grp->obj, 0);
	if (err) {
		kmem_cache_free(z_erofs_workgroup_cachep, grp);
		return ERR_PTR(-EAGAIN);
	}

	*owned_head = *grp_ret = grp;

	mutex_lock(&work->lock);
	return work;
}

static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
					 unsigned int llen)
{
	while (1) {
		unsigned int orig_llen = grp->llen;

		if (orig_llen >= llen || orig_llen ==
			cmpxchg(&grp->llen, orig_llen, llen))
			break;
	}
}

#define builder_is_hooked(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)

#define builder_is_followed(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)

static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
				       struct super_block *sb,
				       struct erofs_map_blocks *map,
				       z_erofs_vle_owned_workgrp_t *owned_head)
{
	const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
	const erofs_blk_t index = erofs_blknr(map->m_pa);
	const unsigned int pageofs = map->m_la & ~PAGE_MASK;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	DBG_BUGON(builder->work != NULL);

	/* must be Z_EROFS_WORK_TAIL or the next chained work */
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	DBG_BUGON(erofs_blkoff(map->m_pa));

repeat:
	work = z_erofs_vle_work_lookup(sb, index,
		pageofs, &grp, &builder->role, owned_head, &builder->hosted);
	if (work != NULL) {
		__update_workgrp_llen(grp, map->m_llen);
		goto got_it;
	}

	work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
		&builder->role, owned_head, &builder->hosted);

	if (unlikely(work == ERR_PTR(-EAGAIN)))
		goto repeat;

	if (unlikely(IS_ERR(work)))
		return PTR_ERR(work);
got_it:
	z_erofs_pagevec_ctor_init(&builder->vector,
		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);

	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
		/* enable possibly in-place decompression */
		builder->compressed_pages = grp->compressed_pages;
		builder->compressed_deficit = clusterpages;
	} else {
		builder->compressed_pages = NULL;
		builder->compressed_deficit = 0;
	}

	builder->grp = grp;
	builder->work = work;
	return 0;
}

/*
 * keep in mind that no referenced workgroup will be freed until an RCU
 * grace period has elapsed, so rcu_read_lock() can prevent a workgroup
 * from being freed.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_vle_work *work = container_of(head,
		struct z_erofs_vle_work, rcu);
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	kmem_cache_free(z_erofs_workgroup_cachep, grp);
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
		struct z_erofs_vle_workgroup, obj);
	struct z_erofs_vle_work *const work = &vgrp->work;

	call_rcu(&work->rcu, z_erofs_rcu_callback);
}

static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
	struct z_erofs_vle_work *work __maybe_unused)
{
	erofs_workgroup_put(&grp->obj);
}

void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
{
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	__z_erofs_vle_work_release(grp, work);
}

static inline bool
z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
{
	struct z_erofs_vle_work *work = builder->work;

	if (work == NULL)
		return false;

	z_erofs_pagevec_ctor_exit(&builder->vector, false);
	mutex_unlock(&work->lock);

	/*
	 * if all pending pages have been added, don't hold the work
	 * reference any longer if the current work isn't hosted by ourselves.
	 */
	if (!builder->hosted)
		__z_erofs_vle_work_release(builder->grp, work);

	builder->work = NULL;
	return true;
}

static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp);

	if (unlikely(page == NULL))
		return NULL;

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}

struct z_erofs_vle_frontend {
	struct inode *const inode;

	struct z_erofs_vle_work_builder builder;
	struct erofs_map_blocks_iter m_iter;

	z_erofs_vle_owned_workgrp_t owned_head;

	bool initial;
#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
	erofs_off_t cachedzone_la;
#endif
};

#define VLE_FRONTEND_INIT(__i) { \
	.inode = __i, \
	.m_iter = { \
		{ .m_llen = 0, .m_plen = 0 }, \
		.mpage = NULL \
	}, \
	.builder = VLE_WORK_BUILDER_INIT(), \
	.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
	.initial = true, }

static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
				struct page *page,
				struct list_head *page_pool)
{
	struct super_block *const sb = fe->inode->i_sb;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	struct erofs_map_blocks_iter *const m = &fe->m_iter;
	struct erofs_map_blocks *const map = &m->map;
	struct z_erofs_vle_work_builder *const builder = &fe->builder;
	const loff_t offset = page_offset(page);

	bool tight = builder_is_hooked(builder);
	struct z_erofs_vle_work *work = builder->work;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *const mngda = sbi->managed_cache->i_mapping;
	struct z_erofs_vle_workgroup *grp;
	bool noio_outoforder;
#endif

	enum z_erofs_page_type page_type;
	unsigned int cur, end, split, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	split = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid unzip work previously (very rare) */
		if (builder->work == NULL)
			goto restart_now;
		goto hitted;
	}

	/* go ahead to the next map_blocks extent */
	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_vle_work_iter_end(builder))
		fe->initial = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
	if (unlikely(err))
		goto err_out;

restart_now:
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
		goto hitted;

	DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
	DBG_BUGON(erofs_blkoff(map->m_pa));

	err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
	if (unlikely(err))
		goto err_out;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	grp = fe->builder.grp;

	/* let's do out-of-order decompression for noio */
	noio_outoforder = grab_managed_cache_pages(mngda,
		erofs_blknr(map->m_pa),
		grp->compressed_pages, erofs_blknr(map->m_plen),
		/* compressed page caching selection strategy */
		fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
			       map->m_la < fe->cachedzone_la : 0));

	if (noio_outoforder && builder_is_followed(builder))
		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
#endif

	tight &= builder_is_hooked(builder);
	work = builder->work;
hitted:
	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive the page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!split ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= builder_is_followed(builder);

retry:
	err = z_erofs_vle_work_add_page(builder, page, page_type,
					builder_is_followed(builder));
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(page_pool, GFP_NOFS);

		err = z_erofs_vle_work_add_page(builder,
			newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
		if (likely(!err))
			goto retry;
	}

	if (unlikely(err))
		goto err_out;

	index = page->index - map->m_la / PAGE_SIZE;

	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of split parts of a page */
	++split;
	/* also update nr_pages */
	work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_endio(page);

	debugln("%s, finish page: %pK split: %u map->m_llen %llu",
		__func__, page, split, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}

static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (!background) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}

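/*
 * Illustration (not part of the driver): 'ptr' is a tagptr1_t built by
 * the submission path, packing the io descriptor pointer and a one-bit
 * "background" flag into a single word, roughly:
 *
 *	t  = tagptr_fold(tagptr1_t, io, 1);	// background io
 *	io = tagptr_unfold_ptr(t);		// pointer part
 *	bg = tagptr_unfold_tags(t);		// tag part (1)
 *
 * which works because struct z_erofs_vle_unzip_io is sufficiently
 * aligned for the low pointer bit to always be zero.
 */
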
static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	const blk_status_t err = bio->bi_status;
	unsigned int i;
	struct bio_vec *bvec;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *mngda = NULL;
#endif

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
			struct inode *const inode = page->mapping->host;
			struct super_block *const sb = inode->i_sb;

			mngda = EROFS_SB(sb)->managed_cache->i_mapping;
		}

		/*
		 * If mngda has not been obtained yet, it is NULL; however,
		 * page->mapping is never NULL if everything works properly.
		 */
		cachemngd = (page->mapping == mngda);
#endif

		if (unlikely(err))
			SetPageError(page);
		else if (cachemngd)
			SetPageUptodate(page);

		if (cachemngd)
			unlock_page(page);
	}

	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
	bio_put(bio);
}

static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);

static int z_erofs_vle_unzip(struct super_block *sb,
			     struct z_erofs_vle_workgroup *grp,
			     struct list_head *page_pool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *const mngda = sbi->managed_cache->i_mapping;
#endif
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_pagevec_ctor ctor;
	unsigned int nr_pages;
	unsigned int sparsemem_pages = 0;
	struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;
	unsigned int i, llen;

	enum z_erofs_page_type page_type;
	bool overlapped;
	struct z_erofs_vle_work *work;
	void *vout;
	int err = 0;

	might_sleep();
	work = z_erofs_vle_grab_primary_work(grp);
	DBG_BUGON(!READ_ONCE(work->nr_pages));

	mutex_lock(&work->lock);
	nr_pages = work->nr_pages;

	if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
		pages = pages_onstack;
	else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
		 mutex_trylock(&z_pagemap_global_lock))
		pages = z_pagemap_global;
	else {
repeat:
		pages = kvmalloc_array(nr_pages,
				       sizeof(struct page *), GFP_KERNEL);

		/* fallback to global pagemap for the lowmem scenario */
		if (unlikely(pages == NULL)) {
			if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
				goto repeat;

			mutex_lock(&z_pagemap_global_lock);
			pages = z_pagemap_global;
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	z_erofs_pagevec_ctor_init(&ctor,
		Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);

	for (i = 0; i < work->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(page == NULL);
		DBG_BUGON(page->mapping == NULL);

		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);

		/*
		 * currently EROFS doesn't support multiref(dedup),
		 * so here erroring out one multiref page.
		 */
		if (pages[pagenr] != NULL) {
			DBG_BUGON(1);
			SetPageError(pages[pagenr]);
			z_erofs_onlinepage_endio(pages[pagenr]);
			err = -EIO;
		}
		pages[pagenr] = page;
	}
	sparsemem_pages = i;

	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = grp->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(page == NULL);
		DBG_BUGON(page->mapping == NULL);

		if (!z_erofs_is_stagingpage(page)) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
			if (page->mapping == mngda) {
				if (unlikely(!PageUptodate(page)))
					err = -EIO;
				continue;
			}
#endif

			/*
			 * only if non-head pages are selected
			 * for inplace decompression
			 */
			pagenr = z_erofs_onlinepage_index(page);

			DBG_BUGON(pagenr >= nr_pages);
			if (pages[pagenr] != NULL) {
				DBG_BUGON(1);
				SetPageError(pages[pagenr]);
				z_erofs_onlinepage_endio(pages[pagenr]);
				err = -EIO;
			}
			++sparsemem_pages;
			pages[pagenr] = page;

			overlapped = true;
		}

		/* PG_error needs checking for inplaced and staging pages */
		if (unlikely(PageError(page))) {
			DBG_BUGON(PageUptodate(page));
			err = -EIO;
		}
	}

	if (unlikely(err))
		goto out;

	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;

	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
		err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
					     pages, nr_pages, work->pageofs);
		goto out;
	}

	if (llen > grp->llen)
		llen = grp->llen;

	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
					    pages, llen, work->pageofs);
	if (err != -ENOTSUPP)
		goto out;

	if (sparsemem_pages >= nr_pages)
		goto skip_allocpage;

	for (i = 0; i < nr_pages; ++i) {
		if (pages[i] != NULL)
			continue;

		pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
	}

skip_allocpage:
	vout = erofs_vmap(pages, nr_pages);
	if (!vout) {
		err = -ENOMEM;
		goto out;
	}

	err = z_erofs_vle_unzip_vmap(compressed_pages,
		clusterpages, vout, llen, work->pageofs, overlapped);

	erofs_vunmap(vout, nr_pages);

out:
	/* must handle all compressed pages before ending file pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page->mapping == mngda)
			continue;
#endif
		/* recycle all individual staging pages */
		(void)z_erofs_gather_if_stagingpage(page_pool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (page == NULL)
			continue;

		DBG_BUGON(page->mapping == NULL);

		/* recycle all individual staging pages */
		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (unlikely(err < 0))
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (unlikely(pages != pages_onstack))
		kvfree(pages);

	work->nr_pages = 0;
	work->vcnt = 0;

	/* all work locks MUST be taken before the following line */

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);

	/* all work locks SHOULD be released right now */
	mutex_unlock(&work->lock);

	z_erofs_vle_work_release(work);
	return err;
}

static void z_erofs_vle_unzip_all(struct super_block *sb,
				  struct z_erofs_vle_unzip_io *io,
				  struct list_head *page_pool)
{
	z_erofs_vle_owned_workgrp_t owned = io->head;

	while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
		struct z_erofs_vle_workgroup *grp;

		/* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);

		grp = owned;
		owned = READ_ONCE(grp->next);

		z_erofs_vle_unzip(sb, grp, page_pool);
	}
}

static void z_erofs_vle_unzip_wq(struct work_struct *work)
{
	struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
		struct z_erofs_vle_unzip_io_sb, io.u.work);
	LIST_HEAD(page_pool);

	DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);

	put_pages_list(&page_pool);
	kvfree(iosb);
}

static inline struct z_erofs_vle_unzip_io *
prepare_io_handler(struct super_block *sb,
		   struct z_erofs_vle_unzip_io *io,
		   bool background)
{
	struct z_erofs_vle_unzip_io_sb *iosb;

	if (!background) {
		/* waitqueue available for foreground io */
		BUG_ON(io == NULL);

		init_waitqueue_head(&io->u.wait);
		atomic_set(&io->pending_bios, 0);
		goto out;
	}

	BUG_ON(io != NULL);

	/* allocate extra io descriptor for background io */
	iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
			GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(iosb == NULL);

	io = &iosb->io;
	iosb->sb = sb;
	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
	io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
	return io;
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
/* true - unlocked (noio), false - locked (need submit io) */
static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
					struct page *page)
{
	wait_on_page_locked(page);
	if (PagePrivate(page) && PageUptodate(page))
		return true;

	lock_page(page);
	ClearPageError(page);

	if (unlikely(!PagePrivate(page))) {
		set_page_private(page, (unsigned long)grp);
		SetPagePrivate(page);
	}
	if (unlikely(PageUptodate(page))) {
		unlock_page(page);
		return true;
	}
	return false;
}

#define __FSIO_1 1
#else
#define __FSIO_1 0
#endif

static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_vle_owned_workgrp_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_vle_unzip_io *fg_io,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	const gfp_t gfp = GFP_NOFS;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *const mngda = sbi->managed_cache->i_mapping;
	struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
#endif
	struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
	struct bio *bio;
	tagptr1_t bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;

	if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
		return false;

	/*
	 * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
	 * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
	 */
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	ios[0] = prepare_io_handler(sb, fg_io + 0, false);
#endif

	if (force_fg) {
		ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
		bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
	} else {
		ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
		bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
	}

	nr_bios = 0;
	force_submit = false;
	bio = NULL;

	/* by default, all need io submission */
	ios[__FSIO_1]->head = owned_head;

	do {
		struct z_erofs_vle_workgroup *grp;
		struct page **compressed_pages, *oldpage, *page;
		pgoff_t first_index;
		unsigned int i = 0;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
		unsigned int noio = 0;
		bool cachemngd;
#endif
		int err;

		/* 'owned_head' should never equal either of the following */
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);

		grp = owned_head;

		/* close the main owned chain at first */
		owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
			Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

		first_index = grp->obj.index;
		compressed_pages = grp->compressed_pages;

		force_submit |= (first_index != last_index + 1);
repeat:
		/* fill the slots of all compressed pages */
		oldpage = page = READ_ONCE(compressed_pages[i]);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		cachemngd = false;

		if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
			cachemngd = true;
			goto do_allocpage;
		} else if (page != NULL) {
			if (page->mapping != mngda)
				BUG_ON(PageUptodate(page));
			else if (recover_managed_page(grp, page)) {
				/* page is uptodate, skip io submission */
				force_submit = true;
				++noio;
				goto skippage;
			}
		} else {
do_allocpage:
#else
		if (page != NULL)
			BUG_ON(PageUptodate(page));
		else {
#endif
			page = __stagingpage_alloc(pagepool, gfp);

			if (oldpage != cmpxchg(compressed_pages + i,
					       oldpage, page)) {
				list_add(&page->lru, pagepool);
				goto repeat;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
			} else if (cachemngd && !add_to_page_cache_lru(page,
				mngda, first_index + i, gfp)) {
				set_page_private(page, (unsigned long)grp);
				SetPagePrivate(page);
#endif
			}
		}

		if (bio != NULL && force_submit) {
submit_bio_retry:
			__submit_bio(bio, REQ_OP_READ, 0);
			bio = NULL;
		}

		if (bio == NULL) {
			bio = prepare_bio(sb, first_index + i,
				BIO_MAX_PAGES, z_erofs_vle_read_endio);
			bio->bi_private = tagptr_cast_ptr(bi_private);

			++nr_bios;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
skippage:
#endif
		if (++i < clusterpages)
			goto repeat;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (noio < clusterpages) {
			lstgrp_io = grp;
		} else {
			z_erofs_vle_owned_workgrp_t iogrp_next =
				owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
				Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
				owned_head;

			if (lstgrp_io == NULL)
				ios[1]->head = iogrp_next;
			else
				WRITE_ONCE(lstgrp_io->next, iogrp_next);

			if (lstgrp_noio == NULL)
				ios[0]->head = grp;
			else
				WRITE_ONCE(lstgrp_noio->next, grp);

			lstgrp_noio = grp;
		}
#endif
	} while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);

	if (bio != NULL)
		__submit_bio(bio, REQ_OP_READ, 0);

#ifndef EROFS_FS_HAS_MANAGED_CACHE
	BUG_ON(!nr_bios);
#else
	if (lstgrp_noio != NULL)
		WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	if (!force_fg && !nr_bios) {
		kvfree(container_of(ios[1],
			struct z_erofs_vle_unzip_io_sb, io));
		return true;
	}
#endif

	z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
	return true;
}

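/*
 * Illustration (not part of the driver): with the managed cache enabled,
 * the original owned chain is split while being walked above: workgroups
 * whose compressed pages are all already uptodate (noio == clusterpages)
 * are rechained onto ios[0] and decompressed immediately in the caller,
 * while the rest stay chained on ios[__FSIO_1] and are kicked off only
 * once their bios complete.
 */
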
static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct z_erofs_vle_unzip_io io[1 + __FSIO_1];

	if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
		return;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	z_erofs_vle_unzip_all(sb, &io[0], pagepool);
#endif
	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[__FSIO_1].u.wait,
		   !atomic_read(&io[__FSIO_1].pending_bios));

	/* let's do synchronous decompression */
	z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
}

static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
	f.cachedzone_la = (erofs_off_t)page->index << PAGE_SHIFT;
#endif
	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_vle_work_iter_end(&f.builder);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_submit_and_unzip(&f, &pagepool, true);

	if (err)
		errln("%s, failed to read, err [%d]", __func__, err);

	if (f.m_iter.mpage != NULL)
		put_page(f.m_iter.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static inline int __z_erofs_vle_normalaccess_readpages(
	struct file *filp,
	struct address_space *mapping,
	struct list_head *pages, unsigned int nr_pages, bool sync)
{
	struct inode *const inode = mapping->host;

	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	struct page *head = NULL;
	LIST_HEAD(pagepool);

#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
	f.cachedzone_la = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
#endif
	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head != NULL) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err) {
			struct erofs_vnode *vi = EROFS_V(inode);

			errln("%s, readahead error at page %lu of nid %llu",
			      __func__, page->index, vi->nid);
		}

		put_page(page);
	}

	(void)z_erofs_vle_work_iter_end(&f.builder);

	z_erofs_submit_and_unzip(&f, &pagepool, sync);

	if (f.m_iter.mpage != NULL)
		put_page(f.m_iter.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}

static int z_erofs_vle_normalaccess_readpages(
	struct file *filp,
	struct address_space *mapping,
	struct list_head *pages, unsigned int nr_pages)
{
	return __z_erofs_vle_normalaccess_readpages(filp,
		mapping, pages, nr_pages,
		nr_pages < 4 /* sync */);
}

const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};

#define __vle_cluster_advise(x, bit, bits) \
	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))

#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)

enum {
	Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
	Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
	Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
	Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
	Z_EROFS_VLE_CLUSTER_TYPE_MAX
};

#define vle_cluster_type(di)	\
	__vle_cluster_type((di)->di_advise)

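/*
 * Worked example (not part of the driver), assuming the on-disk layout
 * where Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT == 0 and
 * Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS == 2: a little-endian di_advise of
 * 0x0002 decodes as
 *
 *	(le16_to_cpu(0x0002) >> 0) & ((1 << 2) - 1) == 2
 *		== Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD
 */
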
static inline unsigned int
vle_compressed_index_clusterofs(unsigned int clustersize,
				struct z_erofs_vle_decompressed_index *di)
{
	debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
		__func__, di, di->di_advise, vle_cluster_type(di),
		di->di_clusterofs, di->di_u.blkaddr);

	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		return di->di_clusterofs;
	default:
		BUG_ON(1);
	}
	return clustersize;
}

static inline erofs_blk_t
vle_extent_blkaddr(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blknr(iloc(sbi, vi->nid) + ofs);
}

static inline unsigned int
vle_extent_blkoff(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
}

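/*
 * Illustration (not part of the driver): both helpers above compute the
 * same byte position of the index-th decompressed index and then split
 * it into a block number and an intra-block offset:
 *
 *	pos = iloc(sbi, nid)
 *	    + Z_EROFS_VLE_EXTENT_ALIGN(inode_isize + xattr_isize)
 *	    + sizeof(struct erofs_extent_header)
 *	    + index * sizeof(struct z_erofs_vle_decompressed_index)
 *
 * so vle_extent_blkaddr() returns erofs_blknr(pos) and
 * vle_extent_blkoff() returns erofs_blkoff(pos).
 */
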
/*
 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
 *
 * VLE compression mode attempts to compress a variable amount of logical
 * data into a fixed-size physical cluster.
 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
 */
static erofs_off_t vle_get_logical_extent_head(
	struct inode *inode,
	struct page **page_iter,
	void **kaddr_iter,
	unsigned int lcn,	/* logical cluster number */
	erofs_blk_t *pcn,
	unsigned int *flags)
{
	/* for extent meta */
	struct page *page = *page_iter;
	erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
	struct z_erofs_vle_decompressed_index *di;
	unsigned long long ofs;
	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
	const unsigned int clustersize = 1 << clusterbits;
	unsigned int delta0;

	if (page->index != blkaddr) {
		kunmap_atomic(*kaddr_iter);
		unlock_page(page);
		put_page(page);

		*page_iter = page = erofs_get_meta_page(inode->i_sb,
			blkaddr, false);
		*kaddr_iter = kmap_atomic(page);
	}

	di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		delta0 = le16_to_cpu(di->di_u.delta[0]);
		DBG_BUGON(!delta0);
		DBG_BUGON(lcn < delta0);

		ofs = vle_get_logical_extent_head(inode,
			page_iter, kaddr_iter,
			lcn - delta0, pcn, flags);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		*flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/* clustersize should be a power of two */
		ofs = ((unsigned long long)lcn << clusterbits) +
			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
		*pcn = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		BUG_ON(1);
	}
	return ofs;
}

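/*
 * Illustration (not part of the driver): for a NONHEAD index the head of
 * its extent is found by stepping back di_u.delta[0] logical clusters and
 * decoding again, e.g. lcn == 5 with delta[0] == 2 recurses to lcn == 3,
 * which must then decode as a PLAIN or HEAD index holding the physical
 * cluster number and the cluster offset of the extent start.
 */
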
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    struct page **mpage_ret, int flags)
{
	/* logical extent (start, end) offsets */
	unsigned long long ofs, end;
	struct z_erofs_vle_decompressed_index *di;
	erofs_blk_t e_blkaddr, pcn;
	unsigned int lcn, logical_cluster_ofs, cluster_type;
	u32 ofs_rem;
	struct page *mpage = *mpage_ret;
	void *kaddr;
	bool initial;
	const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
	const unsigned int clustersize = 1 << clusterbits;
	int err = 0;

	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
	initial = !map->m_llen;

	/* when trying to read beyond EOF, leave it unmapped */
	if (unlikely(map->m_la >= inode->i_size)) {
		DBG_BUGON(!initial);
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size - 1;
		map->m_flags = 0;
		goto out;
	}

	debugln("%s, m_la %llu m_llen %llu --- start", __func__,
		map->m_la, map->m_llen);

	ofs = map->m_la + map->m_llen;

	/* clustersize should be a power of two */
	lcn = ofs >> clusterbits;
	ofs_rem = ofs & (clustersize - 1);

	e_blkaddr = vle_extent_blkaddr(inode, lcn);

	if (mpage == NULL || mpage->index != e_blkaddr) {
		if (mpage != NULL)
			put_page(mpage);

		mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
		*mpage_ret = mpage;
	} else {
		lock_page(mpage);
		DBG_BUGON(!PageUptodate(mpage));
	}

	kaddr = kmap_atomic(mpage);
	di = kaddr + vle_extent_blkoff(inode, lcn);

	debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
		e_blkaddr, vle_extent_blkoff(inode, lcn));

	logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
	if (!initial) {
		/* [walking mode] 'map' has already been initialized */
		map->m_llen += logical_cluster_ofs;
		goto unmap_out;
	}

	/* by default, compressed */
	map->m_flags |= EROFS_MAP_ZIPPED;

	end = (u64)(lcn + 1) * clustersize;

	cluster_type = vle_cluster_type(di);

	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (ofs_rem >= logical_cluster_ofs)
			map->m_flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (ofs_rem == logical_cluster_ofs) {
			pcn = le32_to_cpu(di->di_u.blkaddr);
			goto exact_hitted;
		}

		if (ofs_rem > logical_cluster_ofs) {
			ofs = lcn * clustersize | logical_cluster_ofs;
			pcn = le32_to_cpu(di->di_u.blkaddr);
			break;
		}

		/* logical cluster number should be >= 1 */
		if (unlikely(!lcn)) {
			errln("invalid logical cluster 0 at nid %llu",
			      EROFS_V(inode)->nid);
			err = -EIO;
			goto unmap_out;
		}
		end = (lcn-- * clustersize) | logical_cluster_ofs;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		ofs = vle_get_logical_extent_head(inode, mpage_ret,
			&kaddr, lcn, &pcn, &map->m_flags);
		mpage = *mpage_ret;
		break;
	default:
		errln("unknown cluster type %u at offset %llu of nid %llu",
		      cluster_type, ofs, EROFS_V(inode)->nid);
		err = -EIO;
		goto unmap_out;
	}

exact_hitted:
	map->m_llen = end - ofs;
	map->m_plen = clustersize;
	map->m_pa = blknr_to_addr(pcn);
	map->m_flags |= EROFS_MAP_MAPPED;
unmap_out:
	kunmap_atomic(kaddr);
	unlock_page(mpage);
out:
	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		__func__, map->m_la, map->m_pa,
		map->m_llen, map->m_plen, map->m_flags);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -EIO);
	return err;
}
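
/*
 * Illustration (not part of the driver): a typical caller (see
 * z_erofs_do_read_page) drives this iterator as follows:
 *
 *	map->m_la = pos;
 *	map->m_llen = 0;	// m_llen == 0 selects the initial mode
 *	err = z_erofs_map_blocks_iter(inode, map, &mpage, 0);
 *	// on success, the extent containing 'pos' is described by
 *	// map->m_llen / m_pa / m_plen, with EROFS_MAP_MAPPED set.
 *
 * Calling again with a non-zero m_llen switches to walking mode, which
 * merely extends m_llen by the next logical cluster offset.
 */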