/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/unzip_vle.h
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __EROFS_FS_UNZIP_VLE_H
#define __EROFS_FS_UNZIP_VLE_H
#include "internal.h"
#include "unzip_pagevec.h"
/*
 *  - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
 * used for temporarily allocated pages (via erofs_allocpage),
 * in order to separate those from NULL mapping (e.g. truncated pages)
 */
#define Z_EROFS_MAPPING_STAGING		((void *)0x5A110C8D)
#define z_erofs_is_stagingpage(page)	\
	((page)->mapping == Z_EROFS_MAPPING_STAGING)
static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
						 struct page *page)
{
	if (z_erofs_is_stagingpage(page)) {
		list_add(&page->lru, page_pool);
		return true;
	}

	return false;
}
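/*
 * Illustrative sketch only (not code taken from this driver): a caller
 * that needs a temporary bounce page would typically tag it like
 *
 *	page = erofs_allocpage(page_pool, GFP_NOFS);
 *	page->mapping = Z_EROFS_MAPPING_STAGING;
 *
 * so that z_erofs_gather_if_stagingpage() can later recognize the page
 * and recycle it into page_pool.
 */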
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the work lock.
 */

#define Z_EROFS_VLE_INLINE_PAGEVECS	3
struct z_erofs_vle_work {
	struct mutex lock;

	/* I: decompression offset in page */
	unsigned short pageofs;
	unsigned short nr_pages;

	/* L: queued pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline pagevec */
		erofs_vtptr_t pagevec[Z_EROFS_VLE_INLINE_PAGEVECS];
		struct rcu_head rcu;
	};
};
#define Z_EROFS_VLE_WORKGRP_FMT_PLAIN	0
#define Z_EROFS_VLE_WORKGRP_FMT_LZ4	1
#define Z_EROFS_VLE_WORKGRP_FMT_MASK	1

typedef struct z_erofs_vle_workgroup *z_erofs_vle_owned_workgrp_t;
struct z_erofs_vle_workgroup {
	struct erofs_workgroup obj;
	struct z_erofs_vle_work work;

	/* next owned workgroup */
	z_erofs_vle_owned_workgrp_t next;

	/* compressed pages (including multi-usage pages) */
	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
	unsigned int llen, flags;
};
/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted io (still open) */
#define Z_EROFS_VLE_WORKGRP_TAIL	((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted io */
#define Z_EROFS_VLE_WORKGRP_TAIL_CLOSED	((void *)0x5F0EDEAD)

#define Z_EROFS_VLE_WORKGRP_NIL		(NULL)
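/*
 * A minimal sketch of the chaining implied by the sentinels above (grp0
 * and grp1 are purely illustrative names): workgroups owned by one
 * submission chain are linked through ->next, e.g.
 *
 *	grp0->next == grp1
 *	grp1->next == Z_EROFS_VLE_WORKGRP_TAIL	(still open)
 *
 * and the tail sentinel is switched to Z_EROFS_VLE_WORKGRP_TAIL_CLOSED
 * once the io of the chain has been submitted.
 */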
#define z_erofs_vle_workgrp_fmt(grp)	\
	((grp)->flags & Z_EROFS_VLE_WORKGRP_FMT_MASK)
static inline void z_erofs_vle_set_workgrp_fmt(
	struct z_erofs_vle_workgroup *grp,
	unsigned int fmt)
{
	grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
}
/* definitions if multiref is disabled */
#define z_erofs_vle_grab_primary_work(grp)	(&(grp)->work)
#define z_erofs_vle_grab_work(grp, pageofs)	(&(grp)->work)
#define z_erofs_vle_work_workgroup(wrk, primary)	\
	((primary) ? container_of(wrk,	\
		struct z_erofs_vle_workgroup, work) : \
		({ BUG(); (void *)NULL; }))
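/*
 * A quick illustration (assuming multiref stays disabled, the only mode
 * supported here): since the primary work is embedded in its workgroup,
 * the two macros above simply invert each other:
 *
 *	struct z_erofs_vle_work *w = z_erofs_vle_grab_primary_work(grp);
 *
 *	DBG_BUGON(z_erofs_vle_work_workgroup(w, true) != grp);
 */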
#define Z_EROFS_WORKGROUP_SIZE	sizeof(struct z_erofs_vle_workgroup)
struct z_erofs_vle_unzip_io {
	atomic_t pending_bios;
	z_erofs_vle_owned_workgrp_t head;

	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};
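/*
 * Note (an assumption drawn from the union above, not stated elsewhere in
 * this header): a synchronous reader sleeps on u.wait until pending_bios
 * drops to zero, whereas the asynchronous path schedules u.work instead,
 * so the two completion mechanisms never coexist for one io.
 */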
struct z_erofs_vle_unzip_io_sb {
	struct z_erofs_vle_unzip_io io;
	struct super_block *sb;
};
#define Z_EROFS_ONLINEPAGE_COUNT_BITS	2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK	((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT	(Z_EROFS_ONLINEPAGE_COUNT_BITS)
/*
 * waiters (aka. ongoing_packs): the number of outstanding references
 *   that must be dropped before the page can be unlocked;
 * sub-index: 0 for the partial page, >= 1 for each full page sub-index.
 */
typedef atomic_t z_erofs_onlinepage_t;
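/*
 * Layout worked example (derived from the definitions above; the value is
 * chosen purely for illustration): the low Z_EROFS_ONLINEPAGE_COUNT_BITS
 * bits hold the waiter count and the remaining bits hold the sub-index,
 * so a raw value of 0x19 decodes as sub-index 6 (0x19 >> 2) with 1 waiter
 * (0x19 & 3).
 */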
/* type punning: access page->private in place as an atomic counter */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};
static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	BUG_ON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}
static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();	/* let page->private be visible before PagePrivate */
	SetPagePrivate(page);
}
static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}
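/*
 * Typical call pattern (a sketch, not a contract): each physical cluster
 * that contributes data to an online page invokes
 *
 *	z_erofs_onlinepage_fixup(page, index, true);
 *
 * to record the page's sub-index and take one extra reference, which the
 * matching z_erofs_onlinepage_endio() below releases on io completion.
 */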
static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	BUG_ON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}

	debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
#define Z_EROFS_VLE_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VLE_VMAP_GLOBAL_PAGES	2048
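/*
 * Worked example (the numbers are illustrative, not normative): with
 * 16KiB kernel stacks and 8-byte pointers, THREAD_SIZE / 8 /
 * sizeof(struct page *) = 16384 / 8 / 8 = 256, so the 96U cap wins and
 * the on-stack array costs at most 96 * 8 = 768 bytes.
 */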
/* unzip_vle_lz4.c */
extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
				  unsigned int clusterpages,
				  struct page **pages,
				  unsigned int nr_pages,
				  unsigned short pageofs);

extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
					 unsigned int clusterpages,
					 struct page **pages,
					 unsigned int outlen,
					 unsigned short pageofs);

extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
				  unsigned int clusterpages,
				  void *vaddr,
				  unsigned int llen,
				  unsigned short pageofs,
				  bool overlapped);

#endif