/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/unzip_pagevec.h
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
13 #ifndef __EROFS_UNZIP_PAGEVEC_H
14 #define __EROFS_UNZIP_PAGEVEC_H
16 #include <linux/tagptr.h>
/* page type in pagevec for unzip subsystem */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
29 extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
30 __bad_page_type_exclusive(void);
32 /* pagevec tagged pointer */
33 typedef tagptr2_t erofs_vtptr_t;
35 /* pagevec collector */
36 struct z_erofs_pagevec_ctor {
37 struct page *curr, *next;
40 unsigned int nr, index;
43 static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
46 if (ctor->curr == NULL)
50 kunmap_atomic(ctor->pages);
55 static inline struct page *
56 z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
61 /* keep away from occupied pages */
62 if (ctor->next != NULL)
65 for (index = 0; index < nr; ++index) {
66 const erofs_vtptr_t t = ctor->pages[index];
67 const unsigned tags = tagptr_unfold_tags(t);
69 if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
70 return tagptr_unfold_ptr(t);
73 if (unlikely(nr >= ctor->nr))
80 z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
83 struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
85 z_erofs_pagevec_ctor_exit(ctor, atomic);
89 ctor->pages = atomic ?
90 kmap_atomic(ctor->curr) : kmap(ctor->curr);
92 ctor->nr = PAGE_SIZE / sizeof(struct page *);
96 static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
98 erofs_vtptr_t *pages, unsigned i)
101 ctor->curr = ctor->next = NULL;
106 z_erofs_pagevec_ctor_pagedown(ctor, false);
107 while (i > ctor->nr) {
109 z_erofs_pagevec_ctor_pagedown(ctor, false);
113 ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
118 z_erofs_pagevec_ctor_enqueue(struct z_erofs_pagevec_ctor *ctor,
120 enum z_erofs_page_type type,
124 /* some pages cannot be reused as pvec safely without I/O */
125 if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
126 type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
128 if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
129 ctor->index + 1 == ctor->nr)
133 if (unlikely(ctor->index >= ctor->nr))
134 z_erofs_pagevec_ctor_pagedown(ctor, false);
136 /* exclusive page type must be 0 */
137 if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
138 __bad_page_type_exclusive();
140 /* should remind that collector->next never equal to 1, 2 */
141 if (type == (uintptr_t)ctor->next) {
145 ctor->pages[ctor->index++] =
146 tagptr_fold(erofs_vtptr_t, page, type);
150 static inline struct page *
151 z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
152 enum z_erofs_page_type *type)
156 if (unlikely(ctor->index >= ctor->nr)) {
157 DBG_BUGON(!ctor->next);
158 z_erofs_pagevec_ctor_pagedown(ctor, true);
161 t = ctor->pages[ctor->index];
163 *type = tagptr_unfold_tags(t);
165 /* should remind that collector->next never equal to 1, 2 */
166 if (*type == (uintptr_t)ctor->next)
167 ctor->next = tagptr_unfold_ptr(t);
169 ctor->pages[ctor->index++] =
170 tagptr_fold(erofs_vtptr_t, NULL, 0);
172 return tagptr_unfold_ptr(t);