// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle_lz4.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
13 #include "unzip_vle.h"
15 #if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
16 #define EROFS_PERCPU_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
18 #define EROFS_PERCPU_NR_PAGES Z_EROFS_VLE_INLINE_PAGEVECS
22 char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
23 } erofs_pcpubuf[NR_CPUS];
25 int z_erofs_vle_plain_copy(struct page **compressed_pages,
26 unsigned clusterpages,
29 unsigned short pageofs)
33 const unsigned righthalf = PAGE_SIZE - pageofs;
35 bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };
38 percpu_data = erofs_pcpubuf[smp_processor_id()].data;
41 for (i = 0; i < nr_pages; j = i++) {
42 struct page *page = pages[i];
54 dst = kmap_atomic(page);
56 for (; j < clusterpages; ++j) {
57 if (compressed_pages[j] != page)
60 DBG_BUGON(mirrored[j]);
61 memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
69 percpu_data + (i-1) * PAGE_SIZE :
70 kmap_atomic(compressed_pages[i-1]);
72 memcpy(dst, src + righthalf, pageofs);
77 if (unlikely(i >= clusterpages)) {
86 src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
87 kmap_atomic(compressed_pages[i]);
89 memcpy(dst + pageofs, src, righthalf);
95 if (src != NULL && !mirrored[j])
102 extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);
104 int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
105 unsigned clusterpages,
108 unsigned short pageofs)
111 unsigned nr_pages, i, j;
114 if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
117 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
119 if (clusterpages == 1) {
120 vin = kmap_atomic(compressed_pages[0]);
122 vin = erofs_vmap(compressed_pages, clusterpages);
128 vout = erofs_pcpubuf[smp_processor_id()].data;
130 ret = z_erofs_unzip_lz4(vin, vout + pageofs,
131 clusterpages * PAGE_SIZE, outlen);
137 for (i = 0; i < nr_pages; ++i) {
138 j = min((unsigned)PAGE_SIZE - pageofs, outlen);
140 if (pages[i] != NULL) {
141 if (clusterpages == 1 &&
142 pages[i] == compressed_pages[0]) {
143 memcpy(vin + pageofs, vout + pageofs, j);
145 void *dst = kmap_atomic(pages[i]);
147 memcpy(dst + pageofs, vout + pageofs, j);
159 if (clusterpages == 1)
162 erofs_vunmap(vin, clusterpages);
167 int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
168 unsigned clusterpages,
171 unsigned short pageofs,
180 vin = erofs_pcpubuf[smp_processor_id()].data;
182 for (i = 0; i < clusterpages; ++i) {
183 void *t = kmap_atomic(compressed_pages[i]);
185 memcpy(vin + PAGE_SIZE *i, t, PAGE_SIZE);
188 } else if (clusterpages == 1)
189 vin = kmap_atomic(compressed_pages[0]);
191 vin = erofs_vmap(compressed_pages, clusterpages);
194 ret = z_erofs_unzip_lz4(vin, vout + pageofs,
195 clusterpages * PAGE_SIZE, llen);
200 if (clusterpages == 1)
203 erofs_vunmap(vin, clusterpages);