1 // SPDX-License-Identifier: GPL-2.0
3 * linux/drivers/staging/erofs/inode.c
5 * Copyright (C) 2017-2018 HUAWEI, Inc.
6 * http://www.huawei.com/
7 * Created by Gao Xiang <gaoxiang25@huawei.com>
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of the Linux
11 * distribution for more details.
15 #include <trace/events/erofs.h>
/*
 * Read the on-disk inode for @inode from its metadata block(s).
 *
 * If the inode is successfully read, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.  On failure an ERR_PTR() is returned
 * (NOTE(review): the error-handling lines are not fully visible in this
 * chunk — confirm against the full file).  On success, *ofs is advanced
 * to the byte offset just past the on-disk inode within that page.
 */
22 static struct page *read_inode(struct inode *inode, unsigned int *ofs)
24 struct super_block *sb = inode->i_sb;
25 struct erofs_sb_info *sbi = EROFS_SB(sb);
26 struct erofs_vnode *vi = EROFS_V(inode);
/* byte address of the on-disk inode, derived from its nid */
27 const erofs_off_t inode_loc = iloc(sbi, vi->nid);
30 struct erofs_inode_v1 *v1;
/* "copied" is a bounce buffer used when a v2 inode crosses a page */
31 struct erofs_inode_v2 *v2, *copied = NULL;
/* split the byte address into a block number and an in-block offset */
35 blkaddr = erofs_blknr(inode_loc);
36 *ofs = erofs_blkoff(inode_loc);
38 debugln("%s, reading inode nid %llu at %u of blkaddr %u",
39 __func__, vi->nid, *ofs, blkaddr);
41 page = erofs_get_meta_page(sb, blkaddr, false);
43 errln("failed to get inode (nid: %llu) page, err %ld",
44 vi->nid, PTR_ERR(page));
/* every on-disk inode begins with the compact (v1) layout fields */
48 v1 = page_address(page) + *ofs;
49 ifmt = le16_to_cpu(v1->i_advise);
/* reject i_format words carrying feature bits we do not understand */
51 if (ifmt & ~EROFS_I_ALL) {
52 errln("unsupported i_format %u of nid %llu", ifmt, vi->nid);
57 vi->data_mapping_mode = __inode_data_mapping(ifmt);
58 if (unlikely(vi->data_mapping_mode >= EROFS_INODE_LAYOUT_MAX)) {
59 errln("unknown data mapping mode %u of nid %llu",
60 vi->data_mapping_mode, vi->nid);
65 switch (__inode_version(ifmt)) {
66 case EROFS_INODE_LAYOUT_V2:
67 vi->inode_isize = sizeof(struct erofs_inode_v2);
/* check if the inode crosses the page boundary */
69 if (*ofs + vi->inode_isize <= PAGE_SIZE) {
70 *ofs += vi->inode_isize;
71 v2 = (struct erofs_inode_v2 *)v1;
/* number of inode bytes that live in the current page */
73 const unsigned int gotten = PAGE_SIZE - *ofs;
75 copied = kmalloc(vi->inode_isize, GFP_NOFS);
80 memcpy(copied, v1, gotten);
/* the remainder of the inode sits in the following block */
84 page = erofs_get_meta_page(sb, blkaddr + 1, false);
86 errln("failed to get inode payload page (nid: %llu), err %ld",
87 vi->nid, PTR_ERR(page));
91 *ofs = vi->inode_isize - gotten;
92 memcpy((u8 *)copied + gotten, page_address(page), *ofs);
95 vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);
97 inode->i_mode = le16_to_cpu(v2->i_mode);
98 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
99 S_ISLNK(inode->i_mode)) {
100 vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
101 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
103 new_decode_dev(le32_to_cpu(v2->i_u.rdev));
104 } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
/* v2 inodes store 32-bit uid/gid/nlink fields */
110 i_uid_write(inode, le32_to_cpu(v2->i_uid));
111 i_gid_write(inode, le32_to_cpu(v2->i_gid));
112 set_nlink(inode, le32_to_cpu(v2->i_nlink));
114 /* extended inode has its own timestamp */
115 inode->i_ctime.tv_sec = le64_to_cpu(v2->i_ctime);
116 inode->i_ctime.tv_nsec = le32_to_cpu(v2->i_ctime_nsec);
118 inode->i_size = le64_to_cpu(v2->i_size);
121 case EROFS_INODE_LAYOUT_V1:
122 vi->inode_isize = sizeof(struct erofs_inode_v1);
/* a compact inode never crosses a page boundary */
123 *ofs += vi->inode_isize;
124 vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);
126 inode->i_mode = le16_to_cpu(v1->i_mode);
127 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
128 S_ISLNK(inode->i_mode)) {
129 vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
130 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
132 new_decode_dev(le32_to_cpu(v1->i_u.rdev));
133 } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
/* v1 (compact) inodes store 16-bit uid/gid/nlink fields */
139 i_uid_write(inode, le16_to_cpu(v1->i_uid));
140 i_gid_write(inode, le16_to_cpu(v1->i_gid));
141 set_nlink(inode, le16_to_cpu(v1->i_nlink));
143 /* use build time for compact inodes */
144 inode->i_ctime.tv_sec = sbi->build_time;
145 inode->i_ctime.tv_nsec = sbi->build_time_nsec;
147 inode->i_size = le32_to_cpu(v1->i_size);
/* default case: an on-disk inode version this driver cannot parse */
150 errln("unsupported on-disk inode version %u of nid %llu",
151 __inode_version(ifmt), vi->nid);
/* mtime/atime mirror ctime: erofs is read-only, one timestamp suffices */
156 inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
157 inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
158 inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
159 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
161 /* measure inode.i_blocks as the generic filesystem (512-byte units) */
162 inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
/* reached when i_mode matched none of the supported file types */
165 errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid);
/*
 * try_lock can be required since the locking order is:
 *   file data(fs_inode)
 * but the majority of the callers are "iget" requests;
 * in that case we are pretty sure there is no deadlock since
 * no data operations exist. However I tend to use
 * try_lock since it adds little overhead and
 * will succeed immediately.
 *
 * Copies the tail inline data (currently only fast-symlink targets)
 * out of the inode page.  @data is the mapped inode page and @m_pofs
 * the offset of the end of the on-disk inode within it.
 */
185 static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
187 struct erofs_vnode *vi = EROFS_V(inode);
188 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
189 int mode = vi->data_mapping_mode;
/* read_inode() already validated the mapping mode */
191 DBG_BUGON(mode >= EROFS_INODE_LAYOUT_MAX);
193 /* should be inode inline C */
194 if (mode != EROFS_INODE_LAYOUT_INLINE)
197 /* fast symlink (following ext4) */
198 if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
/* +1 byte for the NUL terminator appended below */
199 char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);
201 if (unlikely(lnk == NULL))
/* inline data starts after the xattr area */
204 m_pofs += vi->xattr_isize;
206 /* inline symlink data must not cross the page boundary either */
207 if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
213 /* get in-page inline data */
214 memcpy(lnk, data + m_pofs, inode->i_size);
215 lnk[inode->i_size] = '\0';
218 set_inode_fast_symlink(inode);
/*
 * Initialize a freshly-allocated VFS inode from its on-disk data:
 * read the raw inode, then wire up i_op/i_fop/a_ops by file type,
 * and finally pull in tail inline data if any.
 * NOTE(review): several #else/#endif arms and error checks are not
 * visible in this chunk — confirm against the full file.
 */
223 static int fill_inode(struct inode *inode, int isdir)
229 trace_erofs_fill_inode(inode, isdir);
231 /* read inode base data from disk */
232 page = read_inode(inode, &ofs);
234 return PTR_ERR(page);
236 /* setup the new inode */
237 if (S_ISREG(inode->i_mode)) {
238 #ifdef CONFIG_EROFS_FS_XATTR
239 inode->i_op = &erofs_generic_xattr_iops;
241 inode->i_fop = &generic_ro_fops;
242 } else if (S_ISDIR(inode->i_mode)) {
244 #ifdef CONFIG_EROFS_FS_XATTR
245 &erofs_dir_xattr_iops;
249 inode->i_fop = &erofs_dir_fops;
250 } else if (S_ISLNK(inode->i_mode)) {
251 /* by default, page_get_link is used for symlink */
253 #ifdef CONFIG_EROFS_FS_XATTR
254 &erofs_symlink_xattr_iops,
256 &page_symlink_inode_operations;
/* symlink targets are copied via kmap; keep the page out of highmem */
258 inode_nohighmem(inode);
259 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
260 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
261 #ifdef CONFIG_EROFS_FS_XATTR
262 inode->i_op = &erofs_special_inode_operations;
264 init_special_inode(inode, inode->i_mode, inode->i_rdev);
/* pick the address space ops: compressed vs. raw (uncompressed) data */
270 if (is_inode_layout_compression(inode)) {
271 #ifdef CONFIG_EROFS_FS_ZIP
272 inode->i_mapping->a_ops =
273 &z_erofs_vle_normalaccess_aops;
280 inode->i_mapping->a_ops = &erofs_raw_access_aops;
282 /* fill last page if inline data is available */
283 fill_inline_data(inode, page_address(page), ofs);
/*
 * Look up (or create and fill) the in-memory inode for @nid.
 * Uses the inode cache via iget_locked(); only a newly-allocated
 * inode (I_NEW) is read from disk through fill_inode().
 * Returns the inode or an ERR_PTR() on failure.
 */
292 struct inode *erofs_iget(struct super_block *sb,
293 erofs_nid_t nid, bool isdir)
295 struct inode *inode = iget_locked(sb, nid);
297 if (unlikely(inode == NULL))
298 return ERR_PTR(-ENOMEM);
300 if (inode->i_state & I_NEW) {
302 struct erofs_vnode *vi = EROFS_V(inode);
305 err = fill_inode(inode, isdir);
/* clear I_NEW and wake waiters regardless of fill_inode() result */
307 unlock_new_inode(inode);
310 inode = ERR_PTR(err);
/* inode_operations for regular files when xattr support is built in */
316 #ifdef CONFIG_EROFS_FS_XATTR
317 const struct inode_operations erofs_generic_xattr_iops = {
318 .listxattr = erofs_listxattr,
/* inode_operations for (page-backed) symlinks with xattr support */
322 #ifdef CONFIG_EROFS_FS_XATTR
323 const struct inode_operations erofs_symlink_xattr_iops = {
324 .get_link = page_get_link,
325 .listxattr = erofs_listxattr,
/* inode_operations for special files (chr/blk/fifo/sock) */
329 const struct inode_operations erofs_special_inode_operations = {
330 #ifdef CONFIG_EROFS_FS_XATTR
331 .listxattr = erofs_listxattr,
/* inode_operations for fast symlinks (target inlined in the inode) */
335 #ifdef CONFIG_EROFS_FS_XATTR
336 const struct inode_operations erofs_fast_symlink_xattr_iops = {
337 .get_link = simple_get_link,
338 .listxattr = erofs_listxattr,