5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map
24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
25 * block boundaries (which is not actually allowed)
26 * 12/20/98 added support for strategy 4096
27 * 03/07/99 rewrote udf_block_map (again)
28 * New funcs, inode_bmap, udf_next_aext
29 * 04/19/99 Support for writing device EA's for major/minor #
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/writeback.h>
37 #include <linux/slab.h>
38 #include <linux/crc-itu-t.h>
39 #include <linux/mpage.h>
40 #include <linux/uio.h>
45 MODULE_AUTHOR("Ben Fennema");
46 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
47 MODULE_LICENSE("GPL");
49 #define EXTENT_MERGE_SIZE 5
51 static umode_t udf_convert_permissions(struct fileEntry *);
52 static int udf_update_inode(struct inode *, int);
53 static int udf_sync_inode(struct inode *inode);
54 static int udf_alloc_i_data(struct inode *inode, size_t size);
55 static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
56 static int8_t udf_insert_aext(struct inode *, struct extent_position,
57 struct kernel_lb_addr, uint32_t);
58 static void udf_split_extents(struct inode *, int *, int, int,
59 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
60 static void udf_prealloc_extents(struct inode *, int, int,
61 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
62 static void udf_merge_extents(struct inode *,
63 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
64 static void udf_update_extents(struct inode *,
65 struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
66 struct extent_position *);
67 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
69 static void __udf_clear_extent_cache(struct inode *inode)
71 struct udf_inode_info *iinfo = UDF_I(inode);
73 if (iinfo->cached_extent.lstart != -1) {
74 brelse(iinfo->cached_extent.epos.bh);
75 iinfo->cached_extent.lstart = -1;
79 /* Invalidate extent cache */
80 static void udf_clear_extent_cache(struct inode *inode)
82 struct udf_inode_info *iinfo = UDF_I(inode);
84 spin_lock(&iinfo->i_extent_cache_lock);
85 __udf_clear_extent_cache(inode);
86 spin_unlock(&iinfo->i_extent_cache_lock);
89 /* Return contents of extent cache */
90 static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
91 loff_t *lbcount, struct extent_position *pos)
93 struct udf_inode_info *iinfo = UDF_I(inode);
96 spin_lock(&iinfo->i_extent_cache_lock);
97 if ((iinfo->cached_extent.lstart <= bcount) &&
98 (iinfo->cached_extent.lstart != -1)) {
100 *lbcount = iinfo->cached_extent.lstart;
101 memcpy(pos, &iinfo->cached_extent.epos,
102 sizeof(struct extent_position));
107 spin_unlock(&iinfo->i_extent_cache_lock);
111 /* Add extent to extent cache */
112 static void udf_update_extent_cache(struct inode *inode, loff_t estart,
113 struct extent_position *pos, int next_epos)
115 struct udf_inode_info *iinfo = UDF_I(inode);
117 spin_lock(&iinfo->i_extent_cache_lock);
118 /* Invalidate previously cached extent */
119 __udf_clear_extent_cache(inode);
122 memcpy(&iinfo->cached_extent.epos, pos,
123 sizeof(struct extent_position));
124 iinfo->cached_extent.lstart = estart;
126 switch (iinfo->i_alloc_type) {
127 case ICBTAG_FLAG_AD_SHORT:
128 iinfo->cached_extent.epos.offset -=
129 sizeof(struct short_ad);
131 case ICBTAG_FLAG_AD_LONG:
132 iinfo->cached_extent.epos.offset -=
133 sizeof(struct long_ad);
135 spin_unlock(&iinfo->i_extent_cache_lock);
138 void udf_evict_inode(struct inode *inode)
140 struct udf_inode_info *iinfo = UDF_I(inode);
143 if (!is_bad_inode(inode)) {
144 if (!inode->i_nlink) {
146 udf_setsize(inode, 0);
147 udf_update_inode(inode, IS_SYNC(inode));
149 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
150 inode->i_size != iinfo->i_lenExtents) {
151 udf_warn(inode->i_sb,
152 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
153 inode->i_ino, inode->i_mode,
154 (unsigned long long)inode->i_size,
155 (unsigned long long)iinfo->i_lenExtents);
158 truncate_inode_pages_final(&inode->i_data);
159 invalidate_inode_buffers(inode);
161 kfree(iinfo->i_ext.i_data);
162 iinfo->i_ext.i_data = NULL;
163 udf_clear_extent_cache(inode);
165 udf_free_inode(inode);
169 static void udf_write_failed(struct address_space *mapping, loff_t to)
171 struct inode *inode = mapping->host;
172 struct udf_inode_info *iinfo = UDF_I(inode);
173 loff_t isize = inode->i_size;
176 truncate_pagecache(inode, isize);
177 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
178 down_write(&iinfo->i_data_sem);
179 udf_clear_extent_cache(inode);
180 udf_truncate_extents(inode);
181 up_write(&iinfo->i_data_sem);
186 static int udf_writepage(struct page *page, struct writeback_control *wbc)
188 return block_write_full_page(page, udf_get_block, wbc);
191 static int udf_writepages(struct address_space *mapping,
192 struct writeback_control *wbc)
194 return mpage_writepages(mapping, wbc, udf_get_block);
197 static int udf_readpage(struct file *file, struct page *page)
199 return mpage_readpage(page, udf_get_block);
202 static int udf_readpages(struct file *file, struct address_space *mapping,
203 struct list_head *pages, unsigned nr_pages)
205 return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
208 static int udf_write_begin(struct file *file, struct address_space *mapping,
209 loff_t pos, unsigned len, unsigned flags,
210 struct page **pagep, void **fsdata)
214 ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
216 udf_write_failed(mapping, pos + len);
220 static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
222 struct file *file = iocb->ki_filp;
223 struct address_space *mapping = file->f_mapping;
224 struct inode *inode = mapping->host;
225 size_t count = iov_iter_count(iter);
228 ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
229 if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
230 udf_write_failed(mapping, iocb->ki_pos + count);
234 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
236 return generic_block_bmap(mapping, block, udf_get_block);
239 const struct address_space_operations udf_aops = {
240 .readpage = udf_readpage,
241 .readpages = udf_readpages,
242 .writepage = udf_writepage,
243 .writepages = udf_writepages,
244 .write_begin = udf_write_begin,
245 .write_end = generic_write_end,
246 .direct_IO = udf_direct_IO,
251 * Expand file stored in ICB to a normal one-block-file
253 * This function requires i_data_sem for writing and releases it.
254 * This function requires i_mutex held
/* NOTE(review): converts an in-ICB (embedded-data) file to a normal
 * extent-addressed file by copying the embedded bytes into page 0 and
 * writing it out. Several interior lines are missing from this view
 * (error paths, kmap/kunmap pairing) — confirm against the full file. */
256 int udf_expand_file_adinicb(struct inode *inode)
260 struct udf_inode_info *iinfo = UDF_I(inode);
263 WARN_ON_ONCE(!inode_is_locked(inode));
/* No embedded payload at all: just flip the allocation type in place. */
264 if (!iinfo->i_lenAlloc) {
265 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
266 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
268 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
269 /* from now on we have normal address_space methods */
270 inode->i_data.a_ops = &udf_aops;
271 up_write(&iinfo->i_data_sem);
272 mark_inode_dirty(inode);
276 * Release i_data_sem so that we can lock a page - page lock ranks
277 * above i_data_sem. i_mutex still protects us against file changes.
279 up_write(&iinfo->i_data_sem);
281 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
/* Seed page 0 with the embedded data and zero the tail of the page. */
285 if (!PageUptodate(page)) {
287 memset(kaddr + iinfo->i_lenAlloc, 0x00,
288 PAGE_SIZE - iinfo->i_lenAlloc);
289 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
291 flush_dcache_page(page);
292 SetPageUptodate(page);
/* Wipe the in-ICB copy and switch the inode to extent addressing. */
295 down_write(&iinfo->i_data_sem);
296 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
298 iinfo->i_lenAlloc = 0;
299 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
300 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
302 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
303 /* from now on we have normal address_space methods */
304 inode->i_data.a_ops = &udf_aops;
305 set_page_dirty(page);
307 up_write(&iinfo->i_data_sem);
/* Force the data out now; on failure the embedded state is restored. */
308 err = filemap_fdatawrite(inode->i_mapping);
310 /* Restore everything back so that we don't lose data... */
313 down_write(&iinfo->i_data_sem);
314 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
318 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
319 inode->i_data.a_ops = &udf_adinicb_aops;
320 iinfo->i_lenAlloc = inode->i_size;
321 up_write(&iinfo->i_data_sem);
324 mark_inode_dirty(inode);
/* NOTE(review): expands an in-ICB directory into a freshly allocated block:
 * allocates a block, replays every file identifier from the embedded area
 * into the new block via udf_write_fi(), then records the block as the
 * directory's single extent. Interior error-handling lines are missing
 * from this view — confirm against the full file. */
329 struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
333 struct buffer_head *dbh = NULL;
334 struct kernel_lb_addr eloc;
336 struct extent_position epos;
338 struct udf_fileident_bh sfibh, dfibh;
339 loff_t f_pos = udf_ext0_offset(inode);
340 int size = udf_ext0_offset(inode) + inode->i_size;
341 struct fileIdentDesc cfi, *sfi, *dfi;
342 struct udf_inode_info *iinfo = UDF_I(inode);
344 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
345 alloctype = ICBTAG_FLAG_AD_SHORT;
347 alloctype = ICBTAG_FLAG_AD_LONG;
/* Empty directory: only the allocation type needs changing. */
349 if (!inode->i_size) {
350 iinfo->i_alloc_type = alloctype;
351 mark_inode_dirty(inode);
355 /* alloc block, and copy data to it */
356 *block = udf_new_block(inode->i_sb, inode,
357 iinfo->i_location.partitionReferenceNum,
358 iinfo->i_location.logicalBlockNum, err);
361 newblock = udf_get_pblock(inode->i_sb, *block,
362 iinfo->i_location.partitionReferenceNum,
366 dbh = udf_tgetblk(inode->i_sb, newblock);
370 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
371 set_buffer_uptodate(dbh);
373 mark_buffer_dirty_inode(dbh, inode);
/* Source cursor walks the embedded dir data, dest cursor the new block. */
375 sfibh.soffset = sfibh.eoffset =
376 f_pos & (inode->i_sb->s_blocksize - 1);
377 sfibh.sbh = sfibh.ebh = NULL;
378 dfibh.soffset = dfibh.eoffset = 0;
379 dfibh.sbh = dfibh.ebh = dbh;
380 while (f_pos < size) {
/* Read with in-ICB semantics, write with the new allocation type. */
381 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
382 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
388 iinfo->i_alloc_type = alloctype;
389 sfi->descTag.tagLocation = cpu_to_le32(*block);
390 dfibh.soffset = dfibh.eoffset;
391 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
392 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
393 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
395 le16_to_cpu(sfi->lengthOfImpUse))) {
/* Write failed: revert to in-ICB before bailing out. */
396 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
401 mark_buffer_dirty_inode(dbh, inode);
/* Clear the embedded area and record the new block as the only extent. */
403 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
405 iinfo->i_lenAlloc = 0;
406 eloc.logicalBlockNum = *block;
407 eloc.partitionReferenceNum =
408 iinfo->i_location.partitionReferenceNum;
409 iinfo->i_lenExtents = inode->i_size;
411 epos.block = iinfo->i_location;
412 epos.offset = udf_file_entry_alloc_offset(inode);
413 udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
417 mark_inode_dirty(inode);
421 static int udf_get_block(struct inode *inode, sector_t block,
422 struct buffer_head *bh_result, int create)
426 struct udf_inode_info *iinfo;
429 phys = udf_block_map(inode, block);
431 map_bh(bh_result, inode->i_sb, phys);
437 iinfo = UDF_I(inode);
439 down_write(&iinfo->i_data_sem);
440 if (block == iinfo->i_next_alloc_block + 1) {
441 iinfo->i_next_alloc_block++;
442 iinfo->i_next_alloc_goal++;
446 * Block beyond EOF and prealloc extents? Just discard preallocation
447 * as it is not useful and complicates things.
449 if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
450 udf_discard_prealloc(inode);
451 udf_clear_extent_cache(inode);
452 phys = inode_getblk(inode, block, &err, &new);
457 set_buffer_new(bh_result);
458 map_bh(bh_result, inode->i_sb, phys);
461 up_write(&iinfo->i_data_sem);
465 static struct buffer_head *udf_getblk(struct inode *inode, long block,
466 int create, int *err)
468 struct buffer_head *bh;
469 struct buffer_head dummy;
472 dummy.b_blocknr = -1000;
473 *err = udf_get_block(inode, block, &dummy, create);
474 if (!*err && buffer_mapped(&dummy)) {
475 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
476 if (buffer_new(&dummy)) {
478 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
479 set_buffer_uptodate(bh);
481 mark_buffer_dirty_inode(bh, inode);
489 /* Extend the file with new blocks totaling 'new_block_bytes',
490 * return the number of extents added
/* NOTE(review): grows the file's extent chain by new_block_bytes of
 * NOT_RECORDED_NOT_ALLOCATED space, merging into the last extent when
 * possible; each extent is capped below 1<<30 bytes. Interior lines
 * (declarations, error checks, closing braces) are missing from this
 * view — confirm against the full file. */
492 static int udf_do_extend_file(struct inode *inode,
493 struct extent_position *last_pos,
494 struct kernel_long_ad *last_ext,
495 loff_t new_block_bytes)
/* 'fake' means the last extent has zero length (placeholder only). */
498 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
499 struct super_block *sb = inode->i_sb;
500 struct udf_inode_info *iinfo;
503 /* The previous extent is fake and we should not extend by anything
504 * - there's nothing to do... */
505 if (!new_block_bytes && fake)
508 iinfo = UDF_I(inode);
509 /* Round the last extent up to a multiple of block size */
510 if (last_ext->extLength & (sb->s_blocksize - 1)) {
511 last_ext->extLength =
512 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
513 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
514 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
515 iinfo->i_lenExtents =
516 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
517 ~(sb->s_blocksize - 1);
520 /* Can we merge with the previous extent? */
521 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
522 EXT_NOT_RECORDED_NOT_ALLOCATED) {
/* Merge as much as fits below the (1<<30)-blocksize extent cap. */
523 add = (1 << 30) - sb->s_blocksize -
524 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
525 if (add > new_block_bytes)
526 add = new_block_bytes;
527 new_block_bytes -= add;
528 last_ext->extLength += add;
532 udf_add_aext(inode, last_pos, &last_ext->extLocation,
533 last_ext->extLength, 1);
536 struct kernel_lb_addr tmploc;
539 udf_write_aext(inode, last_pos, &last_ext->extLocation,
540 last_ext->extLength, 1);
543 * We've rewritten the last extent. If we are going to add
544 * more extents, we may need to enter possible following
545 * empty indirect extent.
548 udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
551 /* Managed to do everything necessary? */
552 if (!new_block_bytes)
555 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
556 last_ext->extLocation.logicalBlockNum = 0;
557 last_ext->extLocation.partitionReferenceNum = 0;
558 add = (1 << 30) - sb->s_blocksize;
559 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
561 /* Create enough extents to cover the whole hole */
562 while (new_block_bytes > add) {
563 new_block_bytes -= add;
564 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
565 last_ext->extLength, 1);
570 if (new_block_bytes) {
571 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
573 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
574 last_ext->extLength, 1);
581 /* last_pos should point to the last written extent... */
582 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
583 last_pos->offset -= sizeof(struct short_ad);
584 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
585 last_pos->offset -= sizeof(struct long_ad);
592 /* Extend the final block of the file to final_block_len bytes */
593 static void udf_do_extend_final_block(struct inode *inode,
594 struct extent_position *last_pos,
595 struct kernel_long_ad *last_ext,
598 uint32_t added_bytes;
601 * Extent already large enough? It may be already rounded up to block
604 if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
606 added_bytes = (last_ext->extLength & UDF_EXTENT_LENGTH_MASK) - new_elen;
607 last_ext->extLength += added_bytes;
608 UDF_I(inode)->i_lenExtents += added_bytes;
610 udf_write_aext(inode, last_pos, &last_ext->extLocation,
611 last_ext->extLength, 1);
/* NOTE(review): grows the file to @newsize bytes, either by enlarging the
 * final extent (when newsize still lands inside it) or by appending
 * unallocated extents via udf_do_extend_file(). Several interior lines
 * (declarations, returns, braces) are missing from this view — confirm
 * against the full file. */
614 static int udf_extend_file(struct inode *inode, loff_t newsize)
617 struct extent_position epos;
618 struct kernel_lb_addr eloc;
621 struct super_block *sb = inode->i_sb;
622 sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
625 struct udf_inode_info *iinfo = UDF_I(inode);
626 struct kernel_long_ad extent;
628 bool within_last_ext;
630 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
631 adsize = sizeof(struct short_ad);
632 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
633 adsize = sizeof(struct long_ad);
638 * When creating hole in file, just don't bother with preserving
639 * preallocation. It likely won't be very useful anyway.
641 udf_discard_prealloc(inode);
/* Locate the extent (if any) containing the first new block. */
643 etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
644 within_last_ext = (etype != -1);
645 /* We don't expect extents past EOF... */
646 WARN_ON_ONCE(within_last_ext &&
647 elen > ((loff_t)offset + 1) << inode->i_blkbits);
648 if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
650 (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
651 /* File has no extents at all or has empty last
652 * indirect extent! Create a fake extent... */
653 extent.extLocation.logicalBlockNum = 0;
654 extent.extLocation.partitionReferenceNum = 0;
655 extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
/* Otherwise re-read the last real extent into 'extent'. */
657 epos.offset -= adsize;
658 etype = udf_next_aext(inode, &epos, &extent.extLocation,
659 &extent.extLength, 0);
660 extent.extLength |= etype << 30;
/* Bytes still needed beyond the last existing extent. */
663 new_elen = ((loff_t)offset << inode->i_blkbits) |
664 (newsize & (sb->s_blocksize - 1));
666 /* File has extent covering the new size (could happen when extending
669 if (within_last_ext) {
670 /* Extending file within the last file block */
671 udf_do_extend_final_block(inode, &epos, &extent, new_elen);
673 err = udf_do_extend_file(inode, &epos, &extent, new_elen);
679 iinfo->i_lenExtents = newsize;
/* NOTE(review): the allocation workhorse behind udf_get_block(): walks the
 * extent chain to the extent holding @block, extending the file / filling
 * holes as needed, allocates a physical block, then splits, preallocates,
 * merges and writes back the affected extents (laarr[] scratch array).
 * Called with i_data_sem held for writing. Many interior lines (error
 * paths, loop braces, goto labels) are missing from this view — confirm
 * against the full file. */
685 static sector_t inode_getblk(struct inode *inode, sector_t block,
688 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
689 struct extent_position prev_epos, cur_epos, next_epos;
690 int count = 0, startnum = 0, endnum = 0;
691 uint32_t elen = 0, tmpelen;
692 struct kernel_lb_addr eloc, tmpeloc;
694 loff_t lbcount = 0, b_off = 0;
695 uint32_t newblocknum, newblock;
698 struct udf_inode_info *iinfo = UDF_I(inode);
699 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
705 prev_epos.offset = udf_file_entry_alloc_offset(inode);
706 prev_epos.block = iinfo->i_location;
708 cur_epos = next_epos = prev_epos;
709 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
711 /* find the extent which contains the block we are looking for.
712 alternate between laarr[0] and laarr[1] for locations of the
713 current extent, and the previous extent */
/* Three positions (prev/cur/next) are shifted along as we walk. */
715 if (prev_epos.bh != cur_epos.bh) {
716 brelse(prev_epos.bh);
718 prev_epos.bh = cur_epos.bh;
720 if (cur_epos.bh != next_epos.bh) {
722 get_bh(next_epos.bh);
723 cur_epos.bh = next_epos.bh;
728 prev_epos.block = cur_epos.block;
729 cur_epos.block = next_epos.block;
731 prev_epos.offset = cur_epos.offset;
732 cur_epos.offset = next_epos.offset;
734 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
740 laarr[c].extLength = (etype << 30) | elen;
741 laarr[c].extLocation = eloc;
/* Track a goal block for the allocator: just past the last real extent. */
743 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
744 pgoal = eloc.logicalBlockNum +
745 ((elen + inode->i_sb->s_blocksize - 1) >>
746 inode->i_sb->s_blocksize_bits);
749 } while (lbcount + elen <= b_off);
/* offset = block index of @block within the extent just found. */
752 offset = b_off >> inode->i_sb->s_blocksize_bits;
754 * Move prev_epos and cur_epos into indirect extent if we are at
757 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
758 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
760 /* if the extent is allocated and recorded, return the block
761 if the extent is not a multiple of the blocksize, round up */
763 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
764 if (elen & (inode->i_sb->s_blocksize - 1)) {
765 elen = EXT_RECORDED_ALLOCATED |
766 ((elen + inode->i_sb->s_blocksize - 1) &
767 ~(inode->i_sb->s_blocksize - 1));
768 udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
770 brelse(prev_epos.bh);
772 brelse(next_epos.bh);
773 newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
777 /* Are we beyond EOF and preallocated extent? */
788 /* Create a fake extent when there's not one */
789 memset(&laarr[0].extLocation, 0x00,
790 sizeof(struct kernel_lb_addr));
791 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
792 /* Will udf_do_extend_file() create real extent from
794 startnum = (offset > 0);
796 /* Create extents for the hole between EOF and offset */
797 hole_len = (loff_t)offset << inode->i_blkbits;
798 ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
800 brelse(prev_epos.bh);
802 brelse(next_epos.bh);
809 /* We are not covered by a preallocated extent? */
810 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
811 EXT_NOT_RECORDED_ALLOCATED) {
812 /* Is there any real extent? - otherwise we overwrite
816 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
817 inode->i_sb->s_blocksize;
818 memset(&laarr[c].extLocation, 0x00,
819 sizeof(struct kernel_lb_addr));
826 endnum = startnum = ((count > 2) ? 2 : count);
828 /* if the current extent is in position 0,
829 swap it with the previous */
830 if (!c && count != 1) {
837 /* if the current block is located in an extent,
838 read the next extent */
839 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
841 laarr[c + 1].extLength = (etype << 30) | elen;
842 laarr[c + 1].extLocation = eloc;
850 /* if the current extent is not recorded but allocated, get the
851 * block in the extent corresponding to the requested block */
852 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
853 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
854 else { /* otherwise, allocate a new block */
855 if (iinfo->i_next_alloc_block == block)
856 goal = iinfo->i_next_alloc_goal;
859 if (!(goal = pgoal)) /* XXX: what was intended here? */
860 goal = iinfo->i_location.logicalBlockNum + 1;
863 newblocknum = udf_new_block(inode->i_sb, inode,
864 iinfo->i_location.partitionReferenceNum,
867 brelse(prev_epos.bh);
869 brelse(next_epos.bh);
874 iinfo->i_lenExtents += inode->i_sb->s_blocksize;
877 /* if the extent the requsted block is located in contains multiple
878 * blocks, split the extent into at most three extents. blocks prior
879 * to requested block, requested block, and blocks after requested
881 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
883 #ifdef UDF_PREALLOCATE
884 /* We preallocate blocks only for regular files. It also makes sense
885 * for directories but there's a problem when to drop the
886 * preallocation. We might use some delayed work for that but I feel
887 * it's overengineering for a filesystem like UDF. */
888 if (S_ISREG(inode->i_mode))
889 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
892 /* merge any continuous blocks in laarr */
893 udf_merge_extents(inode, laarr, &endnum);
895 /* write back the new extents, inserting new extents if the new number
896 * of extents is greater than the old number, and deleting extents if
897 * the new number of extents is less than the old number */
898 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
900 brelse(prev_epos.bh);
902 brelse(next_epos.bh);
904 newblock = udf_get_pblock(inode->i_sb, newblocknum,
905 iinfo->i_location.partitionReferenceNum, 0);
/* Remember where the next sequential allocation should aim. */
911 iinfo->i_next_alloc_block = block;
912 iinfo->i_next_alloc_goal = newblocknum;
913 inode->i_ctime = current_time(inode);
916 udf_sync_inode(inode);
918 mark_inode_dirty(inode);
/* NOTE(review): splits the (not-recorded) extent at laarr[*c] around the
 * requested block into up to three pieces — blocks before it, the block
 * itself (now RECORDED_ALLOCATED at newblocknum), and blocks after —
 * updating *c and *endnum. Interior lines (first branch of the split,
 * braces) are missing from this view — confirm against the full file. */
923 static void udf_split_extents(struct inode *inode, int *c, int offset,
925 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
928 unsigned long blocksize = inode->i_sb->s_blocksize;
929 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
931 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
932 (laarr[*c].extLength >> 30) ==
933 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
935 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
936 blocksize - 1) >> blocksize_bits;
937 int8_t etype = (laarr[curr].extLength >> 30);
/* Make room in laarr for the extra pieces of the split. */
941 else if (!offset || blen == offset + 1) {
942 laarr[curr + 2] = laarr[curr + 1];
943 laarr[curr + 1] = laarr[curr];
945 laarr[curr + 3] = laarr[curr + 1];
946 laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
/* Piece before the requested block keeps the original etype. */
950 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
951 udf_free_blocks(inode->i_sb, inode,
952 &laarr[curr].extLocation,
954 laarr[curr].extLength =
955 EXT_NOT_RECORDED_NOT_ALLOCATED |
956 (offset << blocksize_bits);
957 laarr[curr].extLocation.logicalBlockNum = 0;
958 laarr[curr].extLocation.
959 partitionReferenceNum = 0;
961 laarr[curr].extLength = (etype << 30) |
962 (offset << blocksize_bits);
/* The requested block becomes a recorded, allocated extent. */
968 laarr[curr].extLocation.logicalBlockNum = newblocknum;
969 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
970 laarr[curr].extLocation.partitionReferenceNum =
971 UDF_I(inode)->i_location.partitionReferenceNum;
972 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
/* Piece after the requested block, if any. */
976 if (blen != offset + 1) {
977 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
978 laarr[curr].extLocation.logicalBlockNum +=
980 laarr[curr].extLength = (etype << 30) |
981 ((blen - (offset + 1)) << blocksize_bits);
/* NOTE(review): preallocates up to UDF_DEFAULT_PREALLOC_BLOCKS blocks after
 * the just-allocated extent laarr[c], turning part of a following
 * not-recorded extent (or new space at EOF) into a NOT_RECORDED_ALLOCATED
 * extent, and shrinks the following unallocated extents accordingly.
 * Interior lines (branch headers, braces) are missing from this view —
 * confirm against the full file. */
988 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
989 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
992 int start, length = 0, currlength = 0, i;
994 if (*endnum >= (c + 1)) {
/* Following extent already allocated-but-unrecorded: count it in. */
1000 if ((laarr[c + 1].extLength >> 30) ==
1001 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
1003 length = currlength =
1004 (((laarr[c + 1].extLength &
1005 UDF_EXTENT_LENGTH_MASK) +
1006 inode->i_sb->s_blocksize - 1) >>
1007 inode->i_sb->s_blocksize_bits);
/* Sum how much unallocated space follows that we could back with blocks. */
1012 for (i = start + 1; i <= *endnum; i++) {
1015 length += UDF_DEFAULT_PREALLOC_BLOCKS;
1016 } else if ((laarr[i].extLength >> 30) ==
1017 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
1018 length += (((laarr[i].extLength &
1019 UDF_EXTENT_LENGTH_MASK) +
1020 inode->i_sb->s_blocksize - 1) >>
1021 inode->i_sb->s_blocksize_bits);
/* Try to grab contiguous blocks right after the current extent. */
1027 int next = laarr[start].extLocation.logicalBlockNum +
1028 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
1029 inode->i_sb->s_blocksize - 1) >>
1030 inode->i_sb->s_blocksize_bits);
1031 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
1032 laarr[start].extLocation.partitionReferenceNum,
1033 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
1034 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
/* Either extend the existing allocated extent or insert a new one. */
1037 if (start == (c + 1))
1038 laarr[start].extLength +=
1040 inode->i_sb->s_blocksize_bits);
1042 memmove(&laarr[c + 2], &laarr[c + 1],
1043 sizeof(struct long_ad) * (*endnum - (c + 1)));
1045 laarr[c + 1].extLocation.logicalBlockNum = next;
1046 laarr[c + 1].extLocation.partitionReferenceNum =
1047 laarr[c].extLocation.
1048 partitionReferenceNum;
1049 laarr[c + 1].extLength =
1050 EXT_NOT_RECORDED_ALLOCATED |
1052 inode->i_sb->s_blocksize_bits);
/* Consume the newly backed space from the following unallocated extents. */
1056 for (i = start + 1; numalloc && i < *endnum; i++) {
1057 int elen = ((laarr[i].extLength &
1058 UDF_EXTENT_LENGTH_MASK) +
1059 inode->i_sb->s_blocksize - 1) >>
1060 inode->i_sb->s_blocksize_bits;
1062 if (elen > numalloc) {
1063 laarr[i].extLength -=
1065 inode->i_sb->s_blocksize_bits);
1069 if (*endnum > (i + 1))
1072 sizeof(struct long_ad) *
1073 (*endnum - (i + 1)));
1078 UDF_I(inode)->i_lenExtents +=
1079 numalloc << inode->i_sb->s_blocksize_bits;
/* NOTE(review): coalesces adjacent compatible extents in laarr[] (same
 * type and, for allocated ones, physically contiguous), honoring the
 * UDF_EXTENT_LENGTH_MASK cap; also converts NOT_RECORDED_ALLOCATED
 * extents followed by unallocated ones back to unallocated, freeing their
 * blocks. Interior lines are missing from this view — confirm against
 * the full file. */
1084 static void udf_merge_extents(struct inode *inode,
1085 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
1089 unsigned long blocksize = inode->i_sb->s_blocksize;
1090 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1092 for (i = 0; i < (*endnum - 1); i++) {
1093 struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
1094 struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
/* Same type and (for allocated extents) physically contiguous? */
1096 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
1097 (((li->extLength >> 30) ==
1098 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
1099 ((lip1->extLocation.logicalBlockNum -
1100 li->extLocation.logicalBlockNum) ==
1101 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1102 blocksize - 1) >> blocksize_bits)))) {
/* Combined length overflows the extent-length field: fill li to the
 * cap and push the remainder into lip1. */
1104 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1105 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1106 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1107 lip1->extLength = (lip1->extLength -
1109 UDF_EXTENT_LENGTH_MASK) +
1110 UDF_EXTENT_LENGTH_MASK) &
1112 li->extLength = (li->extLength &
1113 UDF_EXTENT_FLAG_MASK) +
1114 (UDF_EXTENT_LENGTH_MASK + 1) -
1116 lip1->extLocation.logicalBlockNum =
1117 li->extLocation.logicalBlockNum +
1119 UDF_EXTENT_LENGTH_MASK) >>
/* Fits: fold lip1 into li and close the gap in laarr. */
1122 li->extLength = lip1->extLength +
1124 UDF_EXTENT_LENGTH_MASK) +
1125 blocksize - 1) & ~(blocksize - 1));
1126 if (*endnum > (i + 2))
1127 memmove(&laarr[i + 1], &laarr[i + 2],
1128 sizeof(struct long_ad) *
1129 (*endnum - (i + 2)));
/* Allocated-unrecorded followed by unallocated: drop the allocation. */
1133 } else if (((li->extLength >> 30) ==
1134 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
1135 ((lip1->extLength >> 30) ==
1136 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
1137 udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
1139 UDF_EXTENT_LENGTH_MASK) +
1140 blocksize - 1) >> blocksize_bits);
1141 li->extLocation.logicalBlockNum = 0;
1142 li->extLocation.partitionReferenceNum = 0;
1144 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1145 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1146 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1147 lip1->extLength = (lip1->extLength -
1149 UDF_EXTENT_LENGTH_MASK) +
1150 UDF_EXTENT_LENGTH_MASK) &
1152 li->extLength = (li->extLength &
1153 UDF_EXTENT_FLAG_MASK) +
1154 (UDF_EXTENT_LENGTH_MASK + 1) -
1157 li->extLength = lip1->extLength +
1159 UDF_EXTENT_LENGTH_MASK) +
1160 blocksize - 1) & ~(blocksize - 1));
1161 if (*endnum > (i + 2))
1162 memmove(&laarr[i + 1], &laarr[i + 2],
1163 sizeof(struct long_ad) *
1164 (*endnum - (i + 2)));
/* Lone allocated-unrecorded extent: free its blocks, keep the length. */
1168 } else if ((li->extLength >> 30) ==
1169 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
1170 udf_free_blocks(inode->i_sb, inode,
1171 &li->extLocation, 0,
1173 UDF_EXTENT_LENGTH_MASK) +
1174 blocksize - 1) >> blocksize_bits);
1175 li->extLocation.logicalBlockNum = 0;
1176 li->extLocation.partitionReferenceNum = 0;
1177 li->extLength = (li->extLength &
1178 UDF_EXTENT_LENGTH_MASK) |
1179 EXT_NOT_RECORDED_NOT_ALLOCATED;
1184 static void udf_update_extents(struct inode *inode,
1185 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
1186 int startnum, int endnum,
1187 struct extent_position *epos)
1190 struct kernel_lb_addr tmploc;
1193 if (startnum > endnum) {
1194 for (i = 0; i < (startnum - endnum); i++)
1195 udf_delete_aext(inode, *epos);
1196 } else if (startnum < endnum) {
1197 for (i = 0; i < (endnum - startnum); i++) {
1198 udf_insert_aext(inode, *epos, laarr[i].extLocation,
1199 laarr[i].extLength);
1200 udf_next_aext(inode, epos, &laarr[i].extLocation,
1201 &laarr[i].extLength, 1);
1206 for (i = start; i < endnum; i++) {
1207 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
1208 udf_write_aext(inode, epos, &laarr[i].extLocation,
1209 laarr[i].extLength, 1);
1213 struct buffer_head *udf_bread(struct inode *inode, int block,
1214 int create, int *err)
1216 struct buffer_head *bh = NULL;
1218 bh = udf_getblk(inode, block, create, err);
1222 if (buffer_uptodate(bh))
1225 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1228 if (buffer_uptodate(bh))
/* NOTE(review): truncate/extend entry point. Growing: extend in-ICB data
 * in place, expand to extent form when the new size no longer fits the
 * ICB, or append extents via udf_extend_file(). Shrinking: zero the tail
 * of in-ICB data or truncate extents. Interior lines (error returns,
 * braces) are missing from this view — confirm against the full file. */
1236 int udf_setsize(struct inode *inode, loff_t newsize)
1239 struct udf_inode_info *iinfo;
1240 int bsize = i_blocksize(inode);
1242 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1243 S_ISLNK(inode->i_mode)))
1245 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1248 iinfo = UDF_I(inode);
1249 if (newsize > inode->i_size) {
1250 down_write(&iinfo->i_data_sem);
1251 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/* New size no longer fits in the ICB: convert to extent form. */
1253 (udf_file_entry_alloc_offset(inode) + newsize)) {
1254 err = udf_expand_file_adinicb(inode);
1257 down_write(&iinfo->i_data_sem);
1259 iinfo->i_lenAlloc = newsize;
1263 err = udf_extend_file(inode, newsize);
1265 up_write(&iinfo->i_data_sem);
1269 up_write(&iinfo->i_data_sem);
1270 truncate_setsize(inode, newsize);
/* Shrinking path below. */
1272 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1273 down_write(&iinfo->i_data_sem);
1274 udf_clear_extent_cache(inode);
/* Zero the now-unused tail of the embedded data area. */
1275 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
1276 0x00, bsize - newsize -
1277 udf_file_entry_alloc_offset(inode));
1278 iinfo->i_lenAlloc = newsize;
1279 truncate_setsize(inode, newsize);
1280 up_write(&iinfo->i_data_sem);
1283 err = block_truncate_page(inode->i_mapping, newsize,
1287 truncate_setsize(inode, newsize);
1288 down_write(&iinfo->i_data_sem);
1289 udf_clear_extent_cache(inode);
1290 udf_truncate_extents(inode);
1291 up_write(&iinfo->i_data_sem);
1294 inode->i_mtime = inode->i_ctime = current_time(inode);
1296 udf_sync_inode(inode);
1298 mark_inode_dirty(inode);
1303 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
1304 * arbitrary - just that we hopefully don't limit any real use of rewritten
1305 * inode on write-once media but avoid looping for too long on corrupted media.
1307 #define UDF_MAX_ICB_NESTING 1024
/*
 * udf_read_inode - read a (Extended) File Entry or Unallocated Space
 * Entry from disk and populate the in-core inode.
 *
 * Follows strategy-4096 indirect ICBs (bounded by UDF_MAX_ICB_NESTING),
 * validates the descriptor tag, copies the allocation-descriptor /
 * extended-attribute area into iinfo->i_ext.i_data, fills uid/gid/mode/
 * nlink/size/times, sanity-checks lengths against the block size, and
 * wires up i_op/i_fop/a_ops per the ICB file type.
 * @hidden_inode: suppresses the link-count fixup for hidden system
 * inodes (the else-branch is elided in this listing).
 * Return: 0 on success, negative errno otherwise (error paths elided).
 */
1309 static int udf_read_inode(struct inode *inode, bool hidden_inode)
1311 struct buffer_head *bh = NULL;
1312 struct fileEntry *fe;
1313 struct extendedFileEntry *efe;
1315 struct udf_inode_info *iinfo = UDF_I(inode);
1316 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1317 struct kernel_lb_addr *iloc = &iinfo->i_location;
1318 unsigned int link_count;
1319 unsigned int indirections = 0;
1320 int bs = inode->i_sb->s_blocksize;
/* The ICB location must lie inside its partition. */
1324 if (iloc->logicalBlockNum >=
1325 sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
1326 udf_debug("block=%d, partition=%d out of range\n",
1327 iloc->logicalBlockNum, iloc->partitionReferenceNum);
1332 * Set defaults, but the inode is still incomplete!
1333 * Note: get_new_inode() sets the following on a new inode:
1336 * i_flags = sb->s_flags
1338 * clean_inode(): zero fills and sets
/* Read the tagged descriptor at the ICB location. */
1343 bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
1345 udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
1349 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1350 ident != TAG_IDENT_USE) {
1351 udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
1352 inode->i_ino, ident);
1356 fe = (struct fileEntry *)bh->b_data;
1357 efe = (struct extendedFileEntry *)bh->b_data;
/* Strategy 4096: the entry may be superseded by an indirect entry
 * pointing at a newer ICB; chase it (loop structure elided) with a
 * nesting cap to survive corrupted media. */
1359 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1360 struct buffer_head *ibh;
1362 ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
1363 if (ident == TAG_IDENT_IE && ibh) {
1364 struct kernel_lb_addr loc;
1365 struct indirectEntry *ie;
1367 ie = (struct indirectEntry *)ibh->b_data;
1368 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1370 if (ie->indirectICB.extLength) {
1372 memcpy(&iinfo->i_location, &loc,
1373 sizeof(struct kernel_lb_addr));
1374 if (++indirections > UDF_MAX_ICB_NESTING) {
1375 udf_err(inode->i_sb,
1376 "too many ICBs in ICB hierarchy"
1377 " (max %d supported)\n",
1378 UDF_MAX_ICB_NESTING);
1386 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1387 udf_err(inode->i_sb, "unsupported strategy type: %d\n",
1388 le16_to_cpu(fe->icbTag.strategyType));
1391 if (fe->icbTag.strategyType == cpu_to_le16(4))
1392 iinfo->i_strat4096 = 0;
1393 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1394 iinfo->i_strat4096 = 1;
1396 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1397 ICBTAG_FLAG_AD_MASK;
/* Only short/long/in-ICB allocation descriptors are supported. */
1398 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
1399 iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
1400 iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1404 iinfo->i_unique = 0;
1405 iinfo->i_lenEAttr = 0;
1406 iinfo->i_lenExtents = 0;
1407 iinfo->i_lenAlloc = 0;
1408 iinfo->i_next_alloc_block = 0;
1409 iinfo->i_next_alloc_goal = 0;
/* Copy the variable-length tail (EAs + allocation descriptors) into
 * iinfo->i_ext.i_data; the offset depends on the descriptor flavour. */
1410 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1413 ret = udf_alloc_i_data(inode, bs -
1414 sizeof(struct extendedFileEntry));
1417 memcpy(iinfo->i_ext.i_data,
1418 bh->b_data + sizeof(struct extendedFileEntry),
1419 bs - sizeof(struct extendedFileEntry));
1420 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1423 ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
1426 memcpy(iinfo->i_ext.i_data,
1427 bh->b_data + sizeof(struct fileEntry),
1428 bs - sizeof(struct fileEntry));
1429 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1432 iinfo->i_lenAlloc = le32_to_cpu(
1433 ((struct unallocSpaceEntry *)bh->b_data)->
1435 ret = udf_alloc_i_data(inode, bs -
1436 sizeof(struct unallocSpaceEntry));
1439 memcpy(iinfo->i_ext.i_data,
1440 bh->b_data + sizeof(struct unallocSpaceEntry),
1441 bs - sizeof(struct unallocSpaceEntry));
/* uid/gid: honour mount-time overrides and fall back to the superblock
 * defaults when the on-disk id is invalid. */
1446 read_lock(&sbi->s_cred_lock);
1447 i_uid_write(inode, le32_to_cpu(fe->uid));
1448 if (!uid_valid(inode->i_uid) ||
1449 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
1450 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1451 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1453 i_gid_write(inode, le32_to_cpu(fe->gid));
1454 if (!gid_valid(inode->i_gid) ||
1455 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
1456 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1457 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
/* Mount-option fmode/dmode override the on-disk permission bits. */
1459 if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1460 sbi->s_fmode != UDF_INVALID_MODE)
1461 inode->i_mode = sbi->s_fmode;
1462 else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1463 sbi->s_dmode != UDF_INVALID_MODE)
1464 inode->i_mode = sbi->s_dmode;
1466 inode->i_mode = udf_convert_permissions(fe);
1467 inode->i_mode &= ~sbi->s_umask;
1468 read_unlock(&sbi->s_cred_lock);
1470 link_count = le16_to_cpu(fe->fileLinkCount);
1472 if (!hidden_inode) {
1478 set_nlink(inode, link_count);
1480 inode->i_size = le64_to_cpu(fe->informationLength);
1481 iinfo->i_lenExtents = inode->i_size;
/* Plain File Entry vs Extended File Entry: same fields, different
 * on-disk layout (EFE additionally carries a create time). */
1483 if (iinfo->i_efe == 0) {
1484 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1485 (inode->i_sb->s_blocksize_bits - 9);
1487 if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
1488 inode->i_atime = sbi->s_record_time;
1490 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1491 fe->modificationTime))
1492 inode->i_mtime = sbi->s_record_time;
1494 if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
1495 inode->i_ctime = sbi->s_record_time;
1497 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1498 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1499 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1500 iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
1502 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1503 (inode->i_sb->s_blocksize_bits - 9);
1505 if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
1506 inode->i_atime = sbi->s_record_time;
1508 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1509 efe->modificationTime))
1510 inode->i_mtime = sbi->s_record_time;
1512 if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
1513 iinfo->i_crtime = sbi->s_record_time;
1515 if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
1516 inode->i_ctime = sbi->s_record_time;
1518 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1519 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1520 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1521 iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
1523 inode->i_generation = iinfo->i_unique;
1526 * Sanity check length of allocation descriptors and extended attrs to
1527 * avoid integer overflows
1529 if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
1531 /* Now do exact checks */
1532 if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
1534 /* Sanity checks for files in ICB so that we don't get confused later */
1535 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1537 * For file in ICB data is stored in allocation descriptor
1538 * so sizes should match
1540 if (iinfo->i_lenAlloc != inode->i_size)
1542 /* File in ICB has to fit in there... */
1543 if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
/* Set VFS ops and mode type bits per ICB file type (break statements
 * are elided from this listing). */
1547 switch (fe->icbTag.fileType) {
1548 case ICBTAG_FILE_TYPE_DIRECTORY:
1549 inode->i_op = &udf_dir_inode_operations;
1550 inode->i_fop = &udf_dir_operations;
1551 inode->i_mode |= S_IFDIR;
1554 case ICBTAG_FILE_TYPE_REALTIME:
1555 case ICBTAG_FILE_TYPE_REGULAR:
1556 case ICBTAG_FILE_TYPE_UNDEF:
1557 case ICBTAG_FILE_TYPE_VAT20:
1558 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1559 inode->i_data.a_ops = &udf_adinicb_aops;
1561 inode->i_data.a_ops = &udf_aops;
1562 inode->i_op = &udf_file_inode_operations;
1563 inode->i_fop = &udf_file_operations;
1564 inode->i_mode |= S_IFREG;
1566 case ICBTAG_FILE_TYPE_BLOCK:
1567 inode->i_mode |= S_IFBLK;
1569 case ICBTAG_FILE_TYPE_CHAR:
1570 inode->i_mode |= S_IFCHR;
1572 case ICBTAG_FILE_TYPE_FIFO:
1573 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1575 case ICBTAG_FILE_TYPE_SOCKET:
1576 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1578 case ICBTAG_FILE_TYPE_SYMLINK:
1579 inode->i_data.a_ops = &udf_symlink_aops;
1580 inode->i_op = &page_symlink_inode_operations;
1581 inode_nohighmem(inode);
1582 inode->i_mode = S_IFLNK | S_IRWXUGO;
1584 case ICBTAG_FILE_TYPE_MAIN:
1585 udf_debug("METADATA FILE-----\n");
1587 case ICBTAG_FILE_TYPE_MIRROR:
1588 udf_debug("METADATA MIRROR FILE-----\n");
1590 case ICBTAG_FILE_TYPE_BITMAP:
1591 udf_debug("METADATA BITMAP FILE-----\n");
1594 udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
1595 inode->i_ino, fe->icbTag.fileType);
/* Device nodes: major/minor come from a type-12 device-spec EA. */
1598 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1599 struct deviceSpec *dsea =
1600 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1602 init_special_inode(inode, inode->i_mode,
1603 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1604 le32_to_cpu(dsea->minorDeviceIdent)));
1605 /* Developer ID ??? */
/*
 * udf_alloc_i_data - allocate the in-core buffer that mirrors the
 * inode's on-disk EA + allocation-descriptor area (@size bytes).
 * Logs and fails (return value elided here, presumably -ENOMEM) when
 * the allocation cannot be satisfied.
 */
1615 static int udf_alloc_i_data(struct inode *inode, size_t size)
1617 struct udf_inode_info *iinfo = UDF_I(inode);
1618 iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1620 if (!iinfo->i_ext.i_data) {
1621 udf_err(inode->i_sb, "(ino %ld) no free memory\n",
/*
 * udf_convert_permissions - translate the UDF on-disk permission word
 * and ICB-tag flags into a POSIX mode_t.
 *
 * UDF stores owner/group/other permission triplets at 2-bit-shifted
 * offsets relative to POSIX; setuid/setgid/sticky live in the ICB tag
 * flags rather than in the permission word.
 */
1629 static umode_t udf_convert_permissions(struct fileEntry *fe)
1632 uint32_t permissions;
1635 permissions = le32_to_cpu(fe->permissions);
1636 flags = le16_to_cpu(fe->icbTag.flags);
1638 mode = ((permissions) & S_IRWXO) |
1639 ((permissions >> 2) & S_IRWXG) |
1640 ((permissions >> 4) & S_IRWXU) |
1641 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1642 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1643 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
/*
 * udf_write_inode - VFS writeback entry point; synchronous only when
 * the writeback control requests WB_SYNC_ALL.
 */
1648 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
1650 return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
/* udf_sync_inode - write the inode to disk and wait for completion. */
1653 static int udf_sync_inode(struct inode *inode)
1655 return udf_update_inode(inode, 1);
/*
 * udf_update_inode - serialize the in-core inode into its on-disk
 * (Extended) File Entry / Unallocated Space Entry and write it out.
 *
 * Builds the descriptor in a fresh zeroed buffer at the ICB location,
 * fills permissions, link count, sizes, timestamps, device EA for
 * char/block nodes, ICB tag (strategy, file type, alloc flags), then
 * computes the tag CRC/checksum and marks the buffer dirty; when
 * @do_sync, also waits via sync_dirty_buffer() and reports I/O errors.
 */
1658 static int udf_update_inode(struct inode *inode, int do_sync)
1660 struct buffer_head *bh = NULL;
1661 struct fileEntry *fe;
1662 struct extendedFileEntry *efe;
1663 uint64_t lb_recorded;
1668 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1669 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1670 struct udf_inode_info *iinfo = UDF_I(inode);
1672 bh = udf_tgetblk(inode->i_sb,
1673 udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
1675 udf_debug("getblk failure\n");
1680 memset(bh->b_data, 0, inode->i_sb->s_blocksize)v;
1681 fe = (struct fileEntry *)bh->b_data;
1682 efe = (struct extendedFileEntry *)bh->b_data;
/* Unallocated Space Entry: only the alloc descriptors are written;
 * the early-out to the tag-finalization code is elided here. */
1685 struct unallocSpaceEntry *use =
1686 (struct unallocSpaceEntry *)bh->b_data;
1688 use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1689 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1690 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1691 sizeof(struct unallocSpaceEntry));
1692 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1693 crclen = sizeof(struct unallocSpaceEntry);
/* -1 on disk means "no uid/gid recorded" (forget mount options). */
1698 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1699 fe->uid = cpu_to_le32(-1);
1701 fe->uid = cpu_to_le32(i_uid_read(inode));
1703 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1704 fe->gid = cpu_to_le32(-1);
1706 fe->gid = cpu_to_le32(i_gid_read(inode));
/* Inverse of udf_convert_permissions(); preserve the UDF-only
 * delete/chattr permission bits already present in the entry. */
1708 udfperms = ((inode->i_mode & S_IRWXO)) |
1709 ((inode->i_mode & S_IRWXG) << 2) |
1710 ((inode->i_mode & S_IRWXU) << 4);
1712 udfperms |= (le32_to_cpu(fe->permissions) &
1713 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1714 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1715 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1716 fe->permissions = cpu_to_le32(udfperms);
/* UDF does not count a directory's "." entry in fileLinkCount. */
1718 if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
1719 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1721 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1723 fe->informationLength = cpu_to_le64(inode->i_size);
/* Device nodes: (re)write the type-12 device-spec extended attribute
 * holding major/minor and the developer regid. */
1725 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1727 struct deviceSpec *dsea =
1728 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1730 dsea = (struct deviceSpec *)
1731 udf_add_extendedattr(inode,
1732 sizeof(struct deviceSpec) +
1733 sizeof(struct regid), 12, 0x3);
1734 dsea->attrType = cpu_to_le32(12);
1735 dsea->attrSubtype = 1;
1736 dsea->attrLength = cpu_to_le32(
1737 sizeof(struct deviceSpec) +
1738 sizeof(struct regid));
1739 dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1741 eid = (struct regid *)dsea->impUse;
1742 memset(eid, 0, sizeof(struct regid));
1743 strcpy(eid->ident, UDF_ID_DEVELOPER);
1744 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1745 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1746 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1747 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
/* logicalBlocksRecorded counts fs-blocks, i_blocks counts 512B
 * sectors; round up when converting. */
1750 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1751 lb_recorded = 0; /* No extents => no blocks! */
1754 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1755 (blocksize_bits - 9);
/* Plain File Entry layout. */
1757 if (iinfo->i_efe == 0) {
1758 memcpy(bh->b_data + sizeof(struct fileEntry),
1759 iinfo->i_ext.i_data,
1760 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1761 fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1763 udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1764 udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1765 udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1766 memset(&(fe->impIdent), 0, sizeof(struct regid));
1767 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1768 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1769 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1770 fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1771 fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1772 fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1773 fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1774 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1775 crclen = sizeof(struct fileEntry);
/* Extended File Entry layout; additionally records objectSize and a
 * create time clamped to be no later than a/m/ctime. */
1777 memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1778 iinfo->i_ext.i_data,
1779 inode->i_sb->s_blocksize -
1780 sizeof(struct extendedFileEntry));
1781 efe->objectSize = cpu_to_le64(inode->i_size);
1782 efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1784 if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
1785 (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
1786 iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
1787 iinfo->i_crtime = inode->i_atime;
1789 if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
1790 (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
1791 iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
1792 iinfo->i_crtime = inode->i_mtime;
1794 if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
1795 (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
1796 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1797 iinfo->i_crtime = inode->i_ctime;
1799 udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1800 udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1801 udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1802 udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1804 memset(&(efe->impIdent), 0, sizeof(struct regid));
1805 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1806 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1807 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1808 efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1809 efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1810 efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1811 efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1812 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1813 crclen = sizeof(struct extendedFileEntry);
/* ICB tag: strategy 4096 advertises 2 entries (direct + indirect). */
1817 if (iinfo->i_strat4096) {
1818 fe->icbTag.strategyType = cpu_to_le16(4096);
1819 fe->icbTag.strategyParameter = cpu_to_le16(1);
1820 fe->icbTag.numEntries = cpu_to_le16(2);
1822 fe->icbTag.strategyType = cpu_to_le16(4);
1823 fe->icbTag.numEntries = cpu_to_le16(1);
1827 fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1828 else if (S_ISDIR(inode->i_mode))
1829 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1830 else if (S_ISREG(inode->i_mode))
1831 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1832 else if (S_ISLNK(inode->i_mode))
1833 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1834 else if (S_ISBLK(inode->i_mode))
1835 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1836 else if (S_ISCHR(inode->i_mode))
1837 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1838 else if (S_ISFIFO(inode->i_mode))
1839 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1840 else if (S_ISSOCK(inode->i_mode))
1841 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
/* Merge alloc type + setuid/setgid/sticky into the existing flags,
 * preserving any other bits already recorded. */
1843 icbflags = iinfo->i_alloc_type |
1844 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1845 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1846 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1847 (le16_to_cpu(fe->icbTag.flags) &
1848 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1849 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1851 fe->icbTag.flags = cpu_to_le16(icbflags);
/* Descriptor version 3 for UDF >= 2.00, else version 2. */
1852 if (sbi->s_udfrev >= 0x0200)
1853 fe->descTag.descVersion = cpu_to_le16(3);
1855 fe->descTag.descVersion = cpu_to_le16(2);
1856 fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1857 fe->descTag.tagLocation = cpu_to_le32(
1858 iinfo->i_location.logicalBlockNum);
/* CRC covers everything after the tag: fixed header + EAs + ADs. */
1859 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
1860 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1861 fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1863 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1865 set_buffer_uptodate(bh);
1868 /* write the data blocks */
1869 mark_buffer_dirty(bh);
1871 sync_dirty_buffer(bh);
1872 if (buffer_write_io_error(bh)) {
1873 udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
/*
 * __udf_iget - get (or read in) the inode at logical block address @ino.
 *
 * Uses iget_locked() keyed on the physical block; a cache hit returns
 * the existing inode immediately, otherwise the on-disk entry is read
 * via udf_read_inode() and the inode unlocked.  Returns ERR_PTR on
 * allocation or read failure.
 */
1883 struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
1886 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1887 struct inode *inode = iget_locked(sb, block);
1891 return ERR_PTR(-ENOMEM);
/* Already in the inode cache and initialized — return it as-is. */
1893 if (!(inode->i_state & I_NEW))
1896 memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1897 err = udf_read_inode(inode, hidden_inode);
1900 return ERR_PTR(err);
1902 unlock_new_inode(inode);
/*
 * udf_setup_indirect_aext - start a new Allocation Extent Descriptor
 * block at @block and chain it to the extent list at *epos.
 *
 * Initializes the AED (zeroed, tagged, back-link to the previous block
 * unless strict UDF), then links it in: if the current block has no room
 * even for the indirect pointer, the last extent is first moved into the
 * new block.  On return *epos points into the fresh AED (final epos
 * update elided in this listing).
 */
1907 int udf_setup_indirect_aext(struct inode *inode, int block,
1908 struct extent_position *epos)
1910 struct super_block *sb = inode->i_sb;
1911 struct buffer_head *bh;
1912 struct allocExtDesc *aed;
1913 struct extent_position nepos;
1914 struct kernel_lb_addr neloc;
1917 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1918 adsize = sizeof(struct short_ad);
1919 else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1920 adsize = sizeof(struct long_ad);
1924 neloc.logicalBlockNum = block;
1925 neloc.partitionReferenceNum = epos->block.partitionReferenceNum;
1927 bh = udf_tgetblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
1931 memset(bh->b_data, 0x00, sb->s_blocksize);
1932 set_buffer_uptodate(bh);
1934 mark_buffer_dirty_inode(bh, inode);
1936 aed = (struct allocExtDesc *)(bh->b_data);
/* Strict-UDF mode omits the back pointer to the previous extent block. */
1937 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
1938 aed->previousAllocExtLocation =
1939 cpu_to_le32(epos->block.logicalBlockNum);
1941 aed->lengthAllocDescs = cpu_to_le32(0);
/* Tag version: 3 for UDF >= 2.00, else 2 (else-branch elided). */
1942 if (UDF_SB(sb)->s_udfrev >= 0x0200)
1946 udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
1947 sizeof(struct tag));
1949 nepos.block = neloc;
1950 nepos.offset = sizeof(struct allocExtDesc);
1954 * Do we have to copy current last extent to make space for indirect
/* Not even the indirect pointer fits: relocate the last extent into
 * the new AED, then overwrite its old slot with the chain pointer. */
1957 if (epos->offset + adsize > sb->s_blocksize) {
1958 struct kernel_lb_addr cp_loc;
1962 epos->offset -= adsize;
1963 cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
1964 cp_len |= ((uint32_t)cp_type) << 30;
1966 __udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
1967 udf_write_aext(inode, epos, &nepos.block,
1968 sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
/* Room for the chain pointer in the current block: just append it. */
1970 __udf_add_aext(inode, epos, &nepos.block,
1971 sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
1981 * Append extent at the given position - should be the first free one in inode
1982 * / indirect extent. This function assumes there is enough space in the inode
1983 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
/*
 * Append an extent descriptor at *epos, which must be the first free
 * slot — the WARN_ONs assert that invariant against i_lenAlloc (in-ICB
 * case) or the AED's lengthAllocDescs (indirect case).  Updates the
 * relevant length bookkeeping and dirties inode/buffer.  Unlike
 * udf_add_aext() this does NOT allocate indirect blocks; caller must
 * have verified there is room.
 */
1985 int __udf_add_aext(struct inode *inode, struct extent_position *epos,
1986 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1988 struct udf_inode_info *iinfo = UDF_I(inode);
1989 struct allocExtDesc *aed;
1992 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1993 adsize = sizeof(struct short_ad);
1994 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1995 adsize = sizeof(struct long_ad);
2000 WARN_ON(iinfo->i_lenAlloc !=
2001 epos->offset - udf_file_entry_alloc_offset(inode));
2003 aed = (struct allocExtDesc *)epos->bh->b_data;
2004 WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
2005 epos->offset - sizeof(struct allocExtDesc));
2006 WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
2009 udf_write_aext(inode, epos, eloc, elen, inc);
/* In-ICB descriptors: length lives in the inode itself. */
2012 iinfo->i_lenAlloc += adsize;
2013 mark_inode_dirty(inode);
/* Indirect AED block: bump its descriptor length and refresh the tag
 * CRC (skipped for strict pre-2.01 UDF per spec). */
2015 aed = (struct allocExtDesc *)epos->bh->b_data;
2016 le32_add_cpu(&aed->lengthAllocDescs, adsize);
2017 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2018 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2019 udf_update_tag(epos->bh->b_data,
2020 epos->offset + (inc ? 0 : adsize));
2022 udf_update_tag(epos->bh->b_data,
2023 sizeof(struct allocExtDesc));
2024 mark_buffer_dirty_inode(epos->bh, inode);
2031 * Append extent at given position - should be the first free one in inode
2032 * / indirect extent. Takes care of allocating and linking indirect blocks.
/*
 * udf_add_aext - append an extent, allocating and linking a new
 * indirect (AED) block first when fewer than two descriptor slots
 * remain (one for the extent, one reserved for a future chain pointer).
 */
2034 int udf_add_aext(struct inode *inode, struct extent_position *epos,
2035 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2038 struct super_block *sb = inode->i_sb;
2040 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2041 adsize = sizeof(struct short_ad);
2042 else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2043 adsize = sizeof(struct long_ad);
/* Need room for this descriptor plus a possible indirect pointer. */
2047 if (epos->offset + (2 * adsize) > sb->s_blocksize) {
/* Allocate the new extent block near the current one. */
2051 new_block = udf_new_block(sb, NULL,
2052 epos->block.partitionReferenceNum,
2053 epos->block.logicalBlockNum, &err);
2057 err = udf_setup_indirect_aext(inode, new_block, epos);
2062 return __udf_add_aext(inode, epos, eloc, elen, inc);
/*
 * udf_write_aext - overwrite the short_ad/long_ad at *epos with
 * (@eloc, @elen), converting to little-endian on-disk form.
 *
 * Writes either into the in-ICB area (iinfo->i_ext.i_data) or the
 * mapped AED buffer, dirties the right object, and advances
 * epos->offset by one descriptor when @inc.
 */
2065 void udf_write_aext(struct inode *inode, struct extent_position *epos,
2066 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2070 struct short_ad *sad;
2071 struct long_ad *lad;
2072 struct udf_inode_info *iinfo = UDF_I(inode);
/* In-ICB case: descriptors live after the EA area inside i_data. */
2075 ptr = iinfo->i_ext.i_data + epos->offset -
2076 udf_file_entry_alloc_offset(inode) +
2079 ptr = epos->bh->b_data + epos->offset;
2081 switch (iinfo->i_alloc_type) {
2082 case ICBTAG_FLAG_AD_SHORT:
/* short_ad carries only a block number; the partition is implied. */
2083 sad = (struct short_ad *)ptr;
2084 sad->extLength = cpu_to_le32(elen);
2085 sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
2086 adsize = sizeof(struct short_ad);
2088 case ICBTAG_FLAG_AD_LONG:
2089 lad = (struct long_ad *)ptr;
2090 lad->extLength = cpu_to_le32(elen);
2091 lad->extLocation = cpu_to_lelb(*eloc);
2092 memset(lad->impUse, 0x00, sizeof(lad->impUse));
2093 adsize = sizeof(struct long_ad);
/* Indirect block: refresh the AED tag CRC unless strict pre-2.01 UDF,
 * then dirty the buffer; in-ICB writes dirty the inode instead. */
2100 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2101 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
2102 struct allocExtDesc *aed =
2103 (struct allocExtDesc *)epos->bh->b_data;
2104 udf_update_tag(epos->bh->b_data,
2105 le32_to_cpu(aed->lengthAllocDescs) +
2106 sizeof(struct allocExtDesc));
2108 mark_buffer_dirty_inode(epos->bh, inode);
2110 mark_inode_dirty(inode);
2114 epos->offset += adsize;
2118 * Only 1 indirect extent in a row really makes sense but allow upto 16 in case
2119 * someone does some weird stuff.
2121 #define UDF_MAX_INDIR_EXTS 16
/*
 * udf_next_aext - fetch the next extent at *epos, transparently
 * following EXT_NEXT_EXTENT_ALLOCDECS chain pointers into indirect
 * AED blocks (bounded by UDF_MAX_INDIR_EXTS against corrupt chains).
 * Returns the extent type (high 2 bits) or -1 (failure path elided).
 */
2123 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
2124 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2127 unsigned int indirections = 0;
2129 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
2130 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
2133 if (++indirections > UDF_MAX_INDIR_EXTS) {
2134 udf_err(inode->i_sb,
2135 "too many indirect extents in inode %lu\n",
/* Hop into the indirect extent block and restart after its header. */
2140 epos->block = *eloc;
2141 epos->offset = sizeof(struct allocExtDesc);
2143 block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
2144 epos->bh = udf_tread(inode->i_sb, block);
2146 udf_debug("reading block %d failed!\n", block);
/*
 * udf_current_aext - decode the extent descriptor at *epos without
 * following indirect chains.
 *
 * Picks the source (in-ICB data vs mapped AED block), bounds-checks the
 * read via udf_get_fileshortad/longad, and returns the 2-bit extent
 * type with location/length in *eloc/*elen (masked to the length bits).
 * For short_ads the partition number is taken from the inode location.
 */
2154 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
2155 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2160 struct short_ad *sad;
2161 struct long_ad *lad;
2162 struct udf_inode_info *iinfo = UDF_I(inode);
/* In-ICB: offset 0 means "start of descriptor area". */
2166 epos->offset = udf_file_entry_alloc_offset(inode);
2167 ptr = iinfo->i_ext.i_data + epos->offset -
2168 udf_file_entry_alloc_offset(inode) +
2170 alen = udf_file_entry_alloc_offset(inode) +
/* Indirect AED block: descriptors follow the AED header; the valid
 * length comes from the AED's own lengthAllocDescs field. */
2174 epos->offset = sizeof(struct allocExtDesc);
2175 ptr = epos->bh->b_data + epos->offset;
2176 alen = sizeof(struct allocExtDesc) +
2177 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
2181 switch (iinfo->i_alloc_type) {
2182 case ICBTAG_FLAG_AD_SHORT:
2183 sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
2186 etype = le32_to_cpu(sad->extLength) >> 30;
2187 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
2188 eloc->partitionReferenceNum =
2189 iinfo->i_location.partitionReferenceNum;
2190 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
2192 case ICBTAG_FLAG_AD_LONG:
2193 lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
2196 etype = le32_to_cpu(lad->extLength) >> 30;
2197 *eloc = lelb_to_cpu(lad->extLocation);
2198 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
2201 udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type);
/*
 * udf_insert_aext - insert extent (@neloc, @nelen) at @epos, shifting
 * every following descriptor down by one slot.
 *
 * Walks the chain overwriting each slot with the previous extent
 * (carrying the displaced one forward in neloc/nelen, type re-packed
 * into the top 2 bits), then appends the final displaced extent.
 * @epos is taken by value so the caller's position is untouched.
 * Returns the type bits of the last extent written.
 */
2208 static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
2209 struct kernel_lb_addr neloc, uint32_t nelen)
2211 struct kernel_lb_addr oeloc;
2218 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
2219 udf_write_aext(inode, &epos, &neloc, nelen, 1);
/* The just-displaced extent becomes the one to insert next. */
2221 nelen = (etype << 30) | oelen;
2223 udf_add_aext(inode, &epos, &neloc, nelen, 1);
2226 return (nelen >> 30);
/*
 * udf_delete_aext - delete the extent descriptor at @epos, compacting
 * the list by shifting all following extents up one slot.
 *
 * oepos trails epos by one descriptor during the copy-up walk.  After
 * the walk the now-unused tail slot(s) are cleared with a zeroed
 * extent; if the deletion emptied a whole indirect block, that block is
 * freed and TWO slots are cleared (hence the doubled udf_write_aext —
 * intentional, not a bug).  Length bookkeeping mirrors __udf_add_aext:
 * i_lenAlloc for in-ICB, the AED's lengthAllocDescs + tag otherwise.
 * @epos is by value; returns the deleted extent's type bits.
 */
2229 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
2231 struct extent_position oepos;
2234 struct allocExtDesc *aed;
2235 struct udf_inode_info *iinfo;
2236 struct kernel_lb_addr eloc;
2244 iinfo = UDF_I(inode);
2245 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2246 adsize = sizeof(struct short_ad);
2247 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2248 adsize = sizeof(struct long_ad);
/* Step past the extent being deleted; nothing to do if none. */
2253 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
/* Shift every subsequent extent one slot back (into oepos). */
2256 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
2257 udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
2258 if (oepos.bh != epos.bh) {
2259 oepos.block = epos.block;
2263 oepos.offset = epos.offset - adsize;
/* Zeroed extent used to blank the freed tail slot(s). */
2266 memset(&eloc, 0x00, sizeof(struct kernel_lb_addr))
2269 if (epos.bh != oepos.bh) {
/* Last indirect block became empty: free it and clear both the
 * chain-pointer slot and the trailing slot. */
2270 udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
2271 udf_write_aext(inode, &oepos, &eloc, elen, 1);
2272 udf_write_aext(inode, &oepos, &eloc, elen, 1);
2274 iinfo->i_lenAlloc -= (adsize * 2);
2275 mark_inode_dirty(inode);
2277 aed = (struct allocExtDesc *)oepos.bh->b_data;
2278 le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
2279 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2280 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2281 udf_update_tag(oepos.bh->b_data,
2282 oepos.offset - (2 * adsize));
2284 udf_update_tag(oepos.bh->b_data,
2285 sizeof(struct allocExtDesc));
2286 mark_buffer_dirty_inode(oepos.bh, inode);
/* Common case: one slot freed. */
2289 udf_write_aext(inode, &oepos, &eloc, elen, 1);
2291 iinfo->i_lenAlloc -= adsize;
2292 mark_inode_dirty(inode);
2294 aed = (struct allocExtDesc *)oepos.bh->b_data;
2295 le32_add_cpu(&aed->lengthAllocDescs, -adsize);
2296 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2297 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2298 udf_update_tag(oepos.bh->b_data,
2299 epos.offset - adsize);
2301 udf_update_tag(oepos.bh->b_data,
2302 sizeof(struct allocExtDesc));
2303 mark_buffer_dirty_inode(oepos.bh, inode);
2310 return (elen >> 30);
/*
 * inode_bmap - find the extent covering file block @block.
 *
 * Starts from the cached position when the extent cache has one,
 * otherwise from the inode's own descriptor area, and walks the extent
 * chain accumulating byte length until the target byte offset is
 * covered.  On success fills *pos/*eloc/*elen, sets *offset to the
 * block offset within the extent, refreshes the extent cache, and
 * returns the extent type; on EOF records i_lenExtents and sets
 * *offset to the blocks past the last extent.
 */
2313 int8_t inode_bmap(struct inode *inode, sector_t block,
2314 struct extent_position *pos, struct kernel_lb_addr *eloc,
2315 uint32_t *elen, sector_t *offset)
2317 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2318 loff_t lbcount = 0, bcount =
2319 (loff_t) block << blocksize_bits;
2321 struct udf_inode_info *iinfo;
2323 iinfo = UDF_I(inode);
/* Cache miss: walk from the start of the inode's extent list. */
2324 if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
2326 pos->block = iinfo->i_location;
2331 etype = udf_next_aext(inode, pos, eloc, elen, 1);
/* Ran past the last extent (EOF). */
2333 *offset = (bcount - lbcount) >> blocksize_bits;
2334 iinfo->i_lenExtents = lbcount;
2338 } while (lbcount <= bcount);
2339 /* update extent cache */
2340 udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
2341 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
2346 long udf_block_map(struct inode *inode, sector_t block)
2348 struct kernel_lb_addr eloc;
2351 struct extent_position epos = {};
2354 down_read(&UDF_I(inode)->i_data_sem);
2356 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2357 (EXT_RECORDED_ALLOCATED >> 30))
2358 ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
2362 up_read(&UDF_I(inode)->i_data_sem);
2365 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2366 return udf_fixed_to_variable(ret);