/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO                      ((unsigned int)(~0))
#define NULL_SECNO                      ((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS    5       /* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS        4096    /* 8GB in maximum */

#define F2FS_MIN_SEGMENTS       9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)    (segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)    (segno + free_i->start_segno)
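/*
 * Illustration (hypothetical numbers, not taken from any on-disk layout):
 * if the main area starts at free_i->start_segno == 512, logical segment 600
 * maps to relative segment GET_L2R_SEGNO() == 88, and relative segment 88
 * maps back to logical segment GET_R2L_SEGNO() == 600.
 */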

#define IS_DATASEG(t)   (t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)   (t >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg)                                             \
        ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||      \
         (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||     \
         (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||     \
         (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||      \
         (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||     \
         (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)                                           \
        ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /              \
          sbi->segs_per_sec) || \
         (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /             \
          sbi->segs_per_sec) || \
         (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /             \
          sbi->segs_per_sec) || \
         (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /              \
          sbi->segs_per_sec) || \
         (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /             \
          sbi->segs_per_sec) || \
         (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /             \
          sbi->segs_per_sec))

#define MAIN_BLKADDR(sbi)                                               \
        (SM_I(sbi) ? SM_I(sbi)->main_blkaddr :                          \
                le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)                                               \
        (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :                          \
                le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)  (SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)  (sbi->total_sections)

#define TOTAL_SEGS(sbi)                                                 \
        (SM_I(sbi) ? SM_I(sbi)->segment_count :                         \
                le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)        (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)       (1ULL << (sbi->log_blocksize +          \
                                        sbi->log_blocks_per_seg))
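/*
 * For scale: with the common mkfs defaults of a 4KB block size
 * (log_blocksize == 12) and 512 blocks per segment (log_blocks_per_seg == 9),
 * SEGMENT_SIZE() evaluates to 1ULL << 21, i.e. a 2MB segment. Other
 * geometries are possible, so treat these numbers as an example only.
 */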

#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) +                    \
         (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)                                  \
        (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)     ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)                              \
        (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)                             \
        (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)                                        \
        ((!is_valid_data_blkaddr(sbi, blk_addr)) ?                      \
        NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),                 \
                GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)                                   \
        ((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)                               \
        ((segno / sbi->segs_per_sec) / sbi->secs_per_zone)

#define GET_SUM_BLOCK(sbi, segno)                               \
        ((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno)                                  \
        (segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)                                 \
        (segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)              \
        (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)                        \
        ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)                    \
        (BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)                                     \
        (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)                                        \
        (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
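/*
 * Illustration: with 4KB filesystem blocks and 512-byte device sectors
 * (an assumed, though common, configuration), F2FS_LOG_SECTORS_PER_BLOCK
 * is 3, so block address 10 converts to sector 80 and 80 sectors convert
 * back to 10 blocks.
 */
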
#define MAX_BIO_BLOCKS(sbi)                                             \
        ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))

/*
 * indicates a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
        ALLOC_RIGHT = 0,
        ALLOC_LEFT
};

/*
 * In victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
        LFS = 0,
        SSR
};

/*
 * In victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
        GC_CB = 0,
        GC_GREEDY
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means the on-demand cleaning job triggered from the background.
 */
enum {
        BG_GC = 0,
        FG_GC,
        FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
        int alloc_mode;                 /* LFS or SSR */
        int gc_mode;                    /* GC_CB or GC_GREEDY */
        unsigned long *dirty_segmap;    /* dirty segment bitmap */
        unsigned int max_search;        /* maximum # of segments to search */
        unsigned int offset;            /* last scanned bitmap offset */
        unsigned int ofs_unit;          /* bitmap search unit */
        unsigned int min_cost;          /* minimum cost */
        unsigned int min_segno;         /* segment # having min. cost */
};

struct seg_entry {
        unsigned int type:6;            /* segment type like CURSEG_XXX_TYPE */
        unsigned int valid_blocks:10;   /* # of valid blocks */
        unsigned int ckpt_valid_blocks:10;      /* # of valid blocks last cp */
        unsigned int padding:6;         /* padding */
        unsigned char *cur_valid_map;   /* validity bitmap of blocks */
        /*
         * # of valid blocks and the validity bitmap stored in the last
         * checkpoint pack. This information is used by the SSR mode.
         */
        unsigned char *ckpt_valid_map;  /* validity bitmap of blocks last cp */
        unsigned char *discard_map;
        unsigned long long mtime;       /* modification time of the segment */
};
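/*
 * Sizing note (an observation, not a requirement stated by this header):
 * the 10-bit valid_blocks/ckpt_valid_blocks fields can count up to 1023
 * blocks, which comfortably covers the usual 512 blocks per segment, and
 * type:6 + valid_blocks:10 + ckpt_valid_blocks:10 + padding:6 pack into a
 * single 32-bit word.
 */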

struct sec_entry {
        unsigned int valid_blocks;      /* # of valid blocks in a section */
};

struct segment_allocation {
        void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * this value is set in a page's private data to indicate that the page is
 * atomically written and that it is on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE             ((unsigned long)-1)

#define IS_ATOMIC_WRITTEN_PAGE(page)                    \
                (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
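/*
 * Typical usage (a sketch; the actual call sites live outside this header,
 * e.g. in segment.c): an atomic write registers the page with something like
 *      set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
 * and later paths test IS_ATOMIC_WRITTEN_PAGE(page) before writeback.
 */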

struct inmem_pages {
        struct list_head list;
        struct page *page;
        block_t old_addr;               /* for revoking when fail to commit */
};

struct sit_info {
        const struct segment_allocation *s_ops;

        block_t sit_base_addr;          /* start block address of SIT area */
        block_t sit_blocks;             /* # of blocks used by SIT area */
        block_t written_valid_blocks;   /* # of valid blocks in main area */
        char *sit_bitmap;               /* SIT bitmap pointer */
        unsigned int bitmap_size;       /* SIT bitmap size */

        unsigned long *tmp_map;                 /* bitmap for temporary use */
        unsigned long *dirty_sentries_bitmap;   /* bitmap for dirty sentries */
        unsigned int dirty_sentries;            /* # of dirty sentries */
        unsigned int sents_per_block;           /* # of SIT entries per block */
        struct mutex sentry_lock;               /* to protect SIT cache */
        struct seg_entry *sentries;             /* SIT segment-level cache */
        struct sec_entry *sec_entries;          /* SIT section-level cache */

        /* for cost-benefit algorithm in cleaning procedure */
        unsigned long long elapsed_time;        /* elapsed time after mount */
        unsigned long long mounted_time;        /* mount time */
        unsigned long long min_mtime;           /* min. modification time */
        unsigned long long max_mtime;           /* max. modification time */
};

struct free_segmap_info {
        unsigned int start_segno;       /* start segment number logically */
        unsigned int free_segments;     /* # of free segments */
        unsigned int free_sections;     /* # of free sections */
        spinlock_t segmap_lock;         /* free segmap lock */
        unsigned long *free_segmap;     /* free segment bitmap */
        unsigned long *free_secmap;     /* free section bitmap */
};

/* Notice: The order of dirty types is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
        DIRTY_HOT_DATA,         /* dirty segments assigned as hot data logs */
        DIRTY_WARM_DATA,        /* dirty segments assigned as warm data logs */
        DIRTY_COLD_DATA,        /* dirty segments assigned as cold data logs */
        DIRTY_HOT_NODE,         /* dirty segments assigned as hot node logs */
        DIRTY_WARM_NODE,        /* dirty segments assigned as warm node logs */
        DIRTY_COLD_NODE,        /* dirty segments assigned as cold node logs */
        DIRTY,                  /* to count # of dirty segments */
        PRE,                    /* to count # of entirely obsolete segments */
        NR_DIRTY_TYPE
};

struct dirty_seglist_info {
        const struct victim_selection *v_ops;   /* victim selection operation */
        unsigned long *dirty_segmap[NR_DIRTY_TYPE];
        struct mutex seglist_lock;              /* lock for segment bitmaps */
        int nr_dirty[NR_DIRTY_TYPE];            /* # of dirty segments */
        unsigned long *victim_secmap;           /* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
        int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
                                                        int, int, char);
};

/* for active log information */
struct curseg_info {
        struct mutex curseg_mutex;              /* lock for consistency */
        struct f2fs_summary_block *sum_blk;     /* cached summary block */
        struct rw_semaphore journal_rwsem;      /* protect journal area */
        struct f2fs_journal *journal;           /* cached journal info */
        unsigned char alloc_type;               /* current allocation type */
        unsigned int segno;                     /* current segment number */
        unsigned short next_blkoff;             /* next block offset to write */
        unsigned int zone;                      /* current zone number */
        unsigned int next_segno;                /* preallocated segment */
};

struct sit_entry_set {
        struct list_head set_list;      /* link with all sit sets */
        unsigned int start_segno;       /* start segno of sits in set */
        unsigned int entry_cnt;         /* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
        return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
                                                unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
                                                unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
                                unsigned int segno, int section)
{
        /*
         * In order to get the # of valid blocks in a section instantly from
         * many segments, f2fs manages two counting structures separately.
         */
        if (section > 1)
                return get_sec_entry(sbi, segno)->valid_blocks;
        else
                return get_seg_entry(sbi, segno)->valid_blocks;
}
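/*
 * Usage note: the third argument selects the granularity; passing a value
 * greater than 1 (typically sbi->segs_per_sec) returns the cached
 * section-level count, anything else returns the per-segment count.
 */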

static inline void seg_info_from_raw_sit(struct seg_entry *se,
                                        struct f2fs_sit_entry *rs)
{
        se->valid_blocks = GET_SIT_VBLOCKS(rs);
        se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
        memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
        memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
        se->type = GET_SIT_TYPE(rs);
        se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
                                        struct f2fs_sit_entry *rs)
{
        unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
                                        se->valid_blocks;
        rs->vblocks = cpu_to_le16(raw_vblocks);
        memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
        memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
        se->ckpt_valid_blocks = se->valid_blocks;
        rs->mtime = cpu_to_le64(se->mtime);
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
                unsigned int max, unsigned int segno)
{
        unsigned int ret;
        spin_lock(&free_i->segmap_lock);
        ret = find_next_bit(free_i->free_segmap, max, segno);
        spin_unlock(&free_i->segmap_lock);
        return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int secno = segno / sbi->segs_per_sec;
        unsigned int start_segno = secno * sbi->segs_per_sec;
        unsigned int next;

        spin_lock(&free_i->segmap_lock);
        clear_bit(segno, free_i->free_segmap);
        free_i->free_segments++;

        next = find_next_bit(free_i->free_segmap,
                        start_segno + sbi->segs_per_sec, start_segno);
        if (next >= start_segno + sbi->segs_per_sec) {
                clear_bit(secno, free_i->free_secmap);
                free_i->free_sections++;
        }
        spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
                unsigned int segno)
{
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int secno = segno / sbi->segs_per_sec;
        set_bit(segno, free_i->free_segmap);
        free_i->free_segments--;
        if (!test_and_set_bit(secno, free_i->free_secmap))
                free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
                unsigned int segno)
{
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int secno = segno / sbi->segs_per_sec;
        unsigned int start_segno = secno * sbi->segs_per_sec;
        unsigned int next;

        spin_lock(&free_i->segmap_lock);
        if (test_and_clear_bit(segno, free_i->free_segmap)) {
                free_i->free_segments++;

                if (IS_CURSEC(sbi, secno))
                        goto skip_free;
                next = find_next_bit(free_i->free_segmap,
                                start_segno + sbi->segs_per_sec, start_segno);
                if (next >= start_segno + sbi->segs_per_sec) {
                        if (test_and_clear_bit(secno, free_i->free_secmap))
                                free_i->free_sections++;
                }
        }
skip_free:
        spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
                unsigned int segno)
{
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int secno = segno / sbi->segs_per_sec;
        spin_lock(&free_i->segmap_lock);
        if (!test_and_set_bit(segno, free_i->free_segmap)) {
                free_i->free_segments--;
                if (!test_and_set_bit(secno, free_i->free_secmap))
                        free_i->free_sections--;
        }
        spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
                void *dst_addr)
{
        struct sit_info *sit_i = SIT_I(sbi);
        memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
        return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
        return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
        return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
        return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
        return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
        return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
                DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
                DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
                DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
                DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
                DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
        return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
        return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
        return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
        int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
        int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

        if (test_opt(sbi, LFS))
                return false;

        return free_sections(sbi) <= (node_secs + 2 * dent_secs +
                                                reserved_sections(sbi) + 1);
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
                                        int freed, int needed)
{
        int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
        int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

        node_secs += get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                return false;

        return (free_sections(sbi) + freed) <=
                (node_secs + 2 * dent_secs + reserved_sections(sbi) + needed);
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
        return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
        return div_u64((u64)valid_user_blocks(sbi) * 100,
                                        sbi->user_block_count);
}
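/*
 * For example (made-up numbers): with valid_user_blocks(sbi) == 750,000 and
 * sbi->user_block_count == 1,000,000, utilization() returns 75, i.e. the
 * percentage of user blocks currently holding valid data.
 */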

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy,
 * and users can control the policy through sysfs entries.
 * There are five policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in the fsync path only for high-performance
 *                     flash storage. IPU will be triggered only if the # of
 *                     dirty pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL        70
#define DEF_MIN_FSYNC_BLOCKS    8

enum {
        F2FS_IPU_FORCE,
        F2FS_IPU_SSR,
        F2FS_IPU_UTIL,
        F2FS_IPU_SSR_UTIL,
        F2FS_IPU_FSYNC,
};
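/*
 * Illustration of how the policy is consumed (see need_inplace_update()
 * below): SM_I(sbi)->ipu_policy is treated as a bitmask over the enum above,
 * so a policy value of (1 << F2FS_IPU_UTIL) enables utilization-based IPU,
 * while a value of 0 leaves every in-place-update trigger disabled.
 */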

static inline bool need_inplace_update(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned int policy = SM_I(sbi)->ipu_policy;

        /* IPU can be done only for the user data */
        if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
                return false;

        if (test_opt(sbi, LFS))
                return false;

        if (policy & (0x1 << F2FS_IPU_FORCE))
                return true;
        if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
                return true;
        if (policy & (0x1 << F2FS_IPU_UTIL) &&
                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
                return true;
        if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
                        utilization(sbi) > SM_I(sbi)->min_ipu_util)
                return true;

        /* this is only set during fdatasync */
        if (policy & (0x1 << F2FS_IPU_FSYNC) &&
                        is_inode_flag_set(inode, FI_NEED_IPU))
                return true;

        return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
                int type)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
                int type)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
        f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
{
        struct f2fs_sb_info *sbi = fio->sbi;

        if (__is_meta_io(fio))
                verify_blkaddr(sbi, blk_addr, META_GENERIC);
        else
                verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
                int segno, struct f2fs_sit_entry *raw_sit)
{
        bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
        int valid_blocks = 0;
        int cur_pos = 0, next_pos;

        /* check bitmap with valid block count */
        do {
                if (is_valid) {
                        next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
                                        sbi->blocks_per_seg,
                                        cur_pos);
                        valid_blocks += next_pos - cur_pos;
                } else
                        next_pos = find_next_bit_le(&raw_sit->valid_map,
                                        sbi->blocks_per_seg,
                                        cur_pos);
                cur_pos = next_pos;
                is_valid = !is_valid;
        } while (cur_pos < sbi->blocks_per_seg);

        if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
                f2fs_msg(sbi->sb, KERN_ERR,
                                "Mismatch valid blocks %d vs. %d",
                                        GET_SIT_VBLOCKS(raw_sit), valid_blocks);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                return -EINVAL;
        }

        /* check segment usage, and check boundary of a given segment number */
        if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
                                        || segno > TOTAL_SEGS(sbi) - 1)) {
                f2fs_msg(sbi->sb, KERN_ERR,
                                "Wrong valid blocks %d or segno %u",
                                        GET_SIT_VBLOCKS(raw_sit), segno);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                return -EINVAL;
        }
        return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
                                                unsigned int start)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int offset = SIT_BLOCK_OFFSET(start);
        block_t blk_addr = sit_i->sit_base_addr + offset;

        check_seg_range(sbi, start);

        /* calculate sit block address */
        if (f2fs_test_bit(offset, sit_i->sit_bitmap))
                blk_addr += sit_i->sit_blocks;

        return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
                                                pgoff_t block_addr)
{
        struct sit_info *sit_i = SIT_I(sbi);
        block_addr -= sit_i->sit_base_addr;
        if (block_addr < sit_i->sit_blocks)
                block_addr += sit_i->sit_blocks;
        else
                block_addr -= sit_i->sit_blocks;

        return block_addr + sit_i->sit_base_addr;
}
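/*
 * Background note, restating what current_sit_addr() and set_to_next_sit()
 * below imply: the SIT area keeps two copies of every SIT block, sit_blocks
 * apart, and sit_bitmap records which copy is currently valid.
 * next_sit_addr() simply returns the address of the other copy so that
 * updates can ping-pong between the two.
 */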

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
        unsigned int block_off = SIT_BLOCK_OFFSET(start);

        f2fs_change_bit(block_off, sit_i->sit_bitmap);
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
        struct sit_info *sit_i = SIT_I(sbi);
        return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
                                                sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
                        unsigned int ofs_in_node, unsigned char version)
{
        sum->nid = cpu_to_le32(nid);
        sum->ofs_in_node = cpu_to_le16(ofs_in_node);
        sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
        return __start_cp_addr(sbi) +
                le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
        return __start_cp_addr(sbi) +
                le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
                                - (base + 1) + type;
}

static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
                                                unsigned int secno)
{
        if (get_valid_blocks(sbi, secno, sbi->segs_per_sec) >=
                                                sbi->fggc_threshold)
                return true;
        return false;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
        if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
                return true;
        return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        return SECTOR_TO_BLOCK(queue_max_sectors(q));
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, one segment's worth of pages is gathered for data, eight
 * segments' worth for nodes, and 8 * MAX_BIO_BLOCKS(sbi) pages for meta.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
        if (sbi->sb->s_bdi->wb.dirty_exceeded)
                return 0;

        if (type == DATA)
                return sbi->blocks_per_seg;
        else if (type == NODE)
                return 8 * sbi->blocks_per_seg;
        else if (type == META)
                return 8 * MAX_BIO_BLOCKS(sbi);
        else
                return 0;
}
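/*
 * Worked example under the common 2MB-segment geometry (blocks_per_seg ==
 * 512, an assumed default): nr_pages_to_skip() lets up to 512 dirty DATA
 * pages, 4096 dirty NODE pages, or 8 * MAX_BIO_BLOCKS(sbi) dirty META pages
 * accumulate before writeback is kicked, unless the bdi is already flagged
 * as dirty_exceeded.
 */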

/*
 * When writing pages, it is better to align nr_to_write with the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
                                        struct writeback_control *wbc)
{
        long nr_to_write, desired;

        if (wbc->sync_mode != WB_SYNC_NONE)
                return 0;

        nr_to_write = wbc->nr_to_write;

        if (type == NODE)
                desired = 2 * max_hw_blocks(sbi);
        else
                desired = MAX_BIO_BLOCKS(sbi);

        wbc->nr_to_write = desired;
        return desired - nr_to_write;
}