
Merge tag 'for-f2fs-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've investigated how f2fs deals with errors given by
  our fault injection facility. With this, we could fix several corner
  cases. And, in order to improve the performance, we set inline_dentry
  by default and enhance the exisiting discard issue flow. In addition,
  we added f2fs_migrate_page for better memory management.

  Enhancements:
   - set inline_dentry by default
   - improve discard issue flow
   - add more fault injection cases in f2fs
   - allow block preallocation for encrypted files
   - introduce migrate_page callback function
   - avoid truncating the next direct node block at every checkpoint

  Bug fixes:
   - set page flag correctly between write_begin and write_end
   - missing error handling cases detected by fault injection
   - preallocate blocks correctly with respect to 4KB alignment
   - dentry and filename handling of encryption
   - lost xattrs of directories"
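
As background on the fault-injection facility mentioned above: this series moves the injection state from a single global into each superblock (see the f2fs.h hunks further down). Below is a minimal, self-contained userspace sketch of the same rate/type gating idea. The struct and helper names mirror the kernel's time_to_inject(), but the fault list and demo loop are illustrative only, and the real code counts operations with atomics rather than a plain int.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative fault types, echoing the FAULT_* enum in fs/f2fs/f2fs.h. */
enum { FAULT_KMALLOC, FAULT_ORPHAN, FAULT_CHECKPOINT, FAULT_IO, FAULT_MAX };

/* Per-"superblock" injection state, as in struct f2fs_fault_info. */
struct fault_info {
    int inject_ops;            /* ops seen since the last injection */
    int inject_rate;           /* inject one fault every inject_rate ops */
    unsigned int inject_type;  /* bitmask of enabled fault types */
};

/* Same gating order as time_to_inject(): rate first, then type bitmask. */
static bool time_to_inject(struct fault_info *ffi, int type)
{
    if (!ffi->inject_rate)
        return false;                   /* injection disabled */
    if (!(ffi->inject_type & (1u << type)))
        return false;                   /* this fault type not selected */
    if (++ffi->inject_ops >= ffi->inject_rate) {
        ffi->inject_ops = 0;
        return true;                    /* inject a fault now */
    }
    return false;
}

int main(void)
{
    /* e.g. mount -o fault_injection=5: one fault per 5 eligible ops */
    struct fault_info ffi = { 0, 5, 1u << FAULT_KMALLOC };
    int i;

    for (i = 0; i < 12; i++)
        printf("op %2d: %s\n", i,
               time_to_inject(&ffi, FAULT_KMALLOC) ? "inject -ENOMEM" : "ok");
    return 0;
}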

* tag 'for-f2fs-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (69 commits)
  f2fs: introduce update_ckpt_flags to clean up
  f2fs: don't submit irrelevant page
  f2fs: fix to commit bio cache after flushing node pages
  f2fs: introduce get_checkpoint_version for cleanup
  f2fs: remove dead variable
  f2fs: remove redundant io plug
  f2fs: support checkpoint error injection
  f2fs: fix to recover old fault injection config in ->remount_fs
  f2fs: do fault injection initialization in default_options
  f2fs: remove redundant value definition
  f2fs: support configuring fault injection per superblock
  f2fs: adjust display format of segment bit
  f2fs: remove dirty inode pages in error path
  f2fs: do not unnecessarily null-terminate encrypted symlink data
  f2fs: handle errors during recover_orphan_inodes
  f2fs: avoid gc in cp_error case
  f2fs: should put_page for summary page
  f2fs: assign return value in f2fs_gc
  f2fs: add customized migrate_page callback
  f2fs: introduce cp_lock to protect updating of ckpt_flags
  ...
Merged to master by Linus Torvalds in commit 4c1fad64ef.
 Documentation/filesystems/f2fs.txt |   1
 MAINTAINERS                        |   5
 fs/f2fs/acl.c                      |  12
 fs/f2fs/acl.h                      |   1
 fs/f2fs/checkpoint.c               | 205
 fs/f2fs/data.c                     | 131
 fs/f2fs/debug.c                    |  17
 fs/f2fs/dir.c                      | 110
 fs/f2fs/f2fs.h                     | 175
 fs/f2fs/file.c                     |  20
 fs/f2fs/gc.c                       |  90
 fs/f2fs/inline.c                   |  25
 fs/f2fs/inode.c                    |  17
 fs/f2fs/namei.c                    |  15
 fs/f2fs/node.c                     |  33
 fs/f2fs/node.h                     |  77
 fs/f2fs/recovery.c                 | 124
 fs/f2fs/segment.c                  | 219
 fs/f2fs/segment.h                  |  11
 fs/f2fs/super.c                    |  93
 fs/f2fs/xattr.c                    |  39
 include/linux/f2fs_fs.h            |   1
 include/trace/events/f2fs.h        |  18
 23 files changed

@@ -131,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The
space of inode block which is used to store inline
dentries is limited to ~3.4k.
noinline_dentry Disable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly,
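
Since inline_dentry is now on by default, the noinline_dentry option documented above is how one opts back out at mount time. A hedged sketch of doing that through mount(2); the device and mountpoint paths are placeholders, and the program needs root plus an existing f2fs image.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
    /* Placeholder paths: adjust to a real f2fs device and mountpoint. */
    if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0, "noinline_dentry")) {
        perror("mount");
        return 1;
    }
    puts("mounted f2fs with inline dentries disabled");
    return 0;
}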

@@ -5105,10 +5105,9 @@ F: include/linux/fscrypto.h
F2FS FILE SYSTEM
M: Jaegeuk Kim <jaegeuk@kernel.org>
M: Changman Lee <cm224.lee@samsung.com>
R: Chao Yu <yuchao0@huawei.com>
M: Chao Yu <yuchao0@huawei.com>
L: linux-f2fs-devel@lists.sourceforge.net
W: http://en.wikipedia.org/wiki/F2FS
W: https://f2fs.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
S: Maintained
F: Documentation/filesystems/f2fs.txt

@@ -109,14 +109,16 @@ fail:
return ERR_PTR(-EINVAL);
}
static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
const struct posix_acl *acl, size_t *size)
{
struct f2fs_acl_header *f2fs_acl;
struct f2fs_acl_entry *entry;
int i;
f2fs_acl = f2fs_kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
sizeof(struct f2fs_acl_entry), GFP_NOFS);
f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
acl->a_count * sizeof(struct f2fs_acl_entry),
GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
@@ -175,7 +177,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
if (retval > 0) {
value = f2fs_kmalloc(retval, GFP_F2FS_ZERO);
value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
@@ -230,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
}
if (acl) {
value = f2fs_acl_to_disk(acl, &size);
value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
if (IS_ERR(value)) {
clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value);

@@ -41,7 +41,6 @@ extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
#define f2fs_check_acl NULL
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL

@@ -28,7 +28,7 @@ struct kmem_cache *inode_entry_slab;
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
set_ckpt_flags(sbi, CP_ERROR_FLAG);
sbi->sb->s_flags |= MS_RDONLY;
if (!end_io)
f2fs_flush_merged_bios(sbi);
@@ -267,7 +267,6 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct blk_plug plug;
long diff, written;
/* collect a number of dirty meta pages and write together */
@@ -280,9 +279,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
/* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc);
blk_start_plug(&plug);
written = sync_meta_pages(sbi, META, wbc->nr_to_write);
blk_finish_plug(&plug);
mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
@@ -388,6 +385,9 @@ const struct address_space_operations f2fs_meta_aops = {
.set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -491,7 +491,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
spin_lock(&im->ino_lock);
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_ORPHAN)) {
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
return -ENOSPC;
}
@@ -531,8 +531,20 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
struct node_info ni;
int err = acquire_orphan_inode(sbi);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: orphan failed (ino=%x), run fsck to fix.",
__func__, ino);
return err;
}
inode = f2fs_iget(sbi->sb, ino);
__add_ino_entry(sbi, ino, ORPHAN_INO);
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
/*
* there should be a bug that we can't find the entry
@@ -546,6 +558,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
get_node_info(sbi, ino, &ni);
/* ENOMEM was fully retried in f2fs_evict_inode. */
if (ni.blk_addr != NULL_ADDR) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: orphan failed (ino=%x), run fsck to fix.",
__func__, ino);
return -EIO;
}
__remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
}
@@ -554,7 +578,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
block_t start_blk, orphan_blocks, i, j;
int err;
if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -578,7 +602,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_put_page(page, 1);
}
/* clear Orphan Flag */
clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
return 0;
}
@@ -639,45 +663,55 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
}
}
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
block_t cp_addr, unsigned long long *version)
static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
struct f2fs_checkpoint **cp_block, struct page **cp_page,
unsigned long long *version)
{
struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize;
struct f2fs_checkpoint *cp_block;
unsigned long long cur_version = 0, pre_version = 0;
size_t crc_offset;
size_t crc_offset = 0;
__u32 crc = 0;
/* Read the 1st cp block in this CP pack */
cp_page_1 = get_meta_page(sbi, cp_addr);
*cp_page = get_meta_page(sbi, cp_addr);
*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
/* get the version number */
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
crc_offset = le32_to_cpu(cp_block->checksum_offset);
if (crc_offset >= blk_size)
goto invalid_cp1;
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
if (crc_offset >= blk_size) {
f2fs_msg(sbi->sb, KERN_WARNING,
"invalid crc_offset: %zu", crc_offset);
return -EINVAL;
}
crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
goto invalid_cp1;
crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
+ crc_offset)));
if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
return -EINVAL;
}
pre_version = cur_cp_version(cp_block);
*version = cur_cp_version(*cp_block);
return 0;
}
/* Read the 2nd cp block in this CP pack */
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
cp_page_2 = get_meta_page(sbi, cp_addr);
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
block_t cp_addr, unsigned long long *version)
{
struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
struct f2fs_checkpoint *cp_block = NULL;
unsigned long long cur_version = 0, pre_version = 0;
int err;
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
crc_offset = le32_to_cpu(cp_block->checksum_offset);
if (crc_offset >= blk_size)
goto invalid_cp2;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_1, version);
if (err)
goto invalid_cp1;
pre_version = *version;
crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_2, version);
if (err)
goto invalid_cp2;
cur_version = cur_cp_version(cp_block);
cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
@@ -972,10 +1006,40 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
finish_wait(&sbi->cp_wait, &wait);
}
static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
spin_lock(&sbi->cp_lock);
if (cpc->reason == CP_UMOUNT)
__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
if (cpc->reason == CP_FASTBOOT)
__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
if (orphan_num)
__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
__set_ckpt_flags(ckpt, CP_FSCK_FLAG);
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
spin_unlock(&sbi->cp_lock);
}
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid;
@@ -984,19 +1048,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
bool invalidate = false;
struct super_block *sb = sbi->sb;
struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
u64 kbytes_written;
/*
* This avoids to conduct wrong roll-forward operations and uses
* metapages, so should be called prior to sync_meta_pages below.
*/
if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
invalidate = true;
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX);
@@ -1036,10 +1091,12 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false);
spin_lock(&sbi->cp_lock);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
spin_unlock(&sbi->cp_lock);
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
@@ -1054,23 +1111,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
cp_payload_blks + data_sum_blocks +
orphan_blocks);
if (cpc->reason == CP_UMOUNT)
set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
else
clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
if (cpc->reason == CP_FASTBOOT)
set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
else
clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
if (orphan_num)
set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
else
clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
set_ckpt_flags(ckpt, CP_FSCK_FLAG);
/* update ckpt flag for checkpoint */
update_ckpt_flags(sbi, cpc);
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
@@ -1137,14 +1179,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
/*
* invalidate meta page which is used temporarily for zeroing out
* block at the end of warm node chain.
*/
if (invalidate)
invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
discard_blk);
release_ino_entry(sbi, false);
if (unlikely(f2fs_cp_error(sbi)))
@@ -1152,6 +1186,17 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
/*
* redirty superblock if metadata like node page or inode cache is
* updated during writing checkpoint.
*/
if (get_pages(sbi, F2FS_DIRTY_NODES) ||
get_pages(sbi, F2FS_DIRTY_IMETA))
set_sbi_flag(sbi, SBI_IS_DIRTY);
f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
return 0;
}
@@ -1190,6 +1235,18 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_merged_bios(sbi);
/* this is the case of multiple fstrims without any changes */
if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
f2fs_bug_on(sbi, prefree_segments(sbi));
flush_sit_entries(sbi, cpc);
clear_prefree_segments(sbi, cpc);
f2fs_wait_all_discard_bio(sbi);
unblock_operations(sbi);
goto out;
}
/*
* update checkpoint pack index
* Increase the version number so that
@@ -1205,6 +1262,8 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc);
f2fs_wait_all_discard_bio(sbi);
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
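
A note on the update_ckpt_flags()/cp_lock changes above: the checkpoint flag helpers do a read-modify-write on a shared ckpt_flags word, so two unsynchronized writers can lose each other's bits. The following is a minimal pthread model of the locked pattern; the names echo the kernel helpers, but this is a userspace sketch, not the kernel API.

#include <pthread.h>
#include <stdio.h>

static unsigned int ckpt_flags;        /* shared flags word */
static pthread_mutex_t cp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked helper: safe only while cp_lock is held (cf. __set_ckpt_flags). */
static void __set_flag(unsigned int f)
{
    ckpt_flags |= f;    /* read-modify-write: racy without the lock */
}

/* Locked wrapper, mirroring set_ckpt_flags(). */
static void set_flag(unsigned int f)
{
    pthread_mutex_lock(&cp_lock);
    __set_flag(f);
    pthread_mutex_unlock(&cp_lock);
}

static void *worker(void *arg)
{
    unsigned int f = (unsigned int)(unsigned long)arg;
    int i;

    for (i = 0; i < 100000; i++)
        set_flag(f);    /* without cp_lock, bits could be lost */
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, worker, (void *)0x1UL);
    pthread_create(&b, NULL, worker, (void *)0x2UL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("flags = 0x%x (expect 0x3)\n", ckpt_flags);
    return 0;
}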

@@ -34,6 +34,11 @@ static void f2fs_read_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
bio->bi_error = -EIO;
#endif
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
fscrypt_release_ctx(bio->bi_private);
@@ -626,11 +631,13 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret = 0;
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
map.m_next_pgofs = NULL;
map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
if (map.m_len > map.m_lblk)
map.m_len -= map.m_lblk;
else
map.m_len = 0;
if (f2fs_encrypted_inode(inode))
return 0;
map.m_next_pgofs = NULL;
if (iocb->ki_flags & IOCB_DIRECT) {
ret = f2fs_convert_inline_inode(inode);
@@ -672,6 +679,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
bool allocated = false;
block_t blkaddr;
if (!maxblocks)
return 0;
map->m_len = 0;
map->m_flags = 0;
@@ -783,6 +793,7 @@ skip:
err = reserve_new_blocks(&dn, prealloc);
if (err)
goto sync_out;
allocated = dn.node_changed;
map->m_len += dn.ofs_in_node - ofs_in_node;
if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
@@ -966,8 +977,8 @@ out:
return ret;
}
struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages)
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL;
@@ -1284,7 +1295,7 @@ write:
if (!wbc->for_reclaim)
need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0))
else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
err = -EAGAIN;
@@ -1344,6 +1355,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int cycled;
int range_whole = 0;
int tag;
int nwritten = 0;
pagevec_init(&pvec, 0);
@@ -1418,6 +1430,8 @@ continue_unlock:
done_index = page->index + 1;
done = 1;
break;
} else {
nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
@@ -1439,6 +1453,10 @@ continue_unlock:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
if (nwritten)
f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
NULL, 0, DATA, WRITE);
return ret;
}
@@ -1480,7 +1498,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
* if some pages were truncated, we cannot guarantee its mapping->host
* to detect pending bios.
*/
f2fs_submit_merged_bio(sbi, DATA, WRITE);
remove_dirty_inode(inode);
return ret;
@@ -1518,8 +1535,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
* we already allocated all the blocks, so we don't need to get
* the block addresses when there is no need to fill the page.
*/
if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
len == PAGE_SIZE)
if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
return 0;
if (f2fs_has_inline_data(inode) ||
@@ -1616,7 +1632,7 @@ repeat:
if (err)
goto fail;
if (need_balance && has_not_enough_free_secs(sbi, 0)) {
if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi, true);
lock_page(page);
@@ -1633,22 +1649,12 @@ repeat:
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
if (len == PAGE_SIZE)
goto out_update;
if (PageUptodate(page))
goto out_clear;
if ((pos & PAGE_MASK) >= i_size_read(inode)) {
unsigned start = pos & (PAGE_SIZE - 1);
unsigned end = start + len;
/* Reading beyond i_size is simple: memset to zero */
zero_user_segments(page, 0, start, end, PAGE_SIZE);
goto out_update;
}
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else {
struct bio *bio;
@@ -1676,11 +1682,6 @@ repeat:
goto fail;
}
}
out_update:
if (!PageUptodate(page))
SetPageUptodate(page);
out_clear:
clear_cold_data(page);
return 0;
fail:
@@ -1698,11 +1699,26 @@ static int f2fs_write_end(struct file *file,
trace_f2fs_write_end(inode, pos, len, copied);
/*
* This should have come from len == PAGE_SIZE, and we expect copied
* to be PAGE_SIZE. Otherwise, we treat it as zero copied and let
* generic_perform_write() try to copy data again with copied=0.
*/
if (!PageUptodate(page)) {
if (unlikely(copied != PAGE_SIZE))
copied = 0;
else
SetPageUptodate(page);
}
if (!copied)
goto unlock_out;
set_page_dirty(page);
clear_cold_data(page);
if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied);
unlock_out:
f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
@@ -1873,6 +1889,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>
int f2fs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
int rc, extra_count;
struct f2fs_inode_info *fi = F2FS_I(mapping->host);
bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
BUG_ON(PageWriteback(page));
/* migrating an atomic written page is safe with the inmem_lock held */
if (atomic_written && !mutex_trylock(&fi->inmem_lock))
return -EAGAIN;
/*
* A reference is expected if PagePrivate is set when moving a mapping,
* but f2fs breaks this rule to maintain dirty page counts while
* truncating pages. Adjusting 'extra_count' here makes it work.
*/
extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
rc = migrate_page_move_mapping(mapping, newpage,
page, NULL, mode, extra_count);
if (rc != MIGRATEPAGE_SUCCESS) {
if (atomic_written)
mutex_unlock(&fi->inmem_lock);
return rc;
}
if (atomic_written) {
struct inmem_pages *cur;
list_for_each_entry(cur, &fi->inmem_pages, list)
if (cur->page == page) {
cur->page = newpage;
break;
}
mutex_unlock(&fi->inmem_lock);
put_page(page);
get_page(newpage);
}
if (PagePrivate(page))
SetPagePrivate(newpage);
set_page_private(newpage, page_private(page));
migrate_page_copy(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
@@ -1885,4 +1953,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
};
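
The f2fs_preallocate_blocks() hunk above is the "4KB alignment" fix from the bug-fix list: the old code sized the preallocation from the byte count alone, while the fixed code derives it from the write's end offset, so only fully overwritten blocks are preallocated. A standalone sketch of the two computations; the macros are simplified versions of the ones in fs/f2fs/f2fs.h.

#include <stdio.h>

#define BLKBITS 12                              /* 4KB blocks */
#define BYTES_TO_BLK(b) ((b) >> BLKBITS)        /* floor division */
#define BLK_ALIGN(b)    BYTES_TO_BLK((b) + (1 << BLKBITS) - 1) /* round up */

int main(void)
{
    unsigned long long pos = 2048, count = 8192;    /* unaligned write */
    unsigned long long lblk = BLK_ALIGN(pos);       /* first whole block */

    /* Old computation: block count derived from the byte count only. */
    unsigned long long old_len = BYTES_TO_BLK(count);

    /* Fixed computation: derived from the end offset, minus the start. */
    unsigned long long new_len = BYTES_TO_BLK(pos + count);
    new_len = (new_len > lblk) ? new_len - lblk : 0;

    /* Prints: start blk 1: old preallocates 2 blocks, fixed 1 */
    printf("start blk %llu: old preallocates %llu blocks, fixed %llu\n",
           lblk, old_len, new_len);
    return 0;
}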

@@ -45,6 +45,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
@@ -54,6 +55,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi);
si->discard_blks = discard_blocks(sbi);
si->valid_node_count = valid_node_count(sbi);
si->valid_inode_count = valid_inode_count(sbi);
si->inline_xattr = atomic_read(&sbi->inline_xattr);
@@ -154,7 +156,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
if (f2fs_discard_en(sbi))
si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -228,8 +232,13 @@ static int stat_show(struct seq_file *s, void *v)
si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs);
seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
si->utilization, si->valid_count);
if (test_opt(si->sbi, DISCARD))
seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
si->utilization, si->valid_count, si->discard_blks);
else
seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
si->utilization, si->valid_count);
seq_printf(s, " - Node: %u (Inode: %u, ",
si->valid_node_count, si->valid_inode_count);
seq_printf(s, "Other: %u)\n - Data: %u\n",
@@ -311,6 +320,8 @@ static int stat_show(struct seq_file *s, void *v)
si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4lld in %4d\n",
si->ndirty_meta, si->meta_pages);
seq_printf(s, " - imeta: %4lld\n",
si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
seq_printf(s, " - free_nids: %9d\n",

@@ -37,7 +37,7 @@ static unsigned int bucket_blocks(unsigned int level)
return 4;
}
unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_UNKNOWN] = DT_UNKNOWN,
[F2FS_FT_REG_FILE] = DT_REG,
[F2FS_FT_DIR] = DT_DIR,
@@ -172,7 +174,10 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
int max_slots;
f2fs_hash_t namehash;
namehash = f2fs_dentry_hash(&name);
if (fname->hash)
namehash = cpu_to_le32(fname->hash);
else
namehash = f2fs_dentry_hash(&name);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
@@ -212,31 +215,17 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
return de;
}
/*
* Find an entry in the specified directory with the wanted name.
* It returns the page where the entry was found (as a parameter - res_page),
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page)
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
struct fscrypt_name *fname, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
struct fscrypt_name fname;
int err;
err = fscrypt_setup_filename(dir, child, 1, &fname);
if (err) {
*res_page = ERR_PTR(err);
return NULL;
}
if (f2fs_has_inline_dentry(dir)) {
*res_page = NULL;
de = find_in_inline_dir(dir, &fname, res_page);
de = find_in_inline_dir(dir, fname, res_page);
goto out;
}
@@ -256,11 +245,35 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
for (level = 0; level < max_depth; level++) {
*res_page = NULL;
de = find_in_level(dir, level, &fname, res_page);
de = find_in_level(dir, level, fname, res_page);
if (de || IS_ERR(*res_page))
break;
}
out:
return de;
}
/*
* Find an entry in the specified directory with the wanted name.
* It returns the page where the entry was found (as a parameter - res_page),
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page)
{
struct f2fs_dir_entry *de = NULL;
struct fscrypt_name fname;
int err;
err = fscrypt_setup_filename(dir, child, 1, &fname);
if (err) {
*res_page = ERR_PTR(err);
return NULL;
}
de = __f2fs_find_entry(dir, &fname, res_page);
fscrypt_free_filename(&fname);
return de;
}
@@ -375,7 +388,8 @@ static int make_empty_dir(struct inode *inode,
}
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
const struct qstr *name, struct page *dpage)
const struct qstr *new_name, const struct qstr *orig_name,
struct page *dpage)
{
struct page *page;
int err;
@@ -400,7 +414,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
err = f2fs_init_security(inode, dir, name, page);
err = f2fs_init_security(inode, dir, orig_name, page);
if (err)
goto put_error;
@@ -417,8 +431,8 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
set_cold_node(inode, page);
}
if (name)
init_dent_inode(name, page);
if (new_name)
init_dent_inode(new_name, page);
/*
* This file should be checkpointed during fsync.
@@ -496,7 +510,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
de->ino = cpu_to_le32(ino);
set_de_type(de, mode);
for (i = 0; i < slots; i++) {
test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
__set_bit_le(bit_pos + i, (void *)d->bitmap);
/* avoid wrong garbage data for readdir */
if (i)
(de + i)->name_len = 0;
@@ -504,6 +518,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
}
int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
@@ -530,7 +545,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
start:
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_DIR_DEPTH))
if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
return -ENOSPC;
#endif
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
@@ -569,7 +584,8 @@ add_dentry:
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
page = init_inode_metadata(inode, dir, new_name, NULL);
page = init_inode_metadata(inode, dir, new_name,
orig_name, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -599,6 +615,26 @@ fail:
return err;
}
int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
struct inode *inode, nid_t ino, umode_t mode)
{
struct qstr new_name;
int err = -EAGAIN;
new_name.name = fname_name(fname);
new_name.len = fname_len(fname);
if (f2fs_has_inline_dentry(dir))
err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
inode, ino, mode);
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
inode, ino, mode);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
}
/*
* Caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
@@ -607,24 +643,15 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
struct fscrypt_name fname;
struct qstr new_name;
int err;
err = fscrypt_setup_filename(dir, name, 0, &fname);
if (err)
return err;
new_name.name = fname_name(&fname);
new_name.len = fname_len(&fname);
err = -EAGAIN;
if (f2fs_has_inline_dentry(dir))
err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
fscrypt_free_filename(&fname);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
}
@@ -634,7 +661,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
page = init_inode_metadata(inode, dir, NULL, NULL);
page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -788,16 +815,9 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
int save_len = fstr->len;
int ret;
de_name.name = f2fs_kmalloc(de_name.len, GFP_NOFS);
if (!de_name.name)
return false;
memcpy(de_name.name, d->filename[bit_pos], de_name.len);
ret = fscrypt_fname_disk_to_usr(d->inode,
(u32)de->hash_code, 0,
&de_name, fstr);
kfree(de_name.name);
if (ret < 0)
return true;

@@ -46,6 +46,8 @@ enum {
FAULT_BLOCK,
FAULT_DIR_DEPTH,
FAULT_EVICT_INODE,
FAULT_IO,
FAULT_CHECKPOINT,
FAULT_MAX,
};
@@ -55,40 +57,8 @@ struct f2fs_fault_info {
unsigned int inject_type;
};
extern struct f2fs_fault_info f2fs_fault;
extern char *fault_name[FAULT_MAX];
#define IS_FAULT_SET(type) (f2fs_fault.inject_type & (1 << (type)))
static inline bool time_to_inject(int type)
{
if (!f2fs_fault.inject_rate)
return false;
if (type == FAULT_KMALLOC && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_PAGE_ALLOC && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_ALLOC_NID && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_ORPHAN && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_BLOCK && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_DIR_DEPTH && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_EVICT_INODE && !IS_FAULT_SET(type))
return false;
atomic_inc(&f2fs_fault.inject_ops);
if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
atomic_set(&f2fs_fault.inject_ops, 0);
printk("%sF2FS-fs : inject %s in %pF\n",
KERN_INFO,
fault_name[type],
__builtin_return_address(0));
return true;
}
return false;
}
#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
#endif
/*
@@ -158,7 +128,7 @@ enum {
CP_DISCARD,
};
#define DEF_BATCHED_TRIM_SECTIONS 32
#define DEF_BATCHED_TRIM_SECTIONS 2
#define BATCHED_TRIM_SEGMENTS(sbi) \
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \
@@ -211,6 +181,13 @@ struct discard_entry {
int len; /* # of consecutive blocks of the discard */
};
struct bio_entry {
struct list_head list;
struct bio *bio;
struct completion event;
int error;
};
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
struct list_head list; /* list head */
@@ -645,6 +622,7 @@ struct f2fs_sm_info {
/* for small discard management */
struct list_head discard_list; /* 4KB discard list */
struct list_head wait_list; /* linked with issued discard bio */
int nr_discards; /* # of discards in the list */
int max_discards; /* max. discards to be issued */
@@ -748,6 +726,7 @@ enum {
SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
};
enum {
@@ -765,7 +744,7 @@ struct f2fs_sb_info {
struct proc_dir_entry *s_proc; /* proc entry */
struct f2fs_super_block *raw_super; /* raw super block pointer */
int valid_super_block; /* valid super block no */
int s_flag; /* flags for sbi */
unsigned long s_flag; /* flags for sbi */
#ifdef CONFIG_F2FS_FS_ENCRYPTION
u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
@@ -785,6 +764,7 @@ struct f2fs_sb_info {
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
@@ -892,8 +872,37 @@ struct f2fs_sb_info {
/* Reference to checksum algorithm driver via cryptoapi */
struct crypto_shash *s_chksum_driver;
/* For fault injection */
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info fault_info;
#endif
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
struct f2fs_fault_info *ffi = &sbi->fault_info;
if (!ffi->inject_rate)
return false;
if (!IS_FAULT_SET(ffi, type))
return false;
atomic_inc(&ffi->inject_ops);
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
atomic_set(&ffi->inject_ops, 0);
printk("%sF2FS-fs : inject %s in %pF\n",
KERN_INFO,
fault_name[type],
__builtin_return_address(0));
return true;
}
return false;
}
#endif
/* For write statistics. Suppose sector size is 512 bytes,
* and the return value is in kbytes. s is of struct f2fs_sb_info.
*/
@@ -1034,17 +1043,17 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
return sbi->s_flag & (0x01 << type);
return test_bit(type, &sbi->s_flag);
}
static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
sbi->s_flag |= (0x01 << type);
set_bit(type, &sbi->s_flag);
}
static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
sbi->s_flag &= ~(0x01 << type);
clear_bit(type, &sbi->s_flag);
}
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
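
The s_flag conversion just above (a plain int with hand-rolled masks becoming an unsigned long driven by set_bit/clear_bit/test_bit) buys atomicity: the kernel's bitops are atomic per bit, so concurrent flag updates need no extra lock. A userspace approximation using GCC atomic builtins; these stand-ins only model the kernel's set_bit()/test_bit(), which are the real interface.

#include <stdio.h>

static unsigned long s_flag;    /* shared flag word, one bit per SBI_* flag */

/* Userspace stand-ins for the kernel's atomic set_bit()/test_bit(). */
static void set_bit_atomic(int nr, unsigned long *addr)
{
    __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
}

static int test_bit_atomic(int nr, const unsigned long *addr)
{
    return (__atomic_load_n(addr, __ATOMIC_SEQ_CST) >> nr) & 1;
}

int main(void)
{
    enum { SBI_IS_DIRTY = 0, SBI_NEED_CP = 5 };    /* illustrative values */

    set_bit_atomic(SBI_NEED_CP, &s_flag);
    printf("dirty=%d need_cp=%d\n",
           test_bit_atomic(SBI_IS_DIRTY, &s_flag),
           test_bit_atomic(SBI_NEED_CP, &s_flag));
    return 0;
}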
@@ -1052,26 +1061,57 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver);
}
static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
return ckpt_flags & f;
}
static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}
static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags;
ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags |= f;
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
spin_lock(&sbi->cp_lock);
__set_ckpt_flags(F2FS_CKPT(sbi), f);
spin_unlock(&sbi->cp_lock);
}
static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags;
ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags &= (~f);
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
spin_lock(&sbi->cp_lock);
__clear_ckpt_flags(F2FS_CKPT(sbi), f);
spin_unlock(&sbi->cp_lock);
}
static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
return blk_queue_discard(q);
}
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
@@ -1110,8 +1150,8 @@ static inline bool __remain_node_summaries(int reason)
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) ||