mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-10-31 16:54:21 +00:00 
			
		
		
		
	f2fs-for-5.7-rc1
In this round, we've mainly focused on fixing bugs and addressing issues in
 recently introduced compression support.
 
 Enhancement:
 - add zstd support, and set LZ4 by default
 - add ioctl() to show # of compressed blocks
 - show mount time in debugfs
 - replace rwsem with spinlock
 - avoid lock contention in DIO reads
 
 Some major bug fixes wrt compression:
 - compressed block count
 - memory access and leak
 - remove obsolete fields
 - flag controls
 
 Other bug fixes and clean ups:
 - fix overflow when handling .flags in inode_info
 - fix SPO issue during resize FS flow
 - fix compression with fsverity enabled
 - potential deadlock when writing compressed pages
 - show missing mount options
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE00UqedjCtOrGVvQiQBSofoJIUNIFAl6L5f0ACgkQQBSofoJI
 UNImoQ/7BHKpwgpgH/DuydO9ess0XuUgQPQxyj+LE79l0jdBo8FxQZJVNAekx2+h
 ANTDHjsgqry6xuczOJXzFhECoOrCqZuffrkQM1p3owfzOH9Wrx6aiOomFBJyk/WB
 kXAY7LDPUwGF5uDB8tvVhM082qLXOlP0coO57f9Ip/OpaG8YOkti+KbcOKJrJ9o3
 63IAzu89D/9XJw8834rDSersiEenSJm3jLC12uOxgXMzjDb1Ul1JH0P51gEu6g6O
 3df5tiGJcFfaKVVWPHG+UEfav6mvi28+zU9f+dSmL0Wvb8dIqSgUf6ty8KCuRBYw
 kQYi+E0G1dcGi99AitCOrFxzY+oEo/A5wq2HDI6RkNlu6krD8qYNyWcMbP7dNHnU
 /5BVN+5d78iR1vKZpTo4X6dojf6B21Tn/OmABSZ5d7B6pI2fY5bjy2WgSLSY7YvP
 A6sepb9RAAGvpKvvkHI7gYwDKFMel+vfD6em1SKH5iKaDC0rJTUDUy8PTz/qMPBS
 vMn396dLx+TzTa0dZUuSF8NNk6sPZEReC3AuMNAIPSKiuD7tatRxvutHeEg5ktrr
 ggOQB67MfKjPMBKmgMIm6XMuILcCGIB1MqbPRlyKtC6rjdMPIKKOfeHJlLFmYwfF
 gqvCIFlW4DlxHpHH+LbUFKoUA3zofltL91SHUVATJjmiZIT2pqQ=
 =FVxq
 -----END PGP SIGNATURE-----
Merge tag 'f2fs-for-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've mainly focused on fixing bugs and addressing
  issues in recently introduced compression support.
  Enhancement:
   - add zstd support, and set LZ4 by default
   - add ioctl() to show # of compressed blocks
   - show mount time in debugfs
   - replace rwsem with spinlock
   - avoid lock contention in DIO reads
  Some major bug fixes wrt compression:
   - compressed block count
   - memory access and leak
   - remove obsolete fields
   - flag controls
  Other bug fixes and clean ups:
   - fix overflow when handling .flags in inode_info
   - fix SPO issue during resize FS flow
   - fix compression with fsverity enabled
   - potential deadlock when writing compressed pages
   - show missing mount options"
* tag 'f2fs-for-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (66 commits)
  f2fs: keep inline_data when compression conversion
  f2fs: fix to disable compression on directory
  f2fs: add missing CONFIG_F2FS_FS_COMPRESSION
  f2fs: switch discard_policy.timeout to bool type
  f2fs: fix to verify tpage before releasing in f2fs_free_dic()
  f2fs: show compression in statx
  f2fs: clean up dic->tpages assignment
  f2fs: compress: support zstd compress algorithm
  f2fs: compress: add .{init,destroy}_decompress_ctx callback
  f2fs: compress: fix to call missing destroy_compress_ctx()
  f2fs: change default compression algorithm
  f2fs: clean up {cic,dic}.ref handling
  f2fs: fix to use f2fs_readpage_limit() in f2fs_read_multi_pages()
  f2fs: xattr.h: Make stub helpers inline
  f2fs: fix to avoid double unlock
  f2fs: fix potential .flags overflow on 32bit architecture
  f2fs: fix NULL pointer dereference in f2fs_verity_work()
  f2fs: fix to clear PG_error if fsverity failed
  f2fs: don't call fscrypt_get_encryption_info() explicitly in f2fs_tmpfile()
  f2fs: don't trigger data flush in foreground operation
  ...
			
			
This commit is contained in:
		
						commit
						f40f31cadc
					
				
					 24 changed files with 821 additions and 429 deletions
				
			
		|  | @ -318,3 +318,8 @@ Date:		September 2019 | ||||||
| Contact:	"Hridya Valsaraju" <hridya@google.com> | Contact:	"Hridya Valsaraju" <hridya@google.com> | ||||||
| Description:	Average number of valid blocks. | Description:	Average number of valid blocks. | ||||||
| 		Available when CONFIG_F2FS_STAT_FS=y. | 		Available when CONFIG_F2FS_STAT_FS=y. | ||||||
|  | 
 | ||||||
|  | What:		/sys/fs/f2fs/<disk>/mounted_time_sec | ||||||
|  | Date:		February 2020 | ||||||
|  | Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org> | ||||||
|  | Description:	Show the mounted time in secs of this partition. | ||||||
|  |  | ||||||
|  | @ -243,8 +243,8 @@ checkpoint=%s[:%u[%]]  Set to "disable" to turn off checkpointing. Set to "enabl | ||||||
|                        hide up to all remaining free space. The actual space that |                        hide up to all remaining free space. The actual space that | ||||||
|                        would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable |                        would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable | ||||||
|                        This space is reclaimed once checkpoint=enable. |                        This space is reclaimed once checkpoint=enable. | ||||||
| compress_algorithm=%s  Control compress algorithm, currently f2fs supports "lzo" | compress_algorithm=%s  Control compress algorithm, currently f2fs supports "lzo", | ||||||
|                        and "lz4" algorithm. |                        "lz4" and "zstd" algorithm. | ||||||
| compress_log_size=%u   Support configuring compress cluster size, the size will | compress_log_size=%u   Support configuring compress cluster size, the size will | ||||||
|                        be 4KB * (1 << %u), 16KB is minimum size, also it's |                        be 4KB * (1 << %u), 16KB is minimum size, also it's | ||||||
|                        default size. |                        default size. | ||||||
|  |  | ||||||
|  | @ -118,3 +118,12 @@ config F2FS_FS_LZ4 | ||||||
| 	default y | 	default y | ||||||
| 	help | 	help | ||||||
| 	  Support LZ4 compress algorithm, if unsure, say Y. | 	  Support LZ4 compress algorithm, if unsure, say Y. | ||||||
|  | 
 | ||||||
|  | config F2FS_FS_ZSTD | ||||||
|  | 	bool "ZSTD compression support" | ||||||
|  | 	depends on F2FS_FS_COMPRESSION | ||||||
|  | 	select ZSTD_COMPRESS | ||||||
|  | 	select ZSTD_DECOMPRESS | ||||||
|  | 	default y | ||||||
|  | 	help | ||||||
|  | 	  Support ZSTD compress algorithm, if unsure, say Y. | ||||||
|  |  | ||||||
|  | @ -50,9 +50,6 @@ repeat: | ||||||
| 	return page; | 	return page; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 |  | ||||||
|  * We guarantee no failure on the returned page. |  | ||||||
|  */ |  | ||||||
| static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, | static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, | ||||||
| 							bool is_meta) | 							bool is_meta) | ||||||
| { | { | ||||||
|  | @ -206,7 +203,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  * Readahead CP/NAT/SIT/SSA pages |  * Readahead CP/NAT/SIT/SSA/POR pages | ||||||
|  */ |  */ | ||||||
| int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, | int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, | ||||||
| 							int type, bool sync) | 							int type, bool sync) | ||||||
|  | @ -898,7 +895,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Finding out valid cp block involves read both | 	 * Finding out valid cp block involves read both | ||||||
| 	 * sets( cp pack1 and cp pack 2) | 	 * sets( cp pack 1 and cp pack 2) | ||||||
| 	 */ | 	 */ | ||||||
| 	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); | 	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); | ||||||
| 	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); | 	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); | ||||||
|  | @ -1250,20 +1247,20 @@ static void unblock_operations(struct f2fs_sb_info *sbi) | ||||||
| 	f2fs_unlock_all(sbi); | 	f2fs_unlock_all(sbi); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) | void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) | ||||||
| { | { | ||||||
| 	DEFINE_WAIT(wait); | 	DEFINE_WAIT(wait); | ||||||
| 
 | 
 | ||||||
| 	for (;;) { | 	for (;;) { | ||||||
| 		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); | 		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); | ||||||
| 
 | 
 | ||||||
| 		if (!get_pages(sbi, F2FS_WB_CP_DATA)) | 		if (!get_pages(sbi, type)) | ||||||
| 			break; | 			break; | ||||||
| 
 | 
 | ||||||
| 		if (unlikely(f2fs_cp_error(sbi))) | 		if (unlikely(f2fs_cp_error(sbi))) | ||||||
| 			break; | 			break; | ||||||
| 
 | 
 | ||||||
| 		io_schedule_timeout(5*HZ); | 		io_schedule_timeout(DEFAULT_IO_TIMEOUT); | ||||||
| 	} | 	} | ||||||
| 	finish_wait(&sbi->cp_wait, &wait); | 	finish_wait(&sbi->cp_wait, &wait); | ||||||
| } | } | ||||||
|  | @ -1301,10 +1298,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| 	else | 	else | ||||||
| 		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); | 		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); | ||||||
| 
 | 
 | ||||||
| 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || | 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) | ||||||
| 		is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) |  | ||||||
| 		__set_ckpt_flags(ckpt, CP_FSCK_FLAG); | 		__set_ckpt_flags(ckpt, CP_FSCK_FLAG); | ||||||
| 
 | 
 | ||||||
|  | 	if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) | ||||||
|  | 		__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); | ||||||
|  | 	else | ||||||
|  | 		__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); | ||||||
|  | 
 | ||||||
| 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) | 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) | ||||||
| 		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG); | 		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG); | ||||||
| 	else | 	else | ||||||
|  | @ -1384,13 +1385,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| 
 | 
 | ||||||
| 	/* Flush all the NAT/SIT pages */ | 	/* Flush all the NAT/SIT pages */ | ||||||
| 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); | 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); | ||||||
| 	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && |  | ||||||
| 					!f2fs_cp_error(sbi)); |  | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/* start to update checkpoint, cp ver is already updated previously */ | ||||||
| 	 * modify checkpoint |  | ||||||
| 	 * version number is already updated |  | ||||||
| 	 */ |  | ||||||
| 	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); | 	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); | ||||||
| 	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); | 	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); | ||||||
| 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { | 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { | ||||||
|  | @ -1493,11 +1489,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| 
 | 
 | ||||||
| 	/* Here, we have one bio having CP pack except cp pack 2 page */ | 	/* Here, we have one bio having CP pack except cp pack 2 page */ | ||||||
| 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); | 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); | ||||||
| 	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && | 	/* Wait for all dirty meta pages to be submitted for IO */ | ||||||
| 					!f2fs_cp_error(sbi)); | 	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); | ||||||
| 
 | 
 | ||||||
| 	/* wait for previous submitted meta pages writeback */ | 	/* wait for previous submitted meta pages writeback */ | ||||||
| 	f2fs_wait_on_all_pages_writeback(sbi); | 	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); | ||||||
| 
 | 
 | ||||||
| 	/* flush all device cache */ | 	/* flush all device cache */ | ||||||
| 	err = f2fs_flush_device_cache(sbi); | 	err = f2fs_flush_device_cache(sbi); | ||||||
|  | @ -1506,7 +1502,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| 
 | 
 | ||||||
| 	/* barrier and flush checkpoint cp pack 2 page if it can */ | 	/* barrier and flush checkpoint cp pack 2 page if it can */ | ||||||
| 	commit_checkpoint(sbi, ckpt, start_blk); | 	commit_checkpoint(sbi, ckpt, start_blk); | ||||||
| 	f2fs_wait_on_all_pages_writeback(sbi); | 	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * invalidate intermediate page cache borrowed from meta inode which are | 	 * invalidate intermediate page cache borrowed from meta inode which are | ||||||
|  | @ -1543,9 +1539,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| 	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; | 	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 |  | ||||||
|  * We guarantee that this checkpoint procedure will not fail. |  | ||||||
|  */ |  | ||||||
| int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| { | { | ||||||
| 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | ||||||
|  | @ -1613,7 +1606,6 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) | ||||||
| 
 | 
 | ||||||
| 	f2fs_flush_sit_entries(sbi, cpc); | 	f2fs_flush_sit_entries(sbi, cpc); | ||||||
| 
 | 
 | ||||||
| 	/* unlock all the fs_lock[] in do_checkpoint() */ |  | ||||||
| 	err = do_checkpoint(sbi, cpc); | 	err = do_checkpoint(sbi, cpc); | ||||||
| 	if (err) | 	if (err) | ||||||
| 		f2fs_release_discard_addrs(sbi); | 		f2fs_release_discard_addrs(sbi); | ||||||
|  | @ -1626,7 +1618,7 @@ stop: | ||||||
| 	if (cpc->reason & CP_RECOVERY) | 	if (cpc->reason & CP_RECOVERY) | ||||||
| 		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); | 		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); | ||||||
| 
 | 
 | ||||||
| 	/* do checkpoint periodically */ | 	/* update CP_TIME to trigger checkpoint periodically */ | ||||||
| 	f2fs_update_time(sbi, CP_TIME); | 	f2fs_update_time(sbi, CP_TIME); | ||||||
| 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); | 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); | ||||||
| out: | out: | ||||||
|  |  | ||||||
|  | @ -11,6 +11,7 @@ | ||||||
| #include <linux/backing-dev.h> | #include <linux/backing-dev.h> | ||||||
| #include <linux/lzo.h> | #include <linux/lzo.h> | ||||||
| #include <linux/lz4.h> | #include <linux/lz4.h> | ||||||
|  | #include <linux/zstd.h> | ||||||
| 
 | 
 | ||||||
| #include "f2fs.h" | #include "f2fs.h" | ||||||
| #include "node.h" | #include "node.h" | ||||||
|  | @ -20,6 +21,8 @@ struct f2fs_compress_ops { | ||||||
| 	int (*init_compress_ctx)(struct compress_ctx *cc); | 	int (*init_compress_ctx)(struct compress_ctx *cc); | ||||||
| 	void (*destroy_compress_ctx)(struct compress_ctx *cc); | 	void (*destroy_compress_ctx)(struct compress_ctx *cc); | ||||||
| 	int (*compress_pages)(struct compress_ctx *cc); | 	int (*compress_pages)(struct compress_ctx *cc); | ||||||
|  | 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic); | ||||||
|  | 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic); | ||||||
| 	int (*decompress_pages)(struct decompress_io_ctx *dic); | 	int (*decompress_pages)(struct decompress_io_ctx *dic); | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | @ -52,7 +55,7 @@ bool f2fs_is_compressed_page(struct page *page) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void f2fs_set_compressed_page(struct page *page, | static void f2fs_set_compressed_page(struct page *page, | ||||||
| 		struct inode *inode, pgoff_t index, void *data, refcount_t *r) | 		struct inode *inode, pgoff_t index, void *data) | ||||||
| { | { | ||||||
| 	SetPagePrivate(page); | 	SetPagePrivate(page); | ||||||
| 	set_page_private(page, (unsigned long)data); | 	set_page_private(page, (unsigned long)data); | ||||||
|  | @ -60,8 +63,6 @@ static void f2fs_set_compressed_page(struct page *page, | ||||||
| 	/* i_crypto_info and iv index */ | 	/* i_crypto_info and iv index */ | ||||||
| 	page->index = index; | 	page->index = index; | ||||||
| 	page->mapping = inode->i_mapping; | 	page->mapping = inode->i_mapping; | ||||||
| 	if (r) |  | ||||||
| 		refcount_inc(r); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void f2fs_put_compressed_page(struct page *page) | static void f2fs_put_compressed_page(struct page *page) | ||||||
|  | @ -291,6 +292,165 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = { | ||||||
| }; | }; | ||||||
| #endif | #endif | ||||||
| 
 | 
 | ||||||
|  | #ifdef CONFIG_F2FS_FS_ZSTD | ||||||
|  | #define F2FS_ZSTD_DEFAULT_CLEVEL	1 | ||||||
|  | 
 | ||||||
|  | static int zstd_init_compress_ctx(struct compress_ctx *cc) | ||||||
|  | { | ||||||
|  | 	ZSTD_parameters params; | ||||||
|  | 	ZSTD_CStream *stream; | ||||||
|  | 	void *workspace; | ||||||
|  | 	unsigned int workspace_size; | ||||||
|  | 
 | ||||||
|  | 	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0); | ||||||
|  | 	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams); | ||||||
|  | 
 | ||||||
|  | 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode), | ||||||
|  | 					workspace_size, GFP_NOFS); | ||||||
|  | 	if (!workspace) | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 
 | ||||||
|  | 	stream = ZSTD_initCStream(params, 0, workspace, workspace_size); | ||||||
|  | 	if (!stream) { | ||||||
|  | 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n", | ||||||
|  | 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, | ||||||
|  | 				__func__); | ||||||
|  | 		kvfree(workspace); | ||||||
|  | 		return -EIO; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cc->private = workspace; | ||||||
|  | 	cc->private2 = stream; | ||||||
|  | 
 | ||||||
|  | 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static void zstd_destroy_compress_ctx(struct compress_ctx *cc) | ||||||
|  | { | ||||||
|  | 	kvfree(cc->private); | ||||||
|  | 	cc->private = NULL; | ||||||
|  | 	cc->private2 = NULL; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static int zstd_compress_pages(struct compress_ctx *cc) | ||||||
|  | { | ||||||
|  | 	ZSTD_CStream *stream = cc->private2; | ||||||
|  | 	ZSTD_inBuffer inbuf; | ||||||
|  | 	ZSTD_outBuffer outbuf; | ||||||
|  | 	int src_size = cc->rlen; | ||||||
|  | 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	inbuf.pos = 0; | ||||||
|  | 	inbuf.src = cc->rbuf; | ||||||
|  | 	inbuf.size = src_size; | ||||||
|  | 
 | ||||||
|  | 	outbuf.pos = 0; | ||||||
|  | 	outbuf.dst = cc->cbuf->cdata; | ||||||
|  | 	outbuf.size = dst_size; | ||||||
|  | 
 | ||||||
|  | 	ret = ZSTD_compressStream(stream, &outbuf, &inbuf); | ||||||
|  | 	if (ZSTD_isError(ret)) { | ||||||
|  | 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n", | ||||||
|  | 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, | ||||||
|  | 				__func__, ZSTD_getErrorCode(ret)); | ||||||
|  | 		return -EIO; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	ret = ZSTD_endStream(stream, &outbuf); | ||||||
|  | 	if (ZSTD_isError(ret)) { | ||||||
|  | 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n", | ||||||
|  | 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, | ||||||
|  | 				__func__, ZSTD_getErrorCode(ret)); | ||||||
|  | 		return -EIO; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cc->clen = outbuf.pos; | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) | ||||||
|  | { | ||||||
|  | 	ZSTD_DStream *stream; | ||||||
|  | 	void *workspace; | ||||||
|  | 	unsigned int workspace_size; | ||||||
|  | 
 | ||||||
|  | 	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE); | ||||||
|  | 
 | ||||||
|  | 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode), | ||||||
|  | 					workspace_size, GFP_NOFS); | ||||||
|  | 	if (!workspace) | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 
 | ||||||
|  | 	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE, | ||||||
|  | 					workspace, workspace_size); | ||||||
|  | 	if (!stream) { | ||||||
|  | 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n", | ||||||
|  | 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, | ||||||
|  | 				__func__); | ||||||
|  | 		kvfree(workspace); | ||||||
|  | 		return -EIO; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	dic->private = workspace; | ||||||
|  | 	dic->private2 = stream; | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic) | ||||||
|  | { | ||||||
|  | 	kvfree(dic->private); | ||||||
|  | 	dic->private = NULL; | ||||||
|  | 	dic->private2 = NULL; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static int zstd_decompress_pages(struct decompress_io_ctx *dic) | ||||||
|  | { | ||||||
|  | 	ZSTD_DStream *stream = dic->private2; | ||||||
|  | 	ZSTD_inBuffer inbuf; | ||||||
|  | 	ZSTD_outBuffer outbuf; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	inbuf.pos = 0; | ||||||
|  | 	inbuf.src = dic->cbuf->cdata; | ||||||
|  | 	inbuf.size = dic->clen; | ||||||
|  | 
 | ||||||
|  | 	outbuf.pos = 0; | ||||||
|  | 	outbuf.dst = dic->rbuf; | ||||||
|  | 	outbuf.size = dic->rlen; | ||||||
|  | 
 | ||||||
|  | 	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf); | ||||||
|  | 	if (ZSTD_isError(ret)) { | ||||||
|  | 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n", | ||||||
|  | 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, | ||||||
|  | 				__func__, ZSTD_getErrorCode(ret)); | ||||||
|  | 		return -EIO; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if (dic->rlen != outbuf.pos) { | ||||||
|  | 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, " | ||||||
|  | 				"expected:%lu\n", KERN_ERR, | ||||||
|  | 				F2FS_I_SB(dic->inode)->sb->s_id, | ||||||
|  | 				__func__, dic->rlen, | ||||||
|  | 				PAGE_SIZE << dic->log_cluster_size); | ||||||
|  | 		return -EIO; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static const struct f2fs_compress_ops f2fs_zstd_ops = { | ||||||
|  | 	.init_compress_ctx	= zstd_init_compress_ctx, | ||||||
|  | 	.destroy_compress_ctx	= zstd_destroy_compress_ctx, | ||||||
|  | 	.compress_pages		= zstd_compress_pages, | ||||||
|  | 	.init_decompress_ctx	= zstd_init_decompress_ctx, | ||||||
|  | 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx, | ||||||
|  | 	.decompress_pages	= zstd_decompress_pages, | ||||||
|  | }; | ||||||
|  | #endif | ||||||
|  | 
 | ||||||
| static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = { | static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = { | ||||||
| #ifdef CONFIG_F2FS_FS_LZO | #ifdef CONFIG_F2FS_FS_LZO | ||||||
| 	&f2fs_lzo_ops, | 	&f2fs_lzo_ops, | ||||||
|  | @ -302,6 +462,11 @@ static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = { | ||||||
| #else | #else | ||||||
| 	NULL, | 	NULL, | ||||||
| #endif | #endif | ||||||
|  | #ifdef CONFIG_F2FS_FS_ZSTD | ||||||
|  | 	&f2fs_zstd_ops, | ||||||
|  | #else | ||||||
|  | 	NULL, | ||||||
|  | #endif | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| bool f2fs_is_compress_backend_ready(struct inode *inode) | bool f2fs_is_compress_backend_ready(struct inode *inode) | ||||||
|  | @ -334,9 +499,11 @@ static int f2fs_compress_pages(struct compress_ctx *cc) | ||||||
| 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx, | 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx, | ||||||
| 				cc->cluster_size, fi->i_compress_algorithm); | 				cc->cluster_size, fi->i_compress_algorithm); | ||||||
| 
 | 
 | ||||||
| 	ret = cops->init_compress_ctx(cc); | 	if (cops->init_compress_ctx) { | ||||||
| 	if (ret) | 		ret = cops->init_compress_ctx(cc); | ||||||
| 		goto out; | 		if (ret) | ||||||
|  | 			goto out; | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	max_len = COMPRESS_HEADER_SIZE + cc->clen; | 	max_len = COMPRESS_HEADER_SIZE + cc->clen; | ||||||
| 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE); | 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE); | ||||||
|  | @ -380,21 +547,27 @@ static int f2fs_compress_pages(struct compress_ctx *cc) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	cc->cbuf->clen = cpu_to_le32(cc->clen); | 	cc->cbuf->clen = cpu_to_le32(cc->clen); | ||||||
| 	cc->cbuf->chksum = cpu_to_le32(0); |  | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++) | 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++) | ||||||
| 		cc->cbuf->reserved[i] = cpu_to_le32(0); | 		cc->cbuf->reserved[i] = cpu_to_le32(0); | ||||||
| 
 | 
 | ||||||
|  | 	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE); | ||||||
|  | 
 | ||||||
|  | 	/* zero out any unused part of the last page */ | ||||||
|  | 	memset(&cc->cbuf->cdata[cc->clen], 0, | ||||||
|  | 	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE)); | ||||||
|  | 
 | ||||||
| 	vunmap(cc->cbuf); | 	vunmap(cc->cbuf); | ||||||
| 	vunmap(cc->rbuf); | 	vunmap(cc->rbuf); | ||||||
| 
 | 
 | ||||||
| 	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE); |  | ||||||
| 
 |  | ||||||
| 	for (i = nr_cpages; i < cc->nr_cpages; i++) { | 	for (i = nr_cpages; i < cc->nr_cpages; i++) { | ||||||
| 		f2fs_put_compressed_page(cc->cpages[i]); | 		f2fs_put_compressed_page(cc->cpages[i]); | ||||||
| 		cc->cpages[i] = NULL; | 		cc->cpages[i] = NULL; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if (cops->destroy_compress_ctx) | ||||||
|  | 		cops->destroy_compress_ctx(cc); | ||||||
|  | 
 | ||||||
| 	cc->nr_cpages = nr_cpages; | 	cc->nr_cpages = nr_cpages; | ||||||
| 
 | 
 | ||||||
| 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, | 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, | ||||||
|  | @ -413,7 +586,8 @@ out_free_cpages: | ||||||
| 	kfree(cc->cpages); | 	kfree(cc->cpages); | ||||||
| 	cc->cpages = NULL; | 	cc->cpages = NULL; | ||||||
| destroy_compress_ctx: | destroy_compress_ctx: | ||||||
| 	cops->destroy_compress_ctx(cc); | 	if (cops->destroy_compress_ctx) | ||||||
|  | 		cops->destroy_compress_ctx(cc); | ||||||
| out: | out: | ||||||
| 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, | 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, | ||||||
| 							cc->clen, ret); | 							cc->clen, ret); | ||||||
|  | @ -447,10 +621,16 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) | ||||||
| 		goto out_free_dic; | 		goto out_free_dic; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if (cops->init_decompress_ctx) { | ||||||
|  | 		ret = cops->init_decompress_ctx(dic); | ||||||
|  | 		if (ret) | ||||||
|  | 			goto out_free_dic; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL); | 	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL); | ||||||
| 	if (!dic->rbuf) { | 	if (!dic->rbuf) { | ||||||
| 		ret = -ENOMEM; | 		ret = -ENOMEM; | ||||||
| 		goto out_free_dic; | 		goto destroy_decompress_ctx; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO); | 	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO); | ||||||
|  | @ -473,7 +653,12 @@ out_vunmap_cbuf: | ||||||
| 	vunmap(dic->cbuf); | 	vunmap(dic->cbuf); | ||||||
| out_vunmap_rbuf: | out_vunmap_rbuf: | ||||||
| 	vunmap(dic->rbuf); | 	vunmap(dic->rbuf); | ||||||
|  | destroy_decompress_ctx: | ||||||
|  | 	if (cops->destroy_decompress_ctx) | ||||||
|  | 		cops->destroy_decompress_ctx(dic); | ||||||
| out_free_dic: | out_free_dic: | ||||||
|  | 	if (verity) | ||||||
|  | 		refcount_set(&dic->ref, dic->nr_cpages); | ||||||
| 	if (!verity) | 	if (!verity) | ||||||
| 		f2fs_decompress_end_io(dic->rpages, dic->cluster_size, | 		f2fs_decompress_end_io(dic->rpages, dic->cluster_size, | ||||||
| 								ret, false); | 								ret, false); | ||||||
|  | @ -532,8 +717,7 @@ static bool __cluster_may_compress(struct compress_ctx *cc) | ||||||
| 	return true; | 	return true; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /* return # of compressed block addresses */ | static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr) | ||||||
| static int f2fs_compressed_blocks(struct compress_ctx *cc) |  | ||||||
| { | { | ||||||
| 	struct dnode_of_data dn; | 	struct dnode_of_data dn; | ||||||
| 	int ret; | 	int ret; | ||||||
|  | @ -554,10 +738,15 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc) | ||||||
| 		for (i = 1; i < cc->cluster_size; i++) { | 		for (i = 1; i < cc->cluster_size; i++) { | ||||||
| 			block_t blkaddr; | 			block_t blkaddr; | ||||||
| 
 | 
 | ||||||
| 			blkaddr = datablock_addr(dn.inode, | 			blkaddr = data_blkaddr(dn.inode, | ||||||
| 					dn.node_page, dn.ofs_in_node + i); | 					dn.node_page, dn.ofs_in_node + i); | ||||||
| 			if (blkaddr != NULL_ADDR) | 			if (compr) { | ||||||
| 				ret++; | 				if (__is_valid_data_blkaddr(blkaddr)) | ||||||
|  | 					ret++; | ||||||
|  | 			} else { | ||||||
|  | 				if (blkaddr != NULL_ADDR) | ||||||
|  | 					ret++; | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| fail: | fail: | ||||||
|  | @ -565,6 +754,18 @@ fail: | ||||||
| 	return ret; | 	return ret; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /* return # of compressed blocks in compressed cluster */ | ||||||
|  | static int f2fs_compressed_blocks(struct compress_ctx *cc) | ||||||
|  | { | ||||||
|  | 	return __f2fs_cluster_blocks(cc, true); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /* return # of valid blocks in compressed cluster */ | ||||||
|  | static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr) | ||||||
|  | { | ||||||
|  | 	return __f2fs_cluster_blocks(cc, false); | ||||||
|  | } | ||||||
|  | 
 | ||||||
| int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) | int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) | ||||||
| { | { | ||||||
| 	struct compress_ctx cc = { | 	struct compress_ctx cc = { | ||||||
|  | @ -574,7 +775,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) | ||||||
| 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, | 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, | ||||||
| 	}; | 	}; | ||||||
| 
 | 
 | ||||||
| 	return f2fs_compressed_blocks(&cc); | 	return f2fs_cluster_blocks(&cc, false); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static bool cluster_may_compress(struct compress_ctx *cc) | static bool cluster_may_compress(struct compress_ctx *cc) | ||||||
|  | @ -623,7 +824,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, | ||||||
| 	bool prealloc; | 	bool prealloc; | ||||||
| 
 | 
 | ||||||
| retry: | retry: | ||||||
| 	ret = f2fs_compressed_blocks(cc); | 	ret = f2fs_cluster_blocks(cc, false); | ||||||
| 	if (ret <= 0) | 	if (ret <= 0) | ||||||
| 		return ret; | 		return ret; | ||||||
| 
 | 
 | ||||||
|  | @ -653,7 +854,7 @@ retry: | ||||||
| 		struct bio *bio = NULL; | 		struct bio *bio = NULL; | ||||||
| 
 | 
 | ||||||
| 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size, | 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size, | ||||||
| 						&last_block_in_bio, false); | 					&last_block_in_bio, false, true); | ||||||
| 		f2fs_destroy_compress_ctx(cc); | 		f2fs_destroy_compress_ctx(cc); | ||||||
| 		if (ret) | 		if (ret) | ||||||
| 			goto release_pages; | 			goto release_pages; | ||||||
|  | @ -772,7 +973,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, | ||||||
| 		.encrypted_page = NULL, | 		.encrypted_page = NULL, | ||||||
| 		.compressed_page = NULL, | 		.compressed_page = NULL, | ||||||
| 		.submitted = false, | 		.submitted = false, | ||||||
| 		.need_lock = LOCK_RETRY, |  | ||||||
| 		.io_type = io_type, | 		.io_type = io_type, | ||||||
| 		.io_wbc = wbc, | 		.io_wbc = wbc, | ||||||
| 		.encrypted = f2fs_encrypted_file(cc->inode), | 		.encrypted = f2fs_encrypted_file(cc->inode), | ||||||
|  | @ -785,16 +985,17 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, | ||||||
| 	loff_t psize; | 	loff_t psize; | ||||||
| 	int i, err; | 	int i, err; | ||||||
| 
 | 
 | ||||||
| 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0); | 	if (!f2fs_trylock_op(sbi)) | ||||||
|  | 		return -EAGAIN; | ||||||
| 
 | 
 | ||||||
| 	f2fs_lock_op(sbi); | 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0); | ||||||
| 
 | 
 | ||||||
| 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); | 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); | ||||||
| 	if (err) | 	if (err) | ||||||
| 		goto out_unlock_op; | 		goto out_unlock_op; | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < cc->cluster_size; i++) { | 	for (i = 0; i < cc->cluster_size; i++) { | ||||||
| 		if (datablock_addr(dn.inode, dn.node_page, | 		if (data_blkaddr(dn.inode, dn.node_page, | ||||||
| 					dn.ofs_in_node + i) == NULL_ADDR) | 					dn.ofs_in_node + i) == NULL_ADDR) | ||||||
| 			goto out_put_dnode; | 			goto out_put_dnode; | ||||||
| 	} | 	} | ||||||
|  | @ -813,7 +1014,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, | ||||||
| 
 | 
 | ||||||
| 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC; | 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC; | ||||||
| 	cic->inode = inode; | 	cic->inode = inode; | ||||||
| 	refcount_set(&cic->ref, 1); | 	refcount_set(&cic->ref, cc->nr_cpages); | ||||||
| 	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) << | 	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) << | ||||||
| 			cc->log_cluster_size, GFP_NOFS); | 			cc->log_cluster_size, GFP_NOFS); | ||||||
| 	if (!cic->rpages) | 	if (!cic->rpages) | ||||||
|  | @ -823,8 +1024,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < cc->nr_cpages; i++) { | 	for (i = 0; i < cc->nr_cpages; i++) { | ||||||
| 		f2fs_set_compressed_page(cc->cpages[i], inode, | 		f2fs_set_compressed_page(cc->cpages[i], inode, | ||||||
| 					cc->rpages[i + 1]->index, | 					cc->rpages[i + 1]->index, cic); | ||||||
| 					cic, i ? &cic->ref : NULL); |  | ||||||
| 		fio.compressed_page = cc->cpages[i]; | 		fio.compressed_page = cc->cpages[i]; | ||||||
| 		if (fio.encrypted) { | 		if (fio.encrypted) { | ||||||
| 			fio.page = cc->rpages[i + 1]; | 			fio.page = cc->rpages[i + 1]; | ||||||
|  | @ -843,9 +1043,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, | ||||||
| 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) { | 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) { | ||||||
| 		block_t blkaddr; | 		block_t blkaddr; | ||||||
| 
 | 
 | ||||||
| 		blkaddr = datablock_addr(dn.inode, dn.node_page, | 		blkaddr = f2fs_data_blkaddr(&dn); | ||||||
| 							dn.ofs_in_node); | 		fio.page = cc->rpages[i]; | ||||||
| 		fio.page = cic->rpages[i]; |  | ||||||
| 		fio.old_blkaddr = blkaddr; | 		fio.old_blkaddr = blkaddr; | ||||||
| 
 | 
 | ||||||
| 		/* cluster header */ | 		/* cluster header */ | ||||||
|  | @ -895,10 +1094,10 @@ unlock_continue: | ||||||
| 	f2fs_put_dnode(&dn); | 	f2fs_put_dnode(&dn); | ||||||
| 	f2fs_unlock_op(sbi); | 	f2fs_unlock_op(sbi); | ||||||
| 
 | 
 | ||||||
| 	down_write(&fi->i_sem); | 	spin_lock(&fi->i_size_lock); | ||||||
| 	if (fi->last_disk_size < psize) | 	if (fi->last_disk_size < psize) | ||||||
| 		fi->last_disk_size = psize; | 		fi->last_disk_size = psize; | ||||||
| 	up_write(&fi->i_sem); | 	spin_unlock(&fi->i_size_lock); | ||||||
| 
 | 
 | ||||||
| 	f2fs_put_rpages(cc); | 	f2fs_put_rpages(cc); | ||||||
| 	f2fs_destroy_compress_ctx(cc); | 	f2fs_destroy_compress_ctx(cc); | ||||||
|  | @ -984,24 +1183,30 @@ retry_write: | ||||||
| 				unlock_page(cc->rpages[i]); | 				unlock_page(cc->rpages[i]); | ||||||
| 				ret = 0; | 				ret = 0; | ||||||
| 			} else if (ret == -EAGAIN) { | 			} else if (ret == -EAGAIN) { | ||||||
|  | 				/*
 | ||||||
|  | 				 * for quota file, just redirty left pages to | ||||||
|  | 				 * avoid deadlock caused by cluster update race | ||||||
|  | 				 * from foreground operation. | ||||||
|  | 				 */ | ||||||
|  | 				if (IS_NOQUOTA(cc->inode)) { | ||||||
|  | 					err = 0; | ||||||
|  | 					goto out_err; | ||||||
|  | 				} | ||||||
| 				ret = 0; | 				ret = 0; | ||||||
| 				cond_resched(); | 				cond_resched(); | ||||||
| 				congestion_wait(BLK_RW_ASYNC, HZ/50); | 				congestion_wait(BLK_RW_ASYNC, | ||||||
|  | 						DEFAULT_IO_TIMEOUT); | ||||||
| 				lock_page(cc->rpages[i]); | 				lock_page(cc->rpages[i]); | ||||||
| 				clear_page_dirty_for_io(cc->rpages[i]); | 				clear_page_dirty_for_io(cc->rpages[i]); | ||||||
| 				goto retry_write; | 				goto retry_write; | ||||||
| 			} | 			} | ||||||
| 			err = ret; | 			err = ret; | ||||||
| 			goto out_fail; | 			goto out_err; | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		*submitted += _submitted; | 		*submitted += _submitted; | ||||||
| 	} | 	} | ||||||
| 	return 0; | 	return 0; | ||||||
| 
 |  | ||||||
| out_fail: |  | ||||||
| 	/* TODO: revoke partially updated block addresses */ |  | ||||||
| 	BUG_ON(compr_blocks); |  | ||||||
| out_err: | out_err: | ||||||
| 	for (++i; i < cc->cluster_size; i++) { | 	for (++i; i < cc->cluster_size; i++) { | ||||||
| 		if (!cc->rpages[i]) | 		if (!cc->rpages[i]) | ||||||
|  | @ -1069,7 +1274,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) | ||||||
| 
 | 
 | ||||||
| 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; | 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; | ||||||
| 	dic->inode = cc->inode; | 	dic->inode = cc->inode; | ||||||
| 	refcount_set(&dic->ref, 1); | 	refcount_set(&dic->ref, cc->nr_cpages); | ||||||
| 	dic->cluster_idx = cc->cluster_idx; | 	dic->cluster_idx = cc->cluster_idx; | ||||||
| 	dic->cluster_size = cc->cluster_size; | 	dic->cluster_size = cc->cluster_size; | ||||||
| 	dic->log_cluster_size = cc->log_cluster_size; | 	dic->log_cluster_size = cc->log_cluster_size; | ||||||
|  | @ -1093,8 +1298,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) | ||||||
| 			goto out_free; | 			goto out_free; | ||||||
| 
 | 
 | ||||||
| 		f2fs_set_compressed_page(page, cc->inode, | 		f2fs_set_compressed_page(page, cc->inode, | ||||||
| 					start_idx + i + 1, | 					start_idx + i + 1, dic); | ||||||
| 					dic, i ? &dic->ref : NULL); |  | ||||||
| 		dic->cpages[i] = page; | 		dic->cpages[i] = page; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -1104,20 +1308,16 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) | ||||||
| 		goto out_free; | 		goto out_free; | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < dic->cluster_size; i++) { | 	for (i = 0; i < dic->cluster_size; i++) { | ||||||
| 		if (cc->rpages[i]) | 		if (cc->rpages[i]) { | ||||||
|  | 			dic->tpages[i] = cc->rpages[i]; | ||||||
| 			continue; | 			continue; | ||||||
|  | 		} | ||||||
| 
 | 
 | ||||||
| 		dic->tpages[i] = f2fs_grab_page(); | 		dic->tpages[i] = f2fs_grab_page(); | ||||||
| 		if (!dic->tpages[i]) | 		if (!dic->tpages[i]) | ||||||
| 			goto out_free; | 			goto out_free; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < dic->cluster_size; i++) { |  | ||||||
| 		if (dic->tpages[i]) |  | ||||||
| 			continue; |  | ||||||
| 		dic->tpages[i] = cc->rpages[i]; |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return dic; | 	return dic; | ||||||
| 
 | 
 | ||||||
| out_free: | out_free: | ||||||
|  | @ -1133,7 +1333,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic) | ||||||
| 		for (i = 0; i < dic->cluster_size; i++) { | 		for (i = 0; i < dic->cluster_size; i++) { | ||||||
| 			if (dic->rpages[i]) | 			if (dic->rpages[i]) | ||||||
| 				continue; | 				continue; | ||||||
| 			f2fs_put_page(dic->tpages[i], 1); | 			if (!dic->tpages[i]) | ||||||
|  | 				continue; | ||||||
|  | 			unlock_page(dic->tpages[i]); | ||||||
|  | 			put_page(dic->tpages[i]); | ||||||
| 		} | 		} | ||||||
| 		kfree(dic->tpages); | 		kfree(dic->tpages); | ||||||
| 	} | 	} | ||||||
|  | @ -1162,15 +1365,17 @@ void f2fs_decompress_end_io(struct page **rpages, | ||||||
| 		if (!rpage) | 		if (!rpage) | ||||||
| 			continue; | 			continue; | ||||||
| 
 | 
 | ||||||
| 		if (err || PageError(rpage)) { | 		if (err || PageError(rpage)) | ||||||
| 			ClearPageUptodate(rpage); | 			goto clear_uptodate; | ||||||
| 			ClearPageError(rpage); | 
 | ||||||
| 		} else { | 		if (!verity || fsverity_verify_page(rpage)) { | ||||||
| 			if (!verity || fsverity_verify_page(rpage)) | 			SetPageUptodate(rpage); | ||||||
| 				SetPageUptodate(rpage); | 			goto unlock; | ||||||
| 			else |  | ||||||
| 				SetPageError(rpage); |  | ||||||
| 		} | 		} | ||||||
|  | clear_uptodate: | ||||||
|  | 		ClearPageUptodate(rpage); | ||||||
|  | 		ClearPageError(rpage); | ||||||
|  | unlock: | ||||||
| 		unlock_page(rpage); | 		unlock_page(rpage); | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
							
								
								
									
										143
									
								
								fs/f2fs/data.c
									
										
									
									
									
								
							
							
						
						
									
										143
									
								
								fs/f2fs/data.c
									
										
									
									
									
								
							|  | @ -54,17 +54,13 @@ static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask, | ||||||
| 	return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset); | 	return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail) | struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio) | ||||||
| { | { | ||||||
| 	struct bio *bio; | 	if (noio) { | ||||||
| 
 |  | ||||||
| 	if (no_fail) { |  | ||||||
| 		/* No failure on bio allocation */ | 		/* No failure on bio allocation */ | ||||||
| 		bio = __f2fs_bio_alloc(GFP_NOIO, npages); | 		return __f2fs_bio_alloc(GFP_NOIO, npages); | ||||||
| 		if (!bio) |  | ||||||
| 			bio = __f2fs_bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); |  | ||||||
| 		return bio; |  | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
| 	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) { | 	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) { | ||||||
| 		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO); | 		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO); | ||||||
| 		return NULL; | 		return NULL; | ||||||
|  | @ -143,6 +139,8 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity) | ||||||
| 			f2fs_decompress_pages(bio, page, verity); | 			f2fs_decompress_pages(bio, page, verity); | ||||||
| 			continue; | 			continue; | ||||||
| 		} | 		} | ||||||
|  | 		if (verity) | ||||||
|  | 			continue; | ||||||
| #endif | #endif | ||||||
| 
 | 
 | ||||||
| 		/* PG_error was set if any post_read step failed */ | 		/* PG_error was set if any post_read step failed */ | ||||||
|  | @ -191,12 +189,38 @@ static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size) | ||||||
| 
 | 
 | ||||||
| static void f2fs_verify_bio(struct bio *bio) | static void f2fs_verify_bio(struct bio *bio) | ||||||
| { | { | ||||||
| 	struct page *page = bio_first_page_all(bio); | 	struct bio_vec *bv; | ||||||
| 	struct decompress_io_ctx *dic = | 	struct bvec_iter_all iter_all; | ||||||
| 			(struct decompress_io_ctx *)page_private(page); |  | ||||||
| 
 | 
 | ||||||
| 	f2fs_verify_pages(dic->rpages, dic->cluster_size); | 	bio_for_each_segment_all(bv, bio, iter_all) { | ||||||
| 	f2fs_free_dic(dic); | 		struct page *page = bv->bv_page; | ||||||
|  | 		struct decompress_io_ctx *dic; | ||||||
|  | 
 | ||||||
|  | 		dic = (struct decompress_io_ctx *)page_private(page); | ||||||
|  | 
 | ||||||
|  | 		if (dic) { | ||||||
|  | 			if (refcount_dec_not_one(&dic->ref)) | ||||||
|  | 				continue; | ||||||
|  | 			f2fs_verify_pages(dic->rpages, | ||||||
|  | 						dic->cluster_size); | ||||||
|  | 			f2fs_free_dic(dic); | ||||||
|  | 			continue; | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if (bio->bi_status || PageError(page)) | ||||||
|  | 			goto clear_uptodate; | ||||||
|  | 
 | ||||||
|  | 		if (fsverity_verify_page(page)) { | ||||||
|  | 			SetPageUptodate(page); | ||||||
|  | 			goto unlock; | ||||||
|  | 		} | ||||||
|  | clear_uptodate: | ||||||
|  | 		ClearPageUptodate(page); | ||||||
|  | 		ClearPageError(page); | ||||||
|  | unlock: | ||||||
|  | 		dec_page_count(F2FS_P_SB(page), __read_io_type(page)); | ||||||
|  | 		unlock_page(page); | ||||||
|  | 	} | ||||||
| } | } | ||||||
| #endif | #endif | ||||||
| 
 | 
 | ||||||
|  | @ -364,9 +388,6 @@ static void f2fs_write_end_io(struct bio *bio) | ||||||
| 	bio_put(bio); | 	bio_put(bio); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 |  | ||||||
|  * Return true, if pre_bio's bdev is same as its target device. |  | ||||||
|  */ |  | ||||||
| struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, | struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, | ||||||
| 				block_t blk_addr, struct bio *bio) | 				block_t blk_addr, struct bio *bio) | ||||||
| { | { | ||||||
|  | @ -403,6 +424,9 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr) | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /*
 | ||||||
|  |  * Return true, if pre_bio's bdev is same as its target device. | ||||||
|  |  */ | ||||||
| static bool __same_bdev(struct f2fs_sb_info *sbi, | static bool __same_bdev(struct f2fs_sb_info *sbi, | ||||||
| 				block_t blk_addr, struct bio *bio) | 				block_t blk_addr, struct bio *bio) | ||||||
| { | { | ||||||
|  | @ -410,9 +434,6 @@ static bool __same_bdev(struct f2fs_sb_info *sbi, | ||||||
| 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; | 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 |  | ||||||
|  * Low-level block read/write IO operations. |  | ||||||
|  */ |  | ||||||
| static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) | static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) | ||||||
| { | { | ||||||
| 	struct f2fs_sb_info *sbi = fio->sbi; | 	struct f2fs_sb_info *sbi = fio->sbi; | ||||||
|  | @ -445,7 +466,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi, | ||||||
| 		if (type != DATA && type != NODE) | 		if (type != DATA && type != NODE) | ||||||
| 			goto submit_io; | 			goto submit_io; | ||||||
| 
 | 
 | ||||||
| 		if (test_opt(sbi, LFS) && current->plug) | 		if (f2fs_lfs_mode(sbi) && current->plug) | ||||||
| 			blk_finish_plug(current->plug); | 			blk_finish_plug(current->plug); | ||||||
| 
 | 
 | ||||||
| 		if (F2FS_IO_ALIGNED(sbi)) | 		if (F2FS_IO_ALIGNED(sbi)) | ||||||
|  | @ -928,14 +949,15 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) | ||||||
| 
 | 
 | ||||||
| static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, | static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, | ||||||
| 				      unsigned nr_pages, unsigned op_flag, | 				      unsigned nr_pages, unsigned op_flag, | ||||||
| 				      pgoff_t first_idx) | 				      pgoff_t first_idx, bool for_write) | ||||||
| { | { | ||||||
| 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | ||||||
| 	struct bio *bio; | 	struct bio *bio; | ||||||
| 	struct bio_post_read_ctx *ctx; | 	struct bio_post_read_ctx *ctx; | ||||||
| 	unsigned int post_read_steps = 0; | 	unsigned int post_read_steps = 0; | ||||||
| 
 | 
 | ||||||
| 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false); | 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), | ||||||
|  | 								for_write); | ||||||
| 	if (!bio) | 	if (!bio) | ||||||
| 		return ERR_PTR(-ENOMEM); | 		return ERR_PTR(-ENOMEM); | ||||||
| 	f2fs_target_device(sbi, blkaddr, bio); | 	f2fs_target_device(sbi, blkaddr, bio); | ||||||
|  | @ -970,12 +992,12 @@ static void f2fs_release_read_bio(struct bio *bio) | ||||||
| 
 | 
 | ||||||
| /* This can handle encryption stuffs */ | /* This can handle encryption stuffs */ | ||||||
| static int f2fs_submit_page_read(struct inode *inode, struct page *page, | static int f2fs_submit_page_read(struct inode *inode, struct page *page, | ||||||
| 							block_t blkaddr) | 						block_t blkaddr, bool for_write) | ||||||
| { | { | ||||||
| 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | ||||||
| 	struct bio *bio; | 	struct bio *bio; | ||||||
| 
 | 
 | ||||||
| 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index); | 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write); | ||||||
| 	if (IS_ERR(bio)) | 	if (IS_ERR(bio)) | ||||||
| 		return PTR_ERR(bio); | 		return PTR_ERR(bio); | ||||||
| 
 | 
 | ||||||
|  | @ -1047,8 +1069,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) | ||||||
| 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); | 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); | ||||||
| 
 | 
 | ||||||
| 	for (; count > 0; dn->ofs_in_node++) { | 	for (; count > 0; dn->ofs_in_node++) { | ||||||
| 		block_t blkaddr = datablock_addr(dn->inode, | 		block_t blkaddr = f2fs_data_blkaddr(dn); | ||||||
| 					dn->node_page, dn->ofs_in_node); |  | ||||||
| 		if (blkaddr == NULL_ADDR) { | 		if (blkaddr == NULL_ADDR) { | ||||||
| 			dn->data_blkaddr = NEW_ADDR; | 			dn->data_blkaddr = NEW_ADDR; | ||||||
| 			__set_data_blkaddr(dn); | 			__set_data_blkaddr(dn); | ||||||
|  | @ -1162,7 +1183,7 @@ got_it: | ||||||
| 		return page; | 		return page; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr); | 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write); | ||||||
| 	if (err) | 	if (err) | ||||||
| 		goto put_err; | 		goto put_err; | ||||||
| 	return page; | 	return page; | ||||||
|  | @ -1300,8 +1321,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type) | ||||||
| 	if (err) | 	if (err) | ||||||
| 		return err; | 		return err; | ||||||
| 
 | 
 | ||||||
| 	dn->data_blkaddr = datablock_addr(dn->inode, | 	dn->data_blkaddr = f2fs_data_blkaddr(dn); | ||||||
| 				dn->node_page, dn->ofs_in_node); |  | ||||||
| 	if (dn->data_blkaddr != NULL_ADDR) | 	if (dn->data_blkaddr != NULL_ADDR) | ||||||
| 		goto alloc; | 		goto alloc; | ||||||
| 
 | 
 | ||||||
|  | @ -1388,13 +1408,9 @@ void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with |  * f2fs_map_blocks() tries to find or build mapping relationship which | ||||||
|  * f2fs_map_blocks structure. |  * maps continuous logical blocks to physical blocks, and return such | ||||||
|  * If original data blocks are allocated, then give them to blockdev. |  * info via f2fs_map_blocks structure. | ||||||
|  * Otherwise, |  | ||||||
|  *     a. preallocate requested block addresses |  | ||||||
|  *     b. do not use extent cache for better performance |  | ||||||
|  *     c. give the block addresses to blockdev |  | ||||||
|  */ |  */ | ||||||
| int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, | int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, | ||||||
| 						int create, int flag) | 						int create, int flag) | ||||||
|  | @ -1422,7 +1438,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, | ||||||
| 	end = pgofs + maxblocks; | 	end = pgofs + maxblocks; | ||||||
| 
 | 
 | ||||||
| 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { | 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { | ||||||
| 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO && | 		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO && | ||||||
| 							map->m_may_create) | 							map->m_may_create) | ||||||
| 			goto next_dnode; | 			goto next_dnode; | ||||||
| 
 | 
 | ||||||
|  | @ -1467,7 +1483,7 @@ next_dnode: | ||||||
| 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode); | 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode); | ||||||
| 
 | 
 | ||||||
| next_block: | next_block: | ||||||
| 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node); | 	blkaddr = f2fs_data_blkaddr(&dn); | ||||||
| 
 | 
 | ||||||
| 	if (__is_valid_data_blkaddr(blkaddr) && | 	if (__is_valid_data_blkaddr(blkaddr) && | ||||||
| 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { | 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { | ||||||
|  | @ -1477,7 +1493,7 @@ next_block: | ||||||
| 
 | 
 | ||||||
| 	if (__is_valid_data_blkaddr(blkaddr)) { | 	if (__is_valid_data_blkaddr(blkaddr)) { | ||||||
| 		/* use out-place-update for driect IO under LFS mode */ | 		/* use out-place-update for driect IO under LFS mode */ | ||||||
| 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO && | 		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO && | ||||||
| 							map->m_may_create) { | 							map->m_may_create) { | ||||||
| 			err = __allocate_data_block(&dn, map->m_seg_type); | 			err = __allocate_data_block(&dn, map->m_seg_type); | ||||||
| 			if (err) | 			if (err) | ||||||
|  | @ -1980,7 +1996,8 @@ submit_and_realloc: | ||||||
| 	} | 	} | ||||||
| 	if (bio == NULL) { | 	if (bio == NULL) { | ||||||
| 		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, | 		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, | ||||||
| 				is_readahead ? REQ_RAHEAD : 0, page->index); | 				is_readahead ? REQ_RAHEAD : 0, page->index, | ||||||
|  | 				false); | ||||||
| 		if (IS_ERR(bio)) { | 		if (IS_ERR(bio)) { | ||||||
| 			ret = PTR_ERR(bio); | 			ret = PTR_ERR(bio); | ||||||
| 			bio = NULL; | 			bio = NULL; | ||||||
|  | @ -2015,7 +2032,7 @@ out: | ||||||
| #ifdef CONFIG_F2FS_FS_COMPRESSION | #ifdef CONFIG_F2FS_FS_COMPRESSION | ||||||
| int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | ||||||
| 				unsigned nr_pages, sector_t *last_block_in_bio, | 				unsigned nr_pages, sector_t *last_block_in_bio, | ||||||
| 				bool is_readahead) | 				bool is_readahead, bool for_write) | ||||||
| { | { | ||||||
| 	struct dnode_of_data dn; | 	struct dnode_of_data dn; | ||||||
| 	struct inode *inode = cc->inode; | 	struct inode *inode = cc->inode; | ||||||
|  | @ -2031,7 +2048,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | ||||||
| 
 | 
 | ||||||
| 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc)); | 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc)); | ||||||
| 
 | 
 | ||||||
| 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; | 	last_block_in_file = (f2fs_readpage_limit(inode) + | ||||||
|  | 					blocksize - 1) >> blkbits; | ||||||
| 
 | 
 | ||||||
| 	/* get rid of pages beyond EOF */ | 	/* get rid of pages beyond EOF */ | ||||||
| 	for (i = 0; i < cc->cluster_size; i++) { | 	for (i = 0; i < cc->cluster_size; i++) { | ||||||
|  | @ -2067,7 +2085,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | ||||||
| 	for (i = 1; i < cc->cluster_size; i++) { | 	for (i = 1; i < cc->cluster_size; i++) { | ||||||
| 		block_t blkaddr; | 		block_t blkaddr; | ||||||
| 
 | 
 | ||||||
| 		blkaddr = datablock_addr(dn.inode, dn.node_page, | 		blkaddr = data_blkaddr(dn.inode, dn.node_page, | ||||||
| 						dn.ofs_in_node + i); | 						dn.ofs_in_node + i); | ||||||
| 
 | 
 | ||||||
| 		if (!__is_valid_data_blkaddr(blkaddr)) | 		if (!__is_valid_data_blkaddr(blkaddr)) | ||||||
|  | @ -2096,7 +2114,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | ||||||
| 		struct page *page = dic->cpages[i]; | 		struct page *page = dic->cpages[i]; | ||||||
| 		block_t blkaddr; | 		block_t blkaddr; | ||||||
| 
 | 
 | ||||||
| 		blkaddr = datablock_addr(dn.inode, dn.node_page, | 		blkaddr = data_blkaddr(dn.inode, dn.node_page, | ||||||
| 						dn.ofs_in_node + i + 1); | 						dn.ofs_in_node + i + 1); | ||||||
| 
 | 
 | ||||||
| 		if (bio && !page_is_mergeable(sbi, bio, | 		if (bio && !page_is_mergeable(sbi, bio, | ||||||
|  | @ -2109,7 +2127,7 @@ submit_and_realloc: | ||||||
| 		if (!bio) { | 		if (!bio) { | ||||||
| 			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages, | 			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages, | ||||||
| 					is_readahead ? REQ_RAHEAD : 0, | 					is_readahead ? REQ_RAHEAD : 0, | ||||||
| 					page->index); | 					page->index, for_write); | ||||||
| 			if (IS_ERR(bio)) { | 			if (IS_ERR(bio)) { | ||||||
| 				ret = PTR_ERR(bio); | 				ret = PTR_ERR(bio); | ||||||
| 				bio = NULL; | 				bio = NULL; | ||||||
|  | @ -2210,7 +2228,7 @@ int f2fs_mpage_readpages(struct address_space *mapping, | ||||||
| 				ret = f2fs_read_multi_pages(&cc, &bio, | 				ret = f2fs_read_multi_pages(&cc, &bio, | ||||||
| 							max_nr_pages, | 							max_nr_pages, | ||||||
| 							&last_block_in_bio, | 							&last_block_in_bio, | ||||||
| 							is_readahead); | 							is_readahead, false); | ||||||
| 				f2fs_destroy_compress_ctx(&cc); | 				f2fs_destroy_compress_ctx(&cc); | ||||||
| 				if (ret) | 				if (ret) | ||||||
| 					goto set_error_page; | 					goto set_error_page; | ||||||
|  | @ -2253,7 +2271,7 @@ next_page: | ||||||
| 				ret = f2fs_read_multi_pages(&cc, &bio, | 				ret = f2fs_read_multi_pages(&cc, &bio, | ||||||
| 							max_nr_pages, | 							max_nr_pages, | ||||||
| 							&last_block_in_bio, | 							&last_block_in_bio, | ||||||
| 							is_readahead); | 							is_readahead, false); | ||||||
| 				f2fs_destroy_compress_ctx(&cc); | 				f2fs_destroy_compress_ctx(&cc); | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | @ -2326,7 +2344,7 @@ retry_encrypt: | ||||||
| 		/* flush pending IOs and wait for a while in the ENOMEM case */ | 		/* flush pending IOs and wait for a while in the ENOMEM case */ | ||||||
| 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { | 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { | ||||||
| 			f2fs_flush_merged_writes(fio->sbi); | 			f2fs_flush_merged_writes(fio->sbi); | ||||||
| 			congestion_wait(BLK_RW_ASYNC, HZ/50); | 			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); | ||||||
| 			gfp_flags |= __GFP_NOFAIL; | 			gfp_flags |= __GFP_NOFAIL; | ||||||
| 			goto retry_encrypt; | 			goto retry_encrypt; | ||||||
| 		} | 		} | ||||||
|  | @ -2397,7 +2415,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) | ||||||
| { | { | ||||||
| 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | ||||||
| 
 | 
 | ||||||
| 	if (test_opt(sbi, LFS)) | 	if (f2fs_lfs_mode(sbi)) | ||||||
| 		return true; | 		return true; | ||||||
| 	if (S_ISDIR(inode->i_mode)) | 	if (S_ISDIR(inode->i_mode)) | ||||||
| 		return true; | 		return true; | ||||||
|  | @ -2647,10 +2665,10 @@ write: | ||||||
| 	if (err) { | 	if (err) { | ||||||
| 		file_set_keep_isize(inode); | 		file_set_keep_isize(inode); | ||||||
| 	} else { | 	} else { | ||||||
| 		down_write(&F2FS_I(inode)->i_sem); | 		spin_lock(&F2FS_I(inode)->i_size_lock); | ||||||
| 		if (F2FS_I(inode)->last_disk_size < psize) | 		if (F2FS_I(inode)->last_disk_size < psize) | ||||||
| 			F2FS_I(inode)->last_disk_size = psize; | 			F2FS_I(inode)->last_disk_size = psize; | ||||||
| 		up_write(&F2FS_I(inode)->i_sem); | 		spin_unlock(&F2FS_I(inode)->i_size_lock); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| done: | done: | ||||||
|  | @ -2917,7 +2935,7 @@ result: | ||||||
| 					if (wbc->sync_mode == WB_SYNC_ALL) { | 					if (wbc->sync_mode == WB_SYNC_ALL) { | ||||||
| 						cond_resched(); | 						cond_resched(); | ||||||
| 						congestion_wait(BLK_RW_ASYNC, | 						congestion_wait(BLK_RW_ASYNC, | ||||||
| 								HZ/50); | 							DEFAULT_IO_TIMEOUT); | ||||||
| 						goto retry_write; | 						goto retry_write; | ||||||
| 					} | 					} | ||||||
| 					goto next; | 					goto next; | ||||||
|  | @ -2973,15 +2991,17 @@ next: | ||||||
| static inline bool __should_serialize_io(struct inode *inode, | static inline bool __should_serialize_io(struct inode *inode, | ||||||
| 					struct writeback_control *wbc) | 					struct writeback_control *wbc) | ||||||
| { | { | ||||||
| 	if (!S_ISREG(inode->i_mode)) |  | ||||||
| 		return false; |  | ||||||
| 	if (f2fs_compressed_file(inode)) |  | ||||||
| 		return true; |  | ||||||
| 	if (IS_NOQUOTA(inode)) |  | ||||||
| 		return false; |  | ||||||
| 	/* to avoid deadlock in path of data flush */ | 	/* to avoid deadlock in path of data flush */ | ||||||
| 	if (F2FS_I(inode)->cp_task) | 	if (F2FS_I(inode)->cp_task) | ||||||
| 		return false; | 		return false; | ||||||
|  | 
 | ||||||
|  | 	if (!S_ISREG(inode->i_mode)) | ||||||
|  | 		return false; | ||||||
|  | 	if (IS_NOQUOTA(inode)) | ||||||
|  | 		return false; | ||||||
|  | 
 | ||||||
|  | 	if (f2fs_compressed_file(inode)) | ||||||
|  | 		return true; | ||||||
| 	if (wbc->sync_mode != WB_SYNC_ALL) | 	if (wbc->sync_mode != WB_SYNC_ALL) | ||||||
| 		return true; | 		return true; | ||||||
| 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) | 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) | ||||||
|  | @ -3283,7 +3303,7 @@ repeat: | ||||||
| 			err = -EFSCORRUPTED; | 			err = -EFSCORRUPTED; | ||||||
| 			goto fail; | 			goto fail; | ||||||
| 		} | 		} | ||||||
| 		err = f2fs_submit_page_read(inode, page, blkaddr); | 		err = f2fs_submit_page_read(inode, page, blkaddr, true); | ||||||
| 		if (err) | 		if (err) | ||||||
| 			goto fail; | 			goto fail; | ||||||
| 
 | 
 | ||||||
|  | @ -3464,7 +3484,8 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) | ||||||
| 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, | 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, | ||||||
| 			iter, rw == WRITE ? get_data_block_dio_write : | 			iter, rw == WRITE ? get_data_block_dio_write : | ||||||
| 			get_data_block_dio, NULL, f2fs_dio_submit_bio, | 			get_data_block_dio, NULL, f2fs_dio_submit_bio, | ||||||
| 			DIO_LOCKING | DIO_SKIP_HOLES); | 			rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES : | ||||||
|  | 			DIO_SKIP_HOLES); | ||||||
| 
 | 
 | ||||||
| 	if (do_opu) | 	if (do_opu) | ||||||
| 		up_read(&fi->i_gc_rwsem[READ]); | 		up_read(&fi->i_gc_rwsem[READ]); | ||||||
|  | @ -3861,7 +3882,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi) | ||||||
| 
 | 
 | ||||||
| int __init f2fs_init_bio_entry_cache(void) | int __init f2fs_init_bio_entry_cache(void) | ||||||
| { | { | ||||||
| 	bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab", | 	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab", | ||||||
| 			sizeof(struct bio_entry)); | 			sizeof(struct bio_entry)); | ||||||
| 	if (!bio_entry_slab) | 	if (!bio_entry_slab) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
fs/f2fs/debug.c
|  | @ -301,6 +301,9 @@ static int stat_show(struct seq_file *s, void *v) | ||||||
| 			   si->ssa_area_segs, si->main_area_segs); | 			   si->ssa_area_segs, si->main_area_segs); | ||||||
| 		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", | 		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", | ||||||
| 			   si->overp_segs, si->rsvd_segs); | 			   si->overp_segs, si->rsvd_segs); | ||||||
|  | 		seq_printf(s, "Current Time Sec: %llu / Mounted Time Sec: %llu\n\n", | ||||||
|  | 					ktime_get_boottime_seconds(), | ||||||
|  | 					SIT_I(si->sbi)->mounted_time); | ||||||
| 		if (test_opt(si->sbi, DISCARD)) | 		if (test_opt(si->sbi, DISCARD)) | ||||||
| 			seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n", | 			seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n", | ||||||
| 				si->utilization, si->valid_count, si->discard_blks); | 				si->utilization, si->valid_count, si->discard_blks); | ||||||
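The two added lines report the current boottime seconds next to the SIT-recorded mount time whenever the status file is read. A hedged userspace sketch of pulling that line out of debugfs — the path assumes debugfs is mounted at /sys/kernel/debug and that this seq_file is exposed as f2fs/status:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/f2fs/status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* print "Current Time Sec: ... / Mounted Time Sec: ..." if present */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "Mounted Time"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}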
|  |  | ||||||
fs/f2fs/dir.c
|  | @ -471,7 +471,6 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, | ||||||
| 			struct page *dpage) | 			struct page *dpage) | ||||||
| { | { | ||||||
| 	struct page *page; | 	struct page *page; | ||||||
| 	int dummy_encrypt = DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(dir)); |  | ||||||
| 	int err; | 	int err; | ||||||
| 
 | 
 | ||||||
| 	if (is_inode_flag_set(inode, FI_NEW_INODE)) { | 	if (is_inode_flag_set(inode, FI_NEW_INODE)) { | ||||||
|  | @ -498,8 +497,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, | ||||||
| 		if (err) | 		if (err) | ||||||
| 			goto put_error; | 			goto put_error; | ||||||
| 
 | 
 | ||||||
| 		if ((IS_ENCRYPTED(dir) || dummy_encrypt) && | 		if (IS_ENCRYPTED(inode)) { | ||||||
| 					f2fs_may_encrypt(inode)) { |  | ||||||
| 			err = fscrypt_inherit_context(dir, inode, page, false); | 			err = fscrypt_inherit_context(dir, inode, page, false); | ||||||
| 			if (err) | 			if (err) | ||||||
| 				goto put_error; | 				goto put_error; | ||||||
|  | @ -850,12 +848,6 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, | ||||||
| 			0); | 			0); | ||||||
| 	set_page_dirty(page); | 	set_page_dirty(page); | ||||||
| 
 | 
 | ||||||
| 	dir->i_ctime = dir->i_mtime = current_time(dir); |  | ||||||
| 	f2fs_mark_inode_dirty_sync(dir, false); |  | ||||||
| 
 |  | ||||||
| 	if (inode) |  | ||||||
| 		f2fs_drop_nlink(dir, inode); |  | ||||||
| 
 |  | ||||||
| 	if (bit_pos == NR_DENTRY_IN_BLOCK && | 	if (bit_pos == NR_DENTRY_IN_BLOCK && | ||||||
| 		!f2fs_truncate_hole(dir, page->index, page->index + 1)) { | 		!f2fs_truncate_hole(dir, page->index, page->index + 1)) { | ||||||
| 		f2fs_clear_page_cache_dirty_tag(page); | 		f2fs_clear_page_cache_dirty_tag(page); | ||||||
|  | @ -867,6 +859,12 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, | ||||||
| 		f2fs_remove_dirty_inode(dir); | 		f2fs_remove_dirty_inode(dir); | ||||||
| 	} | 	} | ||||||
| 	f2fs_put_page(page, 1); | 	f2fs_put_page(page, 1); | ||||||
|  | 
 | ||||||
|  | 	dir->i_ctime = dir->i_mtime = current_time(dir); | ||||||
|  | 	f2fs_mark_inode_dirty_sync(dir, false); | ||||||
|  | 
 | ||||||
|  | 	if (inode) | ||||||
|  | 		f2fs_drop_nlink(dir, inode); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| bool f2fs_empty_dir(struct inode *dir) | bool f2fs_empty_dir(struct inode *dir) | ||||||
|  |  | ||||||
fs/f2fs/f2fs.h (206 lines changed)
|  | @ -75,7 +75,6 @@ extern const char *f2fs_fault_name[FAULT_MAX]; | ||||||
| /*
 | /*
 | ||||||
|  * For mount options |  * For mount options | ||||||
|  */ |  */ | ||||||
| #define F2FS_MOUNT_BG_GC		0x00000001 |  | ||||||
| #define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002 | #define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002 | ||||||
| #define F2FS_MOUNT_DISCARD		0x00000004 | #define F2FS_MOUNT_DISCARD		0x00000004 | ||||||
| #define F2FS_MOUNT_NOHEAP		0x00000008 | #define F2FS_MOUNT_NOHEAP		0x00000008 | ||||||
|  | @ -89,11 +88,8 @@ extern const char *f2fs_fault_name[FAULT_MAX]; | ||||||
| #define F2FS_MOUNT_NOBARRIER		0x00000800 | #define F2FS_MOUNT_NOBARRIER		0x00000800 | ||||||
| #define F2FS_MOUNT_FASTBOOT		0x00001000 | #define F2FS_MOUNT_FASTBOOT		0x00001000 | ||||||
| #define F2FS_MOUNT_EXTENT_CACHE		0x00002000 | #define F2FS_MOUNT_EXTENT_CACHE		0x00002000 | ||||||
| #define F2FS_MOUNT_FORCE_FG_GC		0x00004000 |  | ||||||
| #define F2FS_MOUNT_DATA_FLUSH		0x00008000 | #define F2FS_MOUNT_DATA_FLUSH		0x00008000 | ||||||
| #define F2FS_MOUNT_FAULT_INJECTION	0x00010000 | #define F2FS_MOUNT_FAULT_INJECTION	0x00010000 | ||||||
| #define F2FS_MOUNT_ADAPTIVE		0x00020000 |  | ||||||
| #define F2FS_MOUNT_LFS			0x00040000 |  | ||||||
| #define F2FS_MOUNT_USRQUOTA		0x00080000 | #define F2FS_MOUNT_USRQUOTA		0x00080000 | ||||||
| #define F2FS_MOUNT_GRPQUOTA		0x00100000 | #define F2FS_MOUNT_GRPQUOTA		0x00100000 | ||||||
| #define F2FS_MOUNT_PRJQUOTA		0x00200000 | #define F2FS_MOUNT_PRJQUOTA		0x00200000 | ||||||
|  | @ -101,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX]; | ||||||
| #define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000 | #define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000 | ||||||
| #define F2FS_MOUNT_RESERVE_ROOT		0x01000000 | #define F2FS_MOUNT_RESERVE_ROOT		0x01000000 | ||||||
| #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000 | #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000 | ||||||
|  | #define F2FS_MOUNT_NORECOVERY		0x04000000 | ||||||
| 
 | 
 | ||||||
| #define F2FS_OPTION(sbi)	((sbi)->mount_opt) | #define F2FS_OPTION(sbi)	((sbi)->mount_opt) | ||||||
| #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) | #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) | ||||||
|  | @ -139,6 +136,8 @@ struct f2fs_mount_info { | ||||||
| 	int whint_mode; | 	int whint_mode; | ||||||
| 	int alloc_mode;			/* segment allocation policy */ | 	int alloc_mode;			/* segment allocation policy */ | ||||||
| 	int fsync_mode;			/* fsync policy */ | 	int fsync_mode;			/* fsync policy */ | ||||||
|  | 	int fs_mode;			/* fs mode: LFS or ADAPTIVE */ | ||||||
|  | 	int bggc_mode;			/* bggc mode: off, on or sync */ | ||||||
| 	bool test_dummy_encryption;	/* test dummy encryption */ | 	bool test_dummy_encryption;	/* test dummy encryption */ | ||||||
| 	block_t unusable_cap;		/* Amount of space allowed to be
 | 	block_t unusable_cap;		/* Amount of space allowed to be
 | ||||||
| 					 * unusable when disabling checkpoint | 					 * unusable when disabling checkpoint | ||||||
|  | @ -332,8 +331,8 @@ struct discard_policy { | ||||||
| 	bool io_aware;			/* issue discard in idle time */ | 	bool io_aware;			/* issue discard in idle time */ | ||||||
| 	bool sync;			/* submit discard with REQ_SYNC flag */ | 	bool sync;			/* submit discard with REQ_SYNC flag */ | ||||||
| 	bool ordered;			/* issue discard by lba order */ | 	bool ordered;			/* issue discard by lba order */ | ||||||
|  | 	bool timeout;			/* discard timeout for put_super */ | ||||||
| 	unsigned int granularity;	/* discard granularity */ | 	unsigned int granularity;	/* discard granularity */ | ||||||
| 	int timeout;			/* discard timeout for put_super */ |  | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct discard_cmd_control { | struct discard_cmd_control { | ||||||
|  | @ -428,6 +427,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, | ||||||
| #define F2FS_IOC_GET_PIN_FILE		_IOR(F2FS_IOCTL_MAGIC, 14, __u32) | #define F2FS_IOC_GET_PIN_FILE		_IOR(F2FS_IOCTL_MAGIC, 14, __u32) | ||||||
| #define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15) | #define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15) | ||||||
| #define F2FS_IOC_RESIZE_FS		_IOW(F2FS_IOCTL_MAGIC, 16, __u64) | #define F2FS_IOC_RESIZE_FS		_IOW(F2FS_IOCTL_MAGIC, 16, __u64) | ||||||
|  | #define F2FS_IOC_GET_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 17, __u64) | ||||||
| 
 | 
 | ||||||
| #define F2FS_IOC_GET_VOLUME_NAME	FS_IOC_GETFSLABEL | #define F2FS_IOC_GET_VOLUME_NAME	FS_IOC_GETFSLABEL | ||||||
| #define F2FS_IOC_SET_VOLUME_NAME	FS_IOC_SETFSLABEL | #define F2FS_IOC_SET_VOLUME_NAME	FS_IOC_SETFSLABEL | ||||||
|  | @ -560,6 +560,9 @@ enum { | ||||||
| 
 | 
 | ||||||
| #define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */ | #define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */ | ||||||
| 
 | 
 | ||||||
|  | /* congestion wait timeout value, default: 20ms */ | ||||||
|  | #define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20)) | ||||||
|  | 
 | ||||||
| /* maximum retry quota flush count */ | /* maximum retry quota flush count */ | ||||||
| #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8 | #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8 | ||||||
| 
 | 
 | ||||||
|  | @ -676,6 +679,44 @@ enum { | ||||||
| 	MAX_GC_FAILURE | 	MAX_GC_FAILURE | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | /* used for f2fs_inode_info->flags */ | ||||||
|  | enum { | ||||||
|  | 	FI_NEW_INODE,		/* indicate newly allocated inode */ | ||||||
|  | 	FI_DIRTY_INODE,		/* indicate inode is dirty or not */ | ||||||
|  | 	FI_AUTO_RECOVER,	/* indicate inode is recoverable */ | ||||||
|  | 	FI_DIRTY_DIR,		/* indicate directory has dirty pages */ | ||||||
|  | 	FI_INC_LINK,		/* need to increment i_nlink */ | ||||||
|  | 	FI_ACL_MODE,		/* indicate acl mode */ | ||||||
|  | 	FI_NO_ALLOC,		/* should not allocate any blocks */ | ||||||
|  | 	FI_FREE_NID,		/* free allocated nide */ | ||||||
|  | 	FI_NO_EXTENT,		/* not to use the extent cache */ | ||||||
|  | 	FI_INLINE_XATTR,	/* used for inline xattr */ | ||||||
|  | 	FI_INLINE_DATA,		/* used for inline data*/ | ||||||
|  | 	FI_INLINE_DENTRY,	/* used for inline dentry */ | ||||||
|  | 	FI_APPEND_WRITE,	/* inode has appended data */ | ||||||
|  | 	FI_UPDATE_WRITE,	/* inode has in-place-update data */ | ||||||
|  | 	FI_NEED_IPU,		/* used for ipu per file */ | ||||||
|  | 	FI_ATOMIC_FILE,		/* indicate atomic file */ | ||||||
|  | 	FI_ATOMIC_COMMIT,	/* indicate the state of atomical committing */ | ||||||
|  | 	FI_VOLATILE_FILE,	/* indicate volatile file */ | ||||||
|  | 	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */ | ||||||
|  | 	FI_DROP_CACHE,		/* drop dirty page cache */ | ||||||
|  | 	FI_DATA_EXIST,		/* indicate data exists */ | ||||||
|  | 	FI_INLINE_DOTS,		/* indicate inline dot dentries */ | ||||||
|  | 	FI_DO_DEFRAG,		/* indicate defragment is running */ | ||||||
|  | 	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */ | ||||||
|  | 	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */ | ||||||
|  | 	FI_HOT_DATA,		/* indicate file is hot */ | ||||||
|  | 	FI_EXTRA_ATTR,		/* indicate file has extra attribute */ | ||||||
|  | 	FI_PROJ_INHERIT,	/* indicate file inherits projectid */ | ||||||
|  | 	FI_PIN_FILE,		/* indicate file should not be gced */ | ||||||
|  | 	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */ | ||||||
|  | 	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */ | ||||||
|  | 	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */ | ||||||
|  | 	FI_MMAP_FILE,		/* indicate file was mmapped */ | ||||||
|  | 	FI_MAX,			/* max flag, never be used */ | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| struct f2fs_inode_info { | struct f2fs_inode_info { | ||||||
| 	struct inode vfs_inode;		/* serve a vfs inode */ | 	struct inode vfs_inode;		/* serve a vfs inode */ | ||||||
| 	unsigned long i_flags;		/* keep an inode flags for ioctl */ | 	unsigned long i_flags;		/* keep an inode flags for ioctl */ | ||||||
|  | @ -688,7 +729,7 @@ struct f2fs_inode_info { | ||||||
| 	umode_t i_acl_mode;		/* keep file acl mode temporarily */ | 	umode_t i_acl_mode;		/* keep file acl mode temporarily */ | ||||||
| 
 | 
 | ||||||
| 	/* Use below internally in f2fs*/ | 	/* Use below internally in f2fs*/ | ||||||
| 	unsigned long flags;		/* use to pass per-file flags */ | 	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */ | ||||||
| 	struct rw_semaphore i_sem;	/* protect fi info */ | 	struct rw_semaphore i_sem;	/* protect fi info */ | ||||||
| 	atomic_t dirty_pages;		/* # of dirty pages */ | 	atomic_t dirty_pages;		/* # of dirty pages */ | ||||||
| 	f2fs_hash_t chash;		/* hash value of given file name */ | 	f2fs_hash_t chash;		/* hash value of given file name */ | ||||||
|  | @ -697,6 +738,7 @@ struct f2fs_inode_info { | ||||||
| 	struct task_struct *cp_task;	/* separate cp/wb IO stats*/ | 	struct task_struct *cp_task;	/* separate cp/wb IO stats*/ | ||||||
| 	nid_t i_xattr_nid;		/* node id that contains xattrs */ | 	nid_t i_xattr_nid;		/* node id that contains xattrs */ | ||||||
| 	loff_t	last_disk_size;		/* lastly written file size */ | 	loff_t	last_disk_size;		/* lastly written file size */ | ||||||
|  | 	spinlock_t i_size_lock;		/* protect last_disk_size */ | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_QUOTA | #ifdef CONFIG_QUOTA | ||||||
| 	struct dquot *i_dquot[MAXQUOTAS]; | 	struct dquot *i_dquot[MAXQUOTAS]; | ||||||
|  | @ -1172,6 +1214,20 @@ enum { | ||||||
| 	GC_URGENT, | 	GC_URGENT, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | enum { | ||||||
|  | 	BGGC_MODE_ON,		/* background gc is on */ | ||||||
|  | 	BGGC_MODE_OFF,		/* background gc is off */ | ||||||
|  | 	BGGC_MODE_SYNC,		/*
 | ||||||
|  | 				 * background gc is on, migrating blocks | ||||||
|  | 				 * like foreground gc | ||||||
|  | 				 */ | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | enum { | ||||||
|  | 	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */ | ||||||
|  | 	FS_MODE_LFS,		/* use lfs allocation only */ | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| enum { | enum { | ||||||
| 	WHINT_MODE_OFF,		/* not pass down write hints */ | 	WHINT_MODE_OFF,		/* not pass down write hints */ | ||||||
| 	WHINT_MODE_USER,	/* try to pass down hints given by users */ | 	WHINT_MODE_USER,	/* try to pass down hints given by users */ | ||||||
|  | @ -1212,13 +1268,13 @@ enum fsync_mode { | ||||||
| enum compress_algorithm_type { | enum compress_algorithm_type { | ||||||
| 	COMPRESS_LZO, | 	COMPRESS_LZO, | ||||||
| 	COMPRESS_LZ4, | 	COMPRESS_LZ4, | ||||||
|  | 	COMPRESS_ZSTD, | ||||||
| 	COMPRESS_MAX, | 	COMPRESS_MAX, | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| #define COMPRESS_DATA_RESERVED_SIZE		4 | #define COMPRESS_DATA_RESERVED_SIZE		5 | ||||||
| struct compress_data { | struct compress_data { | ||||||
| 	__le32 clen;			/* compressed data size */ | 	__le32 clen;			/* compressed data size */ | ||||||
| 	__le32 chksum;			/* checksum of compressed data */ |  | ||||||
| 	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */ | 	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */ | ||||||
| 	u8 cdata[];			/* compressed data */ | 	u8 cdata[];			/* compressed data */ | ||||||
| }; | }; | ||||||
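A side note on the compress_data change above: dropping chksum while growing COMPRESS_DATA_RESERVED_SIZE from 4 to 5 keeps the header in front of cdata[] the same width, so the on-disk layout size is unchanged. A small stand-alone illustration, with demo struct names and __le32 replaced by uint32_t for a userspace build:

#include <stdio.h>
#include <stdint.h>

/* mirrors the old and new layouts from the hunk above */
struct compress_data_old {
	uint32_t clen;
	uint32_t chksum;
	uint32_t reserved[4];	/* old COMPRESS_DATA_RESERVED_SIZE */
	uint8_t  cdata[];
};

struct compress_data_new {
	uint32_t clen;
	uint32_t reserved[5];	/* new COMPRESS_DATA_RESERVED_SIZE */
	uint8_t  cdata[];
};

int main(void)
{
	/* both print 24: the header size in front of the payload is unchanged */
	printf("old header %zu bytes, new header %zu bytes\n",
	       sizeof(struct compress_data_old),
	       sizeof(struct compress_data_new));
	return 0;
}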
|  | @ -1242,6 +1298,7 @@ struct compress_ctx { | ||||||
| 	size_t rlen;			/* valid data length in rbuf */ | 	size_t rlen;			/* valid data length in rbuf */ | ||||||
| 	size_t clen;			/* valid data length in cbuf */ | 	size_t clen;			/* valid data length in cbuf */ | ||||||
| 	void *private;			/* payload buffer for specified compression algorithm */ | 	void *private;			/* payload buffer for specified compression algorithm */ | ||||||
|  | 	void *private2;			/* extra payload buffer */ | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| /* compress context for write IO path */ | /* compress context for write IO path */ | ||||||
|  | @ -1271,11 +1328,14 @@ struct decompress_io_ctx { | ||||||
| 	size_t clen;			/* valid data length in cbuf */ | 	size_t clen;			/* valid data length in cbuf */ | ||||||
| 	refcount_t ref;			/* referrence count of compressed page */ | 	refcount_t ref;			/* referrence count of compressed page */ | ||||||
| 	bool failed;			/* indicate IO error during decompression */ | 	bool failed;			/* indicate IO error during decompression */ | ||||||
|  | 	void *private;			/* payload buffer for specified decompression algorithm */ | ||||||
|  | 	void *private2;			/* extra payload buffer */ | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| #define NULL_CLUSTER			((unsigned int)(~0)) | #define NULL_CLUSTER			((unsigned int)(~0)) | ||||||
| #define MIN_COMPRESS_LOG_SIZE		2 | #define MIN_COMPRESS_LOG_SIZE		2 | ||||||
| #define MAX_COMPRESS_LOG_SIZE		8 | #define MAX_COMPRESS_LOG_SIZE		8 | ||||||
|  | #define MAX_COMPRESS_WINDOW_SIZE	((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE) | ||||||
| 
 | 
 | ||||||
| struct f2fs_sb_info { | struct f2fs_sb_info { | ||||||
| 	struct super_block *sb;			/* pointer to VFS super block */ | 	struct super_block *sb;			/* pointer to VFS super block */ | ||||||
|  | @ -1471,6 +1531,9 @@ struct f2fs_sb_info { | ||||||
| 	__u32 s_chksum_seed; | 	__u32 s_chksum_seed; | ||||||
| 
 | 
 | ||||||
| 	struct workqueue_struct *post_read_wq;	/* post read workqueue */ | 	struct workqueue_struct *post_read_wq;	/* post read workqueue */ | ||||||
|  | 
 | ||||||
|  | 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */ | ||||||
|  | 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */ | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct f2fs_private_dio { | struct f2fs_private_dio { | ||||||
|  | @ -2211,7 +2274,7 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, | ||||||
| 		dquot_free_inode(inode); | 		dquot_free_inode(inode); | ||||||
| 	} else { | 	} else { | ||||||
| 		if (unlikely(inode->i_blocks == 0)) { | 		if (unlikely(inode->i_blocks == 0)) { | ||||||
| 			f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu", | 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", | ||||||
| 				  inode->i_ino, | 				  inode->i_ino, | ||||||
| 				  (unsigned long long)inode->i_blocks); | 				  (unsigned long long)inode->i_blocks); | ||||||
| 			set_sbi_flag(sbi, SBI_NEED_FSCK); | 			set_sbi_flag(sbi, SBI_NEED_FSCK); | ||||||
|  | @ -2379,7 +2442,7 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline int f2fs_has_extra_attr(struct inode *inode); | static inline int f2fs_has_extra_attr(struct inode *inode); | ||||||
| static inline block_t datablock_addr(struct inode *inode, | static inline block_t data_blkaddr(struct inode *inode, | ||||||
| 			struct page *node_page, unsigned int offset) | 			struct page *node_page, unsigned int offset) | ||||||
| { | { | ||||||
| 	struct f2fs_node *raw_node; | 	struct f2fs_node *raw_node; | ||||||
|  | @ -2389,9 +2452,9 @@ static inline block_t datablock_addr(struct inode *inode, | ||||||
| 
 | 
 | ||||||
| 	raw_node = F2FS_NODE(node_page); | 	raw_node = F2FS_NODE(node_page); | ||||||
| 
 | 
 | ||||||
| 	/* from GC path only */ |  | ||||||
| 	if (is_inode) { | 	if (is_inode) { | ||||||
| 		if (!inode) | 		if (!inode) | ||||||
|  | 			/* from GC path only */ | ||||||
| 			base = offset_in_addr(&raw_node->i); | 			base = offset_in_addr(&raw_node->i); | ||||||
| 		else if (f2fs_has_extra_attr(inode)) | 		else if (f2fs_has_extra_attr(inode)) | ||||||
| 			base = get_extra_isize(inode); | 			base = get_extra_isize(inode); | ||||||
|  | @ -2401,6 +2464,11 @@ static inline block_t datablock_addr(struct inode *inode, | ||||||
| 	return le32_to_cpu(addr_array[base + offset]); | 	return le32_to_cpu(addr_array[base + offset]); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) | ||||||
|  | { | ||||||
|  | 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); | ||||||
|  | } | ||||||
|  | 
 | ||||||
| static inline int f2fs_test_bit(unsigned int nr, char *addr) | static inline int f2fs_test_bit(unsigned int nr, char *addr) | ||||||
| { | { | ||||||
| 	int mask; | 	int mask; | ||||||
|  | @ -2498,43 +2566,6 @@ static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) | ||||||
| 		return flags & F2FS_OTHER_FLMASK; | 		return flags & F2FS_OTHER_FLMASK; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /* used for f2fs_inode_info->flags */ |  | ||||||
| enum { |  | ||||||
| 	FI_NEW_INODE,		/* indicate newly allocated inode */ |  | ||||||
| 	FI_DIRTY_INODE,		/* indicate inode is dirty or not */ |  | ||||||
| 	FI_AUTO_RECOVER,	/* indicate inode is recoverable */ |  | ||||||
| 	FI_DIRTY_DIR,		/* indicate directory has dirty pages */ |  | ||||||
| 	FI_INC_LINK,		/* need to increment i_nlink */ |  | ||||||
| 	FI_ACL_MODE,		/* indicate acl mode */ |  | ||||||
| 	FI_NO_ALLOC,		/* should not allocate any blocks */ |  | ||||||
| 	FI_FREE_NID,		/* free allocated nide */ |  | ||||||
| 	FI_NO_EXTENT,		/* not to use the extent cache */ |  | ||||||
| 	FI_INLINE_XATTR,	/* used for inline xattr */ |  | ||||||
| 	FI_INLINE_DATA,		/* used for inline data*/ |  | ||||||
| 	FI_INLINE_DENTRY,	/* used for inline dentry */ |  | ||||||
| 	FI_APPEND_WRITE,	/* inode has appended data */ |  | ||||||
| 	FI_UPDATE_WRITE,	/* inode has in-place-update data */ |  | ||||||
| 	FI_NEED_IPU,		/* used for ipu per file */ |  | ||||||
| 	FI_ATOMIC_FILE,		/* indicate atomic file */ |  | ||||||
| 	FI_ATOMIC_COMMIT,	/* indicate the state of atomical committing */ |  | ||||||
| 	FI_VOLATILE_FILE,	/* indicate volatile file */ |  | ||||||
| 	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */ |  | ||||||
| 	FI_DROP_CACHE,		/* drop dirty page cache */ |  | ||||||
| 	FI_DATA_EXIST,		/* indicate data exists */ |  | ||||||
| 	FI_INLINE_DOTS,		/* indicate inline dot dentries */ |  | ||||||
| 	FI_DO_DEFRAG,		/* indicate defragment is running */ |  | ||||||
| 	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */ |  | ||||||
| 	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */ |  | ||||||
| 	FI_HOT_DATA,		/* indicate file is hot */ |  | ||||||
| 	FI_EXTRA_ATTR,		/* indicate file has extra attribute */ |  | ||||||
| 	FI_PROJ_INHERIT,	/* indicate file inherits projectid */ |  | ||||||
| 	FI_PIN_FILE,		/* indicate file should not be gced */ |  | ||||||
| 	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */ |  | ||||||
| 	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */ |  | ||||||
| 	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */ |  | ||||||
| 	FI_MMAP_FILE,		/* indicate file was mmapped */ |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| static inline void __mark_inode_dirty_flag(struct inode *inode, | static inline void __mark_inode_dirty_flag(struct inode *inode, | ||||||
| 						int flag, bool set) | 						int flag, bool set) | ||||||
| { | { | ||||||
|  | @ -2549,27 +2580,24 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, | ||||||
| 	case FI_DATA_EXIST: | 	case FI_DATA_EXIST: | ||||||
| 	case FI_INLINE_DOTS: | 	case FI_INLINE_DOTS: | ||||||
| 	case FI_PIN_FILE: | 	case FI_PIN_FILE: | ||||||
| 	case FI_COMPRESSED_FILE: |  | ||||||
| 		f2fs_mark_inode_dirty_sync(inode, true); | 		f2fs_mark_inode_dirty_sync(inode, true); | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void set_inode_flag(struct inode *inode, int flag) | static inline void set_inode_flag(struct inode *inode, int flag) | ||||||
| { | { | ||||||
| 	if (!test_bit(flag, &F2FS_I(inode)->flags)) | 	test_and_set_bit(flag, F2FS_I(inode)->flags); | ||||||
| 		set_bit(flag, &F2FS_I(inode)->flags); |  | ||||||
| 	__mark_inode_dirty_flag(inode, flag, true); | 	__mark_inode_dirty_flag(inode, flag, true); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline int is_inode_flag_set(struct inode *inode, int flag) | static inline int is_inode_flag_set(struct inode *inode, int flag) | ||||||
| { | { | ||||||
| 	return test_bit(flag, &F2FS_I(inode)->flags); | 	return test_bit(flag, F2FS_I(inode)->flags); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void clear_inode_flag(struct inode *inode, int flag) | static inline void clear_inode_flag(struct inode *inode, int flag) | ||||||
| { | { | ||||||
| 	if (test_bit(flag, &F2FS_I(inode)->flags)) | 	test_and_clear_bit(flag, F2FS_I(inode)->flags); | ||||||
| 		clear_bit(flag, &F2FS_I(inode)->flags); |  | ||||||
| 	__mark_inode_dirty_flag(inode, flag, false); | 	__mark_inode_dirty_flag(inode, flag, false); | ||||||
| } | } | ||||||
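The f2fs_inode_info.flags field above becomes an array sized by BITS_TO_LONGS(FI_MAX), and set_inode_flag()/clear_inode_flag() switch to test_and_set_bit()/test_and_clear_bit() on that array. Counting the relocated FI_* enum, FI_MMAP_FILE lands on bit 32, which no longer fits in a single unsigned long on 32-bit architectures. A minimal userspace illustration — demo names and constants only, not kernel code:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* mirrors the enum above: FI_MMAP_FILE is bit 32, FI_MAX is 33 */
enum { FI_MMAP_FILE_DEMO = 32, FI_MAX_DEMO = 33 };

int main(void)
{
	unsigned long flags[BITS_TO_LONGS(FI_MAX_DEMO)] = { 0 };

	/* what set_bit(FI_MMAP_FILE, fi->flags) does on the flag array */
	flags[FI_MMAP_FILE_DEMO / BITS_PER_LONG] |=
			1UL << (FI_MMAP_FILE_DEMO % BITS_PER_LONG);

	printf("flag words needed: %zu (2 on 32-bit, 1 on 64-bit)\n",
	       sizeof(flags) / sizeof(flags[0]));
	return 0;
}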
| 
 | 
 | ||||||
|  | @ -2660,19 +2688,19 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) | ||||||
| 	struct f2fs_inode_info *fi = F2FS_I(inode); | 	struct f2fs_inode_info *fi = F2FS_I(inode); | ||||||
| 
 | 
 | ||||||
| 	if (ri->i_inline & F2FS_INLINE_XATTR) | 	if (ri->i_inline & F2FS_INLINE_XATTR) | ||||||
| 		set_bit(FI_INLINE_XATTR, &fi->flags); | 		set_bit(FI_INLINE_XATTR, fi->flags); | ||||||
| 	if (ri->i_inline & F2FS_INLINE_DATA) | 	if (ri->i_inline & F2FS_INLINE_DATA) | ||||||
| 		set_bit(FI_INLINE_DATA, &fi->flags); | 		set_bit(FI_INLINE_DATA, fi->flags); | ||||||
| 	if (ri->i_inline & F2FS_INLINE_DENTRY) | 	if (ri->i_inline & F2FS_INLINE_DENTRY) | ||||||
| 		set_bit(FI_INLINE_DENTRY, &fi->flags); | 		set_bit(FI_INLINE_DENTRY, fi->flags); | ||||||
| 	if (ri->i_inline & F2FS_DATA_EXIST) | 	if (ri->i_inline & F2FS_DATA_EXIST) | ||||||
| 		set_bit(FI_DATA_EXIST, &fi->flags); | 		set_bit(FI_DATA_EXIST, fi->flags); | ||||||
| 	if (ri->i_inline & F2FS_INLINE_DOTS) | 	if (ri->i_inline & F2FS_INLINE_DOTS) | ||||||
| 		set_bit(FI_INLINE_DOTS, &fi->flags); | 		set_bit(FI_INLINE_DOTS, fi->flags); | ||||||
| 	if (ri->i_inline & F2FS_EXTRA_ATTR) | 	if (ri->i_inline & F2FS_EXTRA_ATTR) | ||||||
| 		set_bit(FI_EXTRA_ATTR, &fi->flags); | 		set_bit(FI_EXTRA_ATTR, fi->flags); | ||||||
| 	if (ri->i_inline & F2FS_PIN_FILE) | 	if (ri->i_inline & F2FS_PIN_FILE) | ||||||
| 		set_bit(FI_PIN_FILE, &fi->flags); | 		set_bit(FI_PIN_FILE, fi->flags); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) | static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) | ||||||
|  | @ -2857,9 +2885,9 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) | ||||||
| 	if (!f2fs_is_time_consistent(inode)) | 	if (!f2fs_is_time_consistent(inode)) | ||||||
| 		return false; | 		return false; | ||||||
| 
 | 
 | ||||||
| 	down_read(&F2FS_I(inode)->i_sem); | 	spin_lock(&F2FS_I(inode)->i_size_lock); | ||||||
| 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); | 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); | ||||||
| 	up_read(&F2FS_I(inode)->i_sem); | 	spin_unlock(&F2FS_I(inode)->i_size_lock); | ||||||
| 
 | 
 | ||||||
| 	return ret; | 	return ret; | ||||||
| } | } | ||||||
|  | @ -3213,7 +3241,7 @@ void f2fs_drop_inmem_pages(struct inode *inode); | ||||||
| void f2fs_drop_inmem_page(struct inode *inode, struct page *page); | void f2fs_drop_inmem_page(struct inode *inode, struct page *page); | ||||||
| int f2fs_commit_inmem_pages(struct inode *inode); | int f2fs_commit_inmem_pages(struct inode *inode); | ||||||
| void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); | void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); | ||||||
| void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi); | void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); | ||||||
| int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); | int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); | ||||||
| int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); | int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); | ||||||
| int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); | int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); | ||||||
|  | @ -3309,7 +3337,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); | ||||||
| void f2fs_update_dirty_page(struct inode *inode, struct page *page); | void f2fs_update_dirty_page(struct inode *inode, struct page *page); | ||||||
| void f2fs_remove_dirty_inode(struct inode *inode); | void f2fs_remove_dirty_inode(struct inode *inode); | ||||||
| int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); | int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); | ||||||
| void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi); | void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); | ||||||
| int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); | int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); | ||||||
| void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); | void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); | ||||||
| int __init f2fs_create_checkpoint_caches(void); | int __init f2fs_create_checkpoint_caches(void); | ||||||
|  | @ -3320,7 +3348,7 @@ void f2fs_destroy_checkpoint_caches(void); | ||||||
|  */ |  */ | ||||||
| int __init f2fs_init_bioset(void); | int __init f2fs_init_bioset(void); | ||||||
| void f2fs_destroy_bioset(void); | void f2fs_destroy_bioset(void); | ||||||
| struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail); | struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio); | ||||||
| int f2fs_init_bio_entry_cache(void); | int f2fs_init_bio_entry_cache(void); | ||||||
| void f2fs_destroy_bio_entry_cache(void); | void f2fs_destroy_bio_entry_cache(void); | ||||||
| void f2fs_submit_bio(struct f2fs_sb_info *sbi, | void f2fs_submit_bio(struct f2fs_sb_info *sbi, | ||||||
|  | @ -3776,7 +3804,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, | ||||||
| int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); | int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); | ||||||
| int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, | ||||||
| 				unsigned nr_pages, sector_t *last_block_in_bio, | 				unsigned nr_pages, sector_t *last_block_in_bio, | ||||||
| 				bool is_readahead); | 				bool is_readahead, bool for_write); | ||||||
| struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); | struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); | ||||||
| void f2fs_free_dic(struct decompress_io_ctx *dic); | void f2fs_free_dic(struct decompress_io_ctx *dic); | ||||||
| void f2fs_decompress_end_io(struct page **rpages, | void f2fs_decompress_end_io(struct page **rpages, | ||||||
|  | @ -3813,6 +3841,7 @@ static inline void set_compress_context(struct inode *inode) | ||||||
| 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; | 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; | ||||||
| 	set_inode_flag(inode, FI_COMPRESSED_FILE); | 	set_inode_flag(inode, FI_COMPRESSED_FILE); | ||||||
| 	stat_inc_compr_inode(inode); | 	stat_inc_compr_inode(inode); | ||||||
|  | 	f2fs_mark_inode_dirty_sync(inode, true); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline u64 f2fs_disable_compressed_file(struct inode *inode) | static inline u64 f2fs_disable_compressed_file(struct inode *inode) | ||||||
|  | @ -3821,12 +3850,17 @@ static inline u64 f2fs_disable_compressed_file(struct inode *inode) | ||||||
| 
 | 
 | ||||||
| 	if (!f2fs_compressed_file(inode)) | 	if (!f2fs_compressed_file(inode)) | ||||||
| 		return 0; | 		return 0; | ||||||
| 	if (fi->i_compr_blocks) | 	if (S_ISREG(inode->i_mode)) { | ||||||
| 		return fi->i_compr_blocks; | 		if (get_dirty_pages(inode)) | ||||||
|  | 			return 1; | ||||||
|  | 		if (fi->i_compr_blocks) | ||||||
|  | 			return fi->i_compr_blocks; | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	fi->i_flags &= ~F2FS_COMPR_FL; | 	fi->i_flags &= ~F2FS_COMPR_FL; | ||||||
| 	clear_inode_flag(inode, FI_COMPRESSED_FILE); |  | ||||||
| 	stat_dec_compr_inode(inode); | 	stat_dec_compr_inode(inode); | ||||||
|  | 	clear_inode_flag(inode, FI_COMPRESSED_FILE); | ||||||
|  | 	f2fs_mark_inode_dirty_sync(inode, true); | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -3903,31 +3937,25 @@ static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) | ||||||
| 	return false; | 	return false; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 
 | static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) | ||||||
| static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt) |  | ||||||
| { | { | ||||||
| 	clear_opt(sbi, ADAPTIVE); | 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; | ||||||
| 	clear_opt(sbi, LFS); |  | ||||||
| 
 |  | ||||||
| 	switch (mt) { |  | ||||||
| 	case F2FS_MOUNT_ADAPTIVE: |  | ||||||
| 		set_opt(sbi, ADAPTIVE); |  | ||||||
| 		break; |  | ||||||
| 	case F2FS_MOUNT_LFS: |  | ||||||
| 		set_opt(sbi, LFS); |  | ||||||
| 		break; |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline bool f2fs_may_encrypt(struct inode *inode) | static inline bool f2fs_may_encrypt(struct inode *dir, struct inode *inode) | ||||||
| { | { | ||||||
| #ifdef CONFIG_FS_ENCRYPTION | #ifdef CONFIG_FS_ENCRYPTION | ||||||
|  | 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir); | ||||||
| 	umode_t mode = inode->i_mode; | 	umode_t mode = inode->i_mode; | ||||||
| 
 | 
 | ||||||
| 	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)); | 	/*
 | ||||||
| #else | 	 * If the directory encrypted or dummy encryption enabled, | ||||||
| 	return false; | 	 * then we should encrypt the inode. | ||||||
|  | 	 */ | ||||||
|  | 	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) | ||||||
|  | 		return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)); | ||||||
| #endif | #endif | ||||||
|  | 	return false; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline bool f2fs_may_compress(struct inode *inode) | static inline bool f2fs_may_compress(struct inode *inode) | ||||||
|  | @ -3971,7 +3999,7 @@ static inline int allow_outplace_dio(struct inode *inode, | ||||||
| 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | ||||||
| 	int rw = iov_iter_rw(iter); | 	int rw = iov_iter_rw(iter); | ||||||
| 
 | 
 | ||||||
| 	return (test_opt(sbi, LFS) && (rw == WRITE) && | 	return (f2fs_lfs_mode(sbi) && (rw == WRITE) && | ||||||
| 				!block_unaligned_IO(inode, iocb, iter)); | 				!block_unaligned_IO(inode, iocb, iter)); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -3993,7 +4021,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, | ||||||
| 	 */ | 	 */ | ||||||
| 	if (f2fs_sb_has_blkzoned(sbi)) | 	if (f2fs_sb_has_blkzoned(sbi)) | ||||||
| 		return true; | 		return true; | ||||||
| 	if (test_opt(sbi, LFS) && (rw == WRITE)) { | 	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) { | ||||||
| 		if (block_unaligned_IO(inode, iocb, iter)) | 		if (block_unaligned_IO(inode, iocb, iter)) | ||||||
| 			return true; | 			return true; | ||||||
| 		if (F2FS_IO_ALIGNED(sbi)) | 		if (F2FS_IO_ALIGNED(sbi)) | ||||||
|  |  | ||||||
fs/f2fs/file.c
|  | @ -106,13 +106,20 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) | ||||||
| 		err = f2fs_get_block(&dn, page->index); | 		err = f2fs_get_block(&dn, page->index); | ||||||
| 		f2fs_put_dnode(&dn); | 		f2fs_put_dnode(&dn); | ||||||
| 		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); | 		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); | ||||||
| 		if (err) { |  | ||||||
| 			unlock_page(page); |  | ||||||
| 			goto out_sem; |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	/* fill the page */ | #ifdef CONFIG_F2FS_FS_COMPRESSION | ||||||
|  | 	if (!need_alloc) { | ||||||
|  | 		set_new_dnode(&dn, inode, NULL, NULL, 0); | ||||||
|  | 		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); | ||||||
|  | 		f2fs_put_dnode(&dn); | ||||||
|  | 	} | ||||||
|  | #endif | ||||||
|  | 	if (err) { | ||||||
|  | 		unlock_page(page); | ||||||
|  | 		goto out_sem; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	f2fs_wait_on_page_writeback(page, DATA, false, true); | 	f2fs_wait_on_page_writeback(page, DATA, false, true); | ||||||
| 
 | 
 | ||||||
| 	/* wait for GCed page writeback via META_MAPPING */ | 	/* wait for GCed page writeback via META_MAPPING */ | ||||||
|  | @ -448,8 +455,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) | ||||||
| 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) { | 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) { | ||||||
| 			block_t blkaddr; | 			block_t blkaddr; | ||||||
| 
 | 
 | ||||||
| 			blkaddr = datablock_addr(dn.inode, | 			blkaddr = f2fs_data_blkaddr(&dn); | ||||||
| 					dn.node_page, dn.ofs_in_node); |  | ||||||
| 
 | 
 | ||||||
| 			if (__is_valid_data_blkaddr(blkaddr) && | 			if (__is_valid_data_blkaddr(blkaddr) && | ||||||
| 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), | 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), | ||||||
|  | @ -793,6 +799,8 @@ int f2fs_getattr(const struct path *path, struct kstat *stat, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	flags = fi->i_flags; | 	flags = fi->i_flags; | ||||||
|  | 	if (flags & F2FS_COMPR_FL) | ||||||
|  | 		stat->attributes |= STATX_ATTR_COMPRESSED; | ||||||
| 	if (flags & F2FS_APPEND_FL) | 	if (flags & F2FS_APPEND_FL) | ||||||
| 		stat->attributes |= STATX_ATTR_APPEND; | 		stat->attributes |= STATX_ATTR_APPEND; | ||||||
| 	if (IS_ENCRYPTED(inode)) | 	if (IS_ENCRYPTED(inode)) | ||||||
|  | @ -804,7 +812,8 @@ int f2fs_getattr(const struct path *path, struct kstat *stat, | ||||||
| 	if (IS_VERITY(inode)) | 	if (IS_VERITY(inode)) | ||||||
| 		stat->attributes |= STATX_ATTR_VERITY; | 		stat->attributes |= STATX_ATTR_VERITY; | ||||||
| 
 | 
 | ||||||
| 	stat->attributes_mask |= (STATX_ATTR_APPEND | | 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED | | ||||||
|  | 				  STATX_ATTR_APPEND | | ||||||
| 				  STATX_ATTR_ENCRYPTED | | 				  STATX_ATTR_ENCRYPTED | | ||||||
| 				  STATX_ATTR_IMMUTABLE | | 				  STATX_ATTR_IMMUTABLE | | ||||||
| 				  STATX_ATTR_NODUMP | | 				  STATX_ATTR_NODUMP | | ||||||
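With STATX_ATTR_COMPRESSED now set in stx_attributes and advertised in stx_attributes_mask, userspace can ask whether a file is compressed through plain statx(2). A hedged sketch — glibc 2.28+ provides the statx() wrapper, older systems would need syscall(SYS_statx, ...):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc != 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BASIC_STATS, &stx) < 0) {
		perror("statx");
		return 1;
	}
	/* the mask bit tells us the filesystem reports this attribute */
	if (stx.stx_attributes_mask & STATX_ATTR_COMPRESSED)
		printf("%s is %scompressed\n", argv[1],
		       (stx.stx_attributes & STATX_ATTR_COMPRESSED) ? "" : "not ");
	return 0;
}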
|  | @ -929,10 +938,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) | ||||||
| 		if (err) | 		if (err) | ||||||
| 			return err; | 			return err; | ||||||
| 
 | 
 | ||||||
| 		down_write(&F2FS_I(inode)->i_sem); | 		spin_lock(&F2FS_I(inode)->i_size_lock); | ||||||
| 		inode->i_mtime = inode->i_ctime = current_time(inode); | 		inode->i_mtime = inode->i_ctime = current_time(inode); | ||||||
| 		F2FS_I(inode)->last_disk_size = i_size_read(inode); | 		F2FS_I(inode)->last_disk_size = i_size_read(inode); | ||||||
| 		up_write(&F2FS_I(inode)->i_sem); | 		spin_unlock(&F2FS_I(inode)->i_size_lock); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	__setattr_copy(inode, attr); | 	__setattr_copy(inode, attr); | ||||||
|  | @ -1109,8 +1118,7 @@ next_dnode: | ||||||
| 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) - | 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) - | ||||||
| 							dn.ofs_in_node, len); | 							dn.ofs_in_node, len); | ||||||
| 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { | 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { | ||||||
| 		*blkaddr = datablock_addr(dn.inode, | 		*blkaddr = f2fs_data_blkaddr(&dn); | ||||||
| 					dn.node_page, dn.ofs_in_node); |  | ||||||
| 
 | 
 | ||||||
| 		if (__is_valid_data_blkaddr(*blkaddr) && | 		if (__is_valid_data_blkaddr(*blkaddr) && | ||||||
| 			!f2fs_is_valid_blkaddr(sbi, *blkaddr, | 			!f2fs_is_valid_blkaddr(sbi, *blkaddr, | ||||||
|  | @ -1121,7 +1129,7 @@ next_dnode: | ||||||
| 
 | 
 | ||||||
| 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { | 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { | ||||||
| 
 | 
 | ||||||
| 			if (test_opt(sbi, LFS)) { | 			if (f2fs_lfs_mode(sbi)) { | ||||||
| 				f2fs_put_dnode(&dn); | 				f2fs_put_dnode(&dn); | ||||||
| 				return -EOPNOTSUPP; | 				return -EOPNOTSUPP; | ||||||
| 			} | 			} | ||||||
|  | @ -1199,8 +1207,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, | ||||||
| 				ADDRS_PER_PAGE(dn.node_page, dst_inode) - | 				ADDRS_PER_PAGE(dn.node_page, dst_inode) - | ||||||
| 						dn.ofs_in_node, len - i); | 						dn.ofs_in_node, len - i); | ||||||
| 			do { | 			do { | ||||||
| 				dn.data_blkaddr = datablock_addr(dn.inode, | 				dn.data_blkaddr = f2fs_data_blkaddr(&dn); | ||||||
| 						dn.node_page, dn.ofs_in_node); |  | ||||||
| 				f2fs_truncate_data_blocks_range(&dn, 1); | 				f2fs_truncate_data_blocks_range(&dn, 1); | ||||||
| 
 | 
 | ||||||
| 				if (do_replace[i]) { | 				if (do_replace[i]) { | ||||||
|  | @ -1376,8 +1383,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, | ||||||
| 	int ret; | 	int ret; | ||||||
| 
 | 
 | ||||||
| 	for (; index < end; index++, dn->ofs_in_node++) { | 	for (; index < end; index++, dn->ofs_in_node++) { | ||||||
| 		if (datablock_addr(dn->inode, dn->node_page, | 		if (f2fs_data_blkaddr(dn) == NULL_ADDR) | ||||||
| 					dn->ofs_in_node) == NULL_ADDR) |  | ||||||
| 			count++; | 			count++; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -1388,8 +1394,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, | ||||||
| 
 | 
 | ||||||
| 	dn->ofs_in_node = ofs_in_node; | 	dn->ofs_in_node = ofs_in_node; | ||||||
| 	for (index = start; index < end; index++, dn->ofs_in_node++) { | 	for (index = start; index < end; index++, dn->ofs_in_node++) { | ||||||
| 		dn->data_blkaddr = datablock_addr(dn->inode, | 		dn->data_blkaddr = f2fs_data_blkaddr(dn); | ||||||
| 					dn->node_page, dn->ofs_in_node); |  | ||||||
| 		/*
 | 		/*
 | ||||||
| 		 * f2fs_reserve_new_blocks will not guarantee entire block | 		 * f2fs_reserve_new_blocks will not guarantee entire block | ||||||
| 		 * allocation. | 		 * allocation. | ||||||
|  | @ -1787,12 +1792,15 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id) | ||||||
| static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) | static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) | ||||||
| { | { | ||||||
| 	struct f2fs_inode_info *fi = F2FS_I(inode); | 	struct f2fs_inode_info *fi = F2FS_I(inode); | ||||||
|  | 	u32 masked_flags = fi->i_flags & mask; | ||||||
|  | 
 | ||||||
|  | 	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask)); | ||||||
| 
 | 
 | ||||||
| 	/* Is it quota file? Do not allow user to mess with it */ | 	/* Is it quota file? Do not allow user to mess with it */ | ||||||
| 	if (IS_NOQUOTA(inode)) | 	if (IS_NOQUOTA(inode)) | ||||||
| 		return -EPERM; | 		return -EPERM; | ||||||
| 
 | 
 | ||||||
| 	if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) { | 	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) { | ||||||
| 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) | 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) | ||||||
| 			return -EOPNOTSUPP; | 			return -EOPNOTSUPP; | ||||||
| 		if (!f2fs_empty_dir(inode)) | 		if (!f2fs_empty_dir(inode)) | ||||||
|  | @ -1806,27 +1814,22 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) | ||||||
| 			return -EINVAL; | 			return -EINVAL; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) { | 	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) { | ||||||
| 		if (S_ISREG(inode->i_mode) && | 		if (masked_flags & F2FS_COMPR_FL) { | ||||||
| 			(fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) || | 			if (f2fs_disable_compressed_file(inode)) | ||||||
| 						F2FS_HAS_BLOCKS(inode))) | 				return -EINVAL; | ||||||
| 			return -EINVAL; | 		} | ||||||
| 		if (iflags & F2FS_NOCOMP_FL) | 		if (iflags & F2FS_NOCOMP_FL) | ||||||
| 			return -EINVAL; | 			return -EINVAL; | ||||||
| 		if (iflags & F2FS_COMPR_FL) { | 		if (iflags & F2FS_COMPR_FL) { | ||||||
| 			int err = f2fs_convert_inline_inode(inode); |  | ||||||
| 
 |  | ||||||
| 			if (err) |  | ||||||
| 				return err; |  | ||||||
| 
 |  | ||||||
| 			if (!f2fs_may_compress(inode)) | 			if (!f2fs_may_compress(inode)) | ||||||
| 				return -EINVAL; | 				return -EINVAL; | ||||||
| 
 | 
 | ||||||
| 			set_compress_context(inode); | 			set_compress_context(inode); | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) { | 	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) { | ||||||
| 		if (fi->i_flags & F2FS_COMPR_FL) | 		if (masked_flags & F2FS_COMPR_FL) | ||||||
| 			return -EINVAL; | 			return -EINVAL; | ||||||
| 	} | 	} | ||||||
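Userspace reaches f2fs_setflags_common() above through the generic attribute-flag ioctls (chattr +c/-c ends up on the same path). A hedged sketch using the standard <linux/fs.h> definitions, assuming f2fs keeps mapping FS_COMPR_FL onto its F2FS_COMPR_FL bit; whether the kernel accepts the change depends on the checks in this hunk, e.g. clearing compression now fails while compressed blocks or dirty pages remain:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_compress_flag(const char *path, int enable)
{
	int fd = open(path, O_RDONLY);
	int flags, err = -1;

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		if (enable)
			flags |= FS_COMPR_FL;
		else
			flags &= ~FS_COMPR_FL;
		/* may be rejected (-EINVAL) by the checks shown above */
		err = ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}
	close(fd);
	return err;
}

int main(int argc, char **argv)
{
	if (argc != 2)
		return 1;
	if (set_compress_flag(argv[1], 1))
		perror("FS_IOC_SETFLAGS");
	return 0;
}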
| 
 | 
 | ||||||
|  | @ -3401,6 +3404,21 @@ out: | ||||||
| 	return err; | 	return err; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg) | ||||||
|  | { | ||||||
|  | 	struct inode *inode = file_inode(filp); | ||||||
|  | 	__u64 blocks; | ||||||
|  | 
 | ||||||
|  | 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) | ||||||
|  | 		return -EOPNOTSUPP; | ||||||
|  | 
 | ||||||
|  | 	if (!f2fs_compressed_file(inode)) | ||||||
|  | 		return -EINVAL; | ||||||
|  | 
 | ||||||
|  | 	blocks = F2FS_I(inode)->i_compr_blocks; | ||||||
|  | 	return put_user(blocks, (u64 __user *)arg); | ||||||
|  | } | ||||||
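For completeness, a hedged userspace sketch of calling the ioctl that f2fs_get_compress_blocks() above services. The request number mirrors the F2FS_IOC_GET_COMPRESS_BLOCKS definition added in this series; F2FS_IOCTL_MAGIC is assumed to be 0xf5 as in fs/f2fs/f2fs.h. The handler returns -EOPNOTSUPP when the filesystem lacks compression support and -EINVAL for non-compressed files:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC		0xf5	/* assumed, see fs/f2fs/f2fs.h */
#define F2FS_IOC_GET_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 17, __u64)

int main(int argc, char **argv)
{
	__u64 blocks;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks) < 0) {
		perror("F2FS_IOC_GET_COMPRESS_BLOCKS");
		close(fd);
		return 1;
	}
	printf("%s: %llu compressed blocks\n", argv[1],
	       (unsigned long long)blocks);
	close(fd);
	return 0;
}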
|  | 
 | ||||||
| long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||||||
| { | { | ||||||
| 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) | 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) | ||||||
|  | @ -3481,6 +3499,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||||||
| 		return f2fs_get_volume_name(filp, arg); | 		return f2fs_get_volume_name(filp, arg); | ||||||
| 	case F2FS_IOC_SET_VOLUME_NAME: | 	case F2FS_IOC_SET_VOLUME_NAME: | ||||||
| 		return f2fs_set_volume_name(filp, arg); | 		return f2fs_set_volume_name(filp, arg); | ||||||
|  | 	case F2FS_IOC_GET_COMPRESS_BLOCKS: | ||||||
|  | 		return f2fs_get_compress_blocks(filp, arg); | ||||||
| 	default: | 	default: | ||||||
| 		return -ENOTTY; | 		return -ENOTTY; | ||||||
| 	} | 	} | ||||||
|  | @ -3508,8 +3528,10 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | ||||||
| 		goto out; | 		goto out; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (!f2fs_is_compress_backend_ready(inode)) | 	if (!f2fs_is_compress_backend_ready(inode)) { | ||||||
| 		return -EOPNOTSUPP; | 		ret = -EOPNOTSUPP; | ||||||
|  | 		goto out; | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	if (iocb->ki_flags & IOCB_NOWAIT) { | 	if (iocb->ki_flags & IOCB_NOWAIT) { | ||||||
| 		if (!inode_trylock(inode)) { | 		if (!inode_trylock(inode)) { | ||||||
|  | @ -3639,6 +3661,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||||||
| 	case FS_IOC_MEASURE_VERITY: | 	case FS_IOC_MEASURE_VERITY: | ||||||
| 	case F2FS_IOC_GET_VOLUME_NAME: | 	case F2FS_IOC_GET_VOLUME_NAME: | ||||||
| 	case F2FS_IOC_SET_VOLUME_NAME: | 	case F2FS_IOC_SET_VOLUME_NAME: | ||||||
|  | 	case F2FS_IOC_GET_COMPRESS_BLOCKS: | ||||||
| 		break; | 		break; | ||||||
| 	default: | 	default: | ||||||
| 		return -ENOIOCTLCMD; | 		return -ENOIOCTLCMD; | ||||||
|  |  | ||||||
fs/f2fs/gc.c (51 lines changed)
|  | @ -31,6 +31,8 @@ static int gc_thread_func(void *data) | ||||||
| 
 | 
 | ||||||
| 	set_freezable(); | 	set_freezable(); | ||||||
| 	do { | 	do { | ||||||
|  | 		bool sync_mode; | ||||||
|  | 
 | ||||||
| 		wait_event_interruptible_timeout(*wq, | 		wait_event_interruptible_timeout(*wq, | ||||||
| 				kthread_should_stop() || freezing(current) || | 				kthread_should_stop() || freezing(current) || | ||||||
| 				gc_th->gc_wake, | 				gc_th->gc_wake, | ||||||
|  | @ -101,15 +103,17 @@ static int gc_thread_func(void *data) | ||||||
| do_gc: | do_gc: | ||||||
| 		stat_inc_bggc_count(sbi->stat_info); | 		stat_inc_bggc_count(sbi->stat_info); | ||||||
| 
 | 
 | ||||||
|  | 		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC; | ||||||
|  | 
 | ||||||
| 		/* if return value is not zero, no victim was selected */ | 		/* if return value is not zero, no victim was selected */ | ||||||
| 		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO)) | 		if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO)) | ||||||
| 			wait_ms = gc_th->no_gc_sleep_time; | 			wait_ms = gc_th->no_gc_sleep_time; | ||||||
| 
 | 
 | ||||||
| 		trace_f2fs_background_gc(sbi->sb, wait_ms, | 		trace_f2fs_background_gc(sbi->sb, wait_ms, | ||||||
| 				prefree_segments(sbi), free_segments(sbi)); | 				prefree_segments(sbi), free_segments(sbi)); | ||||||
| 
 | 
 | ||||||
| 		/* balancing f2fs's metadata periodically */ | 		/* balancing f2fs's metadata periodically */ | ||||||
| 		f2fs_balance_fs_bg(sbi); | 		f2fs_balance_fs_bg(sbi, true); | ||||||
| next: | next: | ||||||
| 		sb_end_write(sbi->sb); | 		sb_end_write(sbi->sb); | ||||||
| 
 | 
 | ||||||
@@ -192,7 +196,10 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 		p->ofs_unit = sbi->segs_per_sec;
 	}
 
-	/* we need to check every dirty segments in the FG_GC case */
+	/*
+	 * adjust candidates range, should select all dirty segments for
+	 * foreground GC and urgent GC cases.
+	 */
 	if (gc_type != FG_GC &&
 			(sbi->gc_mode != GC_URGENT) &&
 			p->max_search > sbi->max_victim_search)
@@ -634,7 +641,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	}
 
 	*nofs = ofs_of_node(node_page);
-	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
+	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
 	f2fs_put_page(node_page, 1);
 
 	if (source_blkaddr != blkaddr) {
@@ -762,7 +769,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	struct page *page, *mpage;
 	block_t newaddr;
 	int err = 0;
-	bool lfs_mode = test_opt(fio.sbi, LFS);
+	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
 
 	/* do not read out */
 	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
@@ -970,7 +977,8 @@ retry:
 		if (err) {
 			clear_cold_data(page);
 			if (err == -ENOMEM) {
-				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				congestion_wait(BLK_RW_ASYNC,
+						DEFAULT_IO_TIMEOUT);
 				goto retry;
 			}
 			if (is_dirty)
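This series replaces every open-coded HZ/50 retry backoff with a named constant. A hedged sketch of what the new macro presumably expands to (the real definition is in f2fs.h, not shown in this diff):

	/* Assumption: DEFAULT_IO_TIMEOUT names the old 20ms (HZ/50) congestion backoff. */
	#define DEFAULT_IO_TIMEOUT	msecs_to_jiffies(20)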
@@ -1018,8 +1026,8 @@ next_step:
 		 * race condition along with SSR block allocation.
 		 */
 		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
-				get_valid_blocks(sbi, segno, false) ==
-							sbi->blocks_per_seg)
+				get_valid_blocks(sbi, segno, true) ==
+							BLKS_PER_SEC(sbi))
 			return submitted;
 
 		if (check_valid_map(sbi, segno, off) == 0)
@@ -1203,7 +1211,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 
 		if (get_valid_blocks(sbi, segno, false) == 0)
 			goto freed;
-		if (__is_large_section(sbi) &&
+		if (gc_type == BG_GC && __is_large_section(sbi) &&
 				migrated >= sbi->migration_granularity)
 			goto skip;
 		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
@@ -1233,12 +1241,12 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 							segno, gc_type);
 
 		stat_inc_seg_count(sbi, type, gc_type);
+		migrated++;
 
 freed:
 		if (gc_type == FG_GC &&
 				get_valid_blocks(sbi, segno, false) == 0)
 			seg_freed++;
-		migrated++;
 
 		if (__is_large_section(sbi) && segno + 1 < end_segno)
 			sbi->next_victim_seg[gc_type] = segno + 1;
@@ -1434,12 +1442,19 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 {
 	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
-	int section_count = le32_to_cpu(raw_sb->section_count);
-	int segment_count = le32_to_cpu(raw_sb->segment_count);
-	int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
-	long long block_count = le64_to_cpu(raw_sb->block_count);
+	int section_count;
+	int segment_count;
+	int segment_count_main;
+	long long block_count;
 	int segs = secs * sbi->segs_per_sec;
 
+	down_write(&sbi->sb_lock);
+
+	section_count = le32_to_cpu(raw_sb->section_count);
+	segment_count = le32_to_cpu(raw_sb->segment_count);
+	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
+	block_count = le64_to_cpu(raw_sb->block_count);
+
 	raw_sb->section_count = cpu_to_le32(section_count + secs);
 	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
 	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
@@ -1453,6 +1468,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 		raw_sb->devs[last_dev].total_segments =
 						cpu_to_le32(dev_segs + segs);
 	}
+
+	up_write(&sbi->sb_lock);
 }
 
 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
@@ -1570,11 +1587,17 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 		goto out;
 	}
 
+	mutex_lock(&sbi->cp_mutex);
 	update_fs_metadata(sbi, -secs);
 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
+	mutex_unlock(&sbi->cp_mutex);
+
 	err = f2fs_sync_fs(sbi->sb, 1);
 	if (err) {
+		mutex_lock(&sbi->cp_mutex);
 		update_fs_metadata(sbi, secs);
+		mutex_unlock(&sbi->cp_mutex);
 		update_sb_metadata(sbi, secs);
 		f2fs_commit_super(sbi, false);
 	}

fs/f2fs/inode.c
@@ -291,13 +291,30 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 			fi->i_flags & F2FS_COMPR_FL &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
 						i_log_cluster_size)) {
-		if (ri->i_compress_algorithm >= COMPRESS_MAX)
+		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
+			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+				"compress algorithm: %u, run fsck to fix",
+				  __func__, inode->i_ino,
+				  ri->i_compress_algorithm);
 			return false;
-		if (le64_to_cpu(ri->i_compr_blocks) > inode->i_blocks)
+		}
+		if (le64_to_cpu(ri->i_compr_blocks) >
+				SECTOR_TO_BLOCK(inode->i_blocks)) {
+			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
+				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
+				  __func__, inode->i_ino,
+				  le64_to_cpu(ri->i_compr_blocks),
+				  SECTOR_TO_BLOCK(inode->i_blocks));
 			return false;
+		}
 		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
-			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE)
+			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
+			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+				"log cluster size: %u, run fsck to fix",
+				  __func__, inode->i_ino,
+				  ri->i_log_cluster_size);
 			return false;
+		}
 	}
 
 	return true;
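The i_compr_blocks check now converts i_blocks before comparing, because the VFS counts i_blocks in 512-byte sectors while i_compr_blocks counts filesystem blocks. A hedged sketch of the conversion assumed here (the real SECTOR_TO_BLOCK macro is defined in f2fs.h):

	/* Assumption: one 4KB f2fs block covers 8 x 512-byte sectors, hence the shift by 3. */
	#define SECTOR_TO_BLOCK(sectors)	((sectors) >> 3)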
@@ -345,7 +362,7 @@ static int do_read_inode(struct inode *inode)
 	fi->i_flags = le32_to_cpu(ri->i_flags);
 	if (S_ISREG(inode->i_mode))
 		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
-	fi->flags = 0;
+	bitmap_zero(fi->flags, FI_MAX);
 	fi->i_advise = ri->i_advise;
 	fi->i_pino = le32_to_cpu(ri->i_pino);
 	fi->i_dir_level = ri->i_dir_level;
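Clearing the runtime flags with bitmap_zero() matches the fix for the potential .flags overflow on 32-bit architectures: the field is now a bitmap sized by FI_MAX rather than a single word. A minimal sketch of the assumed layout and accessor (the real helpers live in f2fs.h):

	/* Assumed: struct f2fs_inode_info now carries
	 *	unsigned long flags[BITS_TO_LONGS(FI_MAX)];
	 * so more than 32 per-inode flags stay safe on 32-bit kernels. */
	static inline bool is_inode_flag_set_sketch(struct f2fs_inode_info *fi, int flag)
	{
		return test_bit(flag, fi->flags);
	}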
@@ -518,7 +535,7 @@ retry:
 	inode = f2fs_iget(sb, ino);
 	if (IS_ERR(inode)) {
 		if (PTR_ERR(inode) == -ENOMEM) {
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
 			goto retry;
 		}
 	}
@@ -759,7 +776,7 @@ no_delete:
 	else
 		f2fs_inode_synced(inode);
 
-	/* ino == 0, if f2fs_new_inode() was failed t*/
+	/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
 	if (inode->i_ino)
 		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
 							inode->i_ino);

fs/f2fs/namei.c
@@ -75,9 +75,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 
 	set_inode_flag(inode, FI_NEW_INODE);
 
-	/* If the directory encrypted, then we should encrypt the inode. */
-	if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
-				f2fs_may_encrypt(inode))
+	if (f2fs_may_encrypt(dir, inode))
 		f2fs_set_encrypted_inode(inode);
 
 	if (f2fs_sb_has_extra_attr(sbi)) {
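f2fs_may_encrypt() now takes the parent directory as well, so the whole encryption decision lives in one helper. A hedged sketch of the assumed new form (the real definition sits in f2fs.h and may differ in detail):

	static inline bool f2fs_may_encrypt_sketch(struct inode *dir, struct inode *inode)
	{
		struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
		umode_t mode = inode->i_mode;

		/* encrypt regular files, dirs and symlinks created under an
		 * encrypted (or dummy-encrypted) directory */
		return (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
			(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
	}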
@@ -177,7 +175,7 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
 }
 
 /*
- * Set multimedia files as cold files for hot/cold data separation
+ * Set file's temperature for hot/cold data separation
  */
 static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
 		const unsigned char *name)
@@ -876,12 +874,6 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 	if (!f2fs_is_checkpoint_ready(sbi))
 		return -ENOSPC;
 
-	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
-		int err = fscrypt_get_encryption_info(dir);
-		if (err)
-			return err;
-	}
-
 	return __f2fs_tmpfile(dir, dentry, mode, NULL);
 }
 

fs/f2fs/node.c
@@ -510,9 +510,6 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 	return nr - nr_shrink;
 }
 
-/*
- * This function always returns success
- */
 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 						struct node_info *ni)
 {
@@ -716,8 +713,7 @@ got:
 /*
  * Caller should call f2fs_put_dnode(dn).
  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op() only if ro is not set RDONLY_NODE.
- * In the case of RDONLY_NODE, we don't need to care about mutex.
+ * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
  */
 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
@@ -809,8 +805,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 	dn->nid = nids[level];
 	dn->ofs_in_node = offset[level];
 	dn->node_page = npage[level];
-	dn->data_blkaddr = datablock_addr(dn->inode,
-				dn->node_page, dn->ofs_in_node);
+	dn->data_blkaddr = f2fs_data_blkaddr(dn);
 	return 0;
 
 release_pages:
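The old three-argument datablock_addr() is split into data_blkaddr() plus a dnode-based wrapper across this series. A sketch of how the two new helpers are assumed to relate (their real definitions are in f2fs.h, outside this diff):

	/* Assumed wrapper: read the block address for the slot described by dn. */
	static inline block_t f2fs_data_blkaddr_sketch(struct dnode_of_data *dn)
	{
		return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
	}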
@@ -1188,8 +1183,9 @@ int f2fs_remove_inode_page(struct inode *inode)
 	}
 
 	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
-		f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
-			  inode->i_ino, (unsigned long long)inode->i_blocks);
+		f2fs_warn(F2FS_I_SB(inode),
+			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
+			inode->i_ino, (unsigned long long)inode->i_blocks);
 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
 	}
 
@@ -1562,15 +1558,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	if (atomic && !test_opt(sbi, NOBARRIER))
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
-	set_page_writeback(page);
-	ClearPageError(page);
-
+	/* should add to global list before clearing PAGECACHE status */
 	if (f2fs_in_warm_node_list(sbi, page)) {
 		seq = f2fs_add_fsync_node_entry(sbi, page);
 		if (seq_id)
 			*seq_id = seq;
 	}
 
+	set_page_writeback(page);
+	ClearPageError(page);
+
 	fio.old_blkaddr = ni.blk_addr;
 	f2fs_do_write_node_page(nid, &fio);
 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -1979,7 +1976,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 		goto skip_write;
 
 	/* balancing f2fs's metadata in background */
-	f2fs_balance_fs_bg(sbi);
+	f2fs_balance_fs_bg(sbi, true);
 
 	/* collect a number of dirty node pages and write together */
 	if (wbc->sync_mode != WB_SYNC_ALL &&
@@ -2602,7 +2599,7 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 retry:
 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
 	if (!ipage) {
-		congestion_wait(BLK_RW_ASYNC, HZ/50);
+		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
 		goto retry;
 	}
 
@@ -3193,22 +3190,22 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
 
 int __init f2fs_create_node_manager_caches(void)
 {
-	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
+	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
 			sizeof(struct nat_entry));
 	if (!nat_entry_slab)
 		goto fail;
 
-	free_nid_slab = f2fs_kmem_cache_create("free_nid",
+	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
 			sizeof(struct free_nid));
 	if (!free_nid_slab)
 		goto destroy_nat_entry;
 
-	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
+	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
 			sizeof(struct nat_entry_set));
 	if (!nat_entry_set_slab)
 		goto destroy_free_nid;
 
-	fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
 			sizeof(struct fsync_node_entry));
 	if (!fsync_node_entry_slab)
 		goto destroy_nat_entry_set;

fs/f2fs/recovery.c
@@ -496,8 +496,7 @@ out:
 	return 0;
 
 truncate_out:
-	if (datablock_addr(tdn.inode, tdn.node_page,
-					tdn.ofs_in_node) == blkaddr)
+	if (f2fs_data_blkaddr(&tdn) == blkaddr)
 		f2fs_truncate_data_blocks_range(&tdn, 1);
 	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
 		unlock_page(dn->inode_page);
@@ -535,7 +534,7 @@ retry_dn:
 	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
 	if (err) {
 		if (err == -ENOMEM) {
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
 			goto retry_dn;
 		}
 		goto out;
@@ -560,8 +559,8 @@ retry_dn:
 	for (; start < end; start++, dn.ofs_in_node++) {
 		block_t src, dest;
 
-		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
-		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
+		src = f2fs_data_blkaddr(&dn);
+		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);
 
 		if (__is_valid_data_blkaddr(src) &&
 			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
@@ -618,7 +617,8 @@ retry_prev:
 			err = check_index_in_prev_nodes(sbi, dest, &dn);
 			if (err) {
 				if (err == -ENOMEM) {
-					congestion_wait(BLK_RW_ASYNC, HZ/50);
+					congestion_wait(BLK_RW_ASYNC,
+							DEFAULT_IO_TIMEOUT);
 					goto retry_prev;
 				}
 				goto err;

fs/f2fs/segment.c
@@ -172,7 +172,7 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
 
-	if (test_opt(sbi, LFS))
+	if (f2fs_lfs_mode(sbi))
 		return false;
 	if (sbi->gc_mode == GC_URGENT)
 		return true;
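test_opt(sbi, LFS) is replaced everywhere by a small predicate, since LFS is now tracked in F2FS_OPTION(sbi).fs_mode rather than as a mount-option bit. A sketch of the assumed wrapper (its real definition is in f2fs.h):

	/* Assumed helper backing the f2fs_lfs_mode() calls in this series. */
	static inline bool f2fs_lfs_mode_sketch(struct f2fs_sb_info *sbi)
	{
		return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
	}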
@@ -245,7 +245,8 @@ retry:
 								LOOKUP_NODE);
 			if (err) {
 				if (err == -ENOMEM) {
-					congestion_wait(BLK_RW_ASYNC, HZ/50);
+					congestion_wait(BLK_RW_ASYNC,
+							DEFAULT_IO_TIMEOUT);
 					cond_resched();
 					goto retry;
 				}
@@ -312,7 +313,7 @@ next:
 skip:
 		iput(inode);
 	}
-	congestion_wait(BLK_RW_ASYNC, HZ/50);
+	congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
 	cond_resched();
 	if (gc_failure) {
 		if (++looped >= count)
@@ -415,7 +416,8 @@ retry:
 			err = f2fs_do_write_data_page(&fio);
 			if (err) {
 				if (err == -ENOMEM) {
-					congestion_wait(BLK_RW_ASYNC, HZ/50);
+					congestion_wait(BLK_RW_ASYNC,
+							DEFAULT_IO_TIMEOUT);
 					cond_resched();
 					goto retry;
 				}
@@ -494,7 +496,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 
 	/* balance_fs_bg is able to be pending */
 	if (need && excess_cached_nats(sbi))
-		f2fs_balance_fs_bg(sbi);
+		f2fs_balance_fs_bg(sbi, false);
 
 	if (!f2fs_is_checkpoint_ready(sbi))
 		return;
@@ -509,7 +511,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	}
 }
 
-void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
 {
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return;
@@ -538,7 +540,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 			excess_dirty_nats(sbi) ||
 			excess_dirty_nodes(sbi) ||
 			f2fs_time_over(sbi, CP_TIME)) {
-		if (test_opt(sbi, DATA_FLUSH)) {
+		if (test_opt(sbi, DATA_FLUSH) && from_bg) {
 			struct blk_plug plug;
 
 			mutex_lock(&sbi->flush_lock);
@@ -1078,7 +1080,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
 
 	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
-	dpolicy->timeout = 0;
+	dpolicy->timeout = false;
 
 	if (discard_type == DPOLICY_BG) {
 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -1103,6 +1105,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
 		dpolicy->io_aware = false;
 		/* we need to issue all to keep CP_TRIMMED_FLAG */
 		dpolicy->granularity = 1;
+		dpolicy->timeout = true;
 	}
 }
 
@@ -1471,12 +1474,12 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 	int i, issued = 0;
 	bool io_interrupted = false;
 
-	if (dpolicy->timeout != 0)
-		f2fs_update_time(sbi, dpolicy->timeout);
+	if (dpolicy->timeout)
+		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
 
 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
-		if (dpolicy->timeout != 0 &&
-				f2fs_time_over(sbi, dpolicy->timeout))
+		if (dpolicy->timeout &&
+				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
 			break;
 
 		if (i + 1 < dpolicy->granularity)
@@ -1497,8 +1500,8 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
 			f2fs_bug_on(sbi, dc->state != D_PREP);
 
-			if (dpolicy->timeout != 0 &&
-				f2fs_time_over(sbi, dpolicy->timeout))
+			if (dpolicy->timeout &&
+				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
 				break;
 
 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
@@ -1677,7 +1680,6 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
 
 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
 					dcc->discard_granularity);
-	dpolicy.timeout = UMOUNT_DISCARD_TIMEOUT;
 	__issue_discard_cmd(sbi, &dpolicy);
 	dropped = __drop_discard_cmd(sbi);
 
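With the switch to a boolean, discard_policy.timeout no longer carries a timer index; only the umount policy sets it, and the timer source is always UMOUNT_DISCARD_TIMEOUT. A hedged sketch of the affected part of the policy structure (the real struct discard_policy lives in the f2fs headers and has more fields):

	struct discard_policy_sketch {
		unsigned int granularity;	/* discard granularity */
		bool io_aware;			/* yield to pending user I/O */
		bool timeout;			/* bound total issue time (umount only) */
	};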
@@ -1940,7 +1942,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
 	bool force = (cpc->reason & CP_DISCARD);
-	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
+	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
 
 	mutex_lock(&dirty_i->seglist_lock);
 
@@ -1972,7 +1974,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
 					(end - 1) <= cpc->trim_end)
 				continue;
 
-		if (!test_opt(sbi, LFS) || !__is_large_section(sbi)) {
+		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
 				(end - start) << sbi->log_blocks_per_seg);
 			continue;
@@ -2801,7 +2803,7 @@ next:
 			blk_finish_plug(&plug);
 			mutex_unlock(&dcc->cmd_lock);
 			trimmed += __wait_all_discard_cmd(sbi, NULL);
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
 			goto next;
 		}
 skip:
@@ -2830,7 +2832,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	struct discard_policy dpolicy;
 	unsigned long long trimmed = 0;
 	int err = 0;
-	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
+	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
 
 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
 		return -EINVAL;
@@ -3193,7 +3195,7 @@ static void update_device_state(struct f2fs_io_info *fio)
 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
 	int type = __get_segment_type(fio);
-	bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
+	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
 
 	if (keep_order)
 		down_read(&fio->sbi->io_order_lock);
@@ -4071,7 +4073,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 	sit_i->dirty_sentries = 0;
 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
-	sit_i->mounted_time = ktime_get_real_seconds();
+	sit_i->mounted_time = ktime_get_boottime_seconds();
 	init_rwsem(&sit_i->sentry_lock);
 	return 0;
 }
@@ -4678,7 +4680,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
 
-	if (!test_opt(sbi, LFS))
+	if (!f2fs_lfs_mode(sbi))
 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
@@ -4830,22 +4832,22 @@ void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
 
 int __init f2fs_create_segment_manager_caches(void)
 {
-	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
+	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
 			sizeof(struct discard_entry));
 	if (!discard_entry_slab)
 		goto fail;
 
-	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
+	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
 			sizeof(struct discard_cmd));
 	if (!discard_cmd_slab)
 		goto destroy_discard_entry;
 
-	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
+	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
 			sizeof(struct sit_entry_set));
 	if (!sit_entry_set_slab)
 		goto destroy_discard_cmd;
 
-	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
+	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
 			sizeof(struct inmem_pages));
 	if (!inmem_entry_slab)
 		goto destroy_sit_entry_set;

fs/f2fs/segment.h
@@ -756,7 +756,7 @@ static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
 						bool base_time)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
-	time64_t diff, now = ktime_get_real_seconds();
+	time64_t diff, now = ktime_get_boottime_seconds();
 
 	if (now >= sit_i->mounted_time)
 		return sit_i->elapsed_time + now - sit_i->mounted_time;

fs/f2fs/shrinker.c
@@ -58,7 +58,7 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
 		/* count extent cache entries */
 		count += __count_extent_cache(sbi);
 
-		/* shrink clean nat cache entries */
+		/* count clean nat cache entries */
 		count += __count_nat_entries(sbi);
 
 		/* count free nids cache entries */

fs/f2fs/super.c
@@ -428,14 +428,11 @@ static int parse_options(struct super_block *sb, char *options)
 			if (!name)
 				return -ENOMEM;
 			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
-				set_opt(sbi, BG_GC);
-				clear_opt(sbi, FORCE_FG_GC);
+				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
 			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
-				clear_opt(sbi, BG_GC);
-				clear_opt(sbi, FORCE_FG_GC);
+				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
 			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
-				set_opt(sbi, BG_GC);
-				set_opt(sbi, FORCE_FG_GC);
+				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
 			} else {
 				kvfree(name);
 				return -EINVAL;
@@ -447,7 +444,7 @@ static int parse_options(struct super_block *sb, char *options)
 			break;
 		case Opt_norecovery:
 			/* this option mounts f2fs with ro */
-			set_opt(sbi, DISABLE_ROLL_FORWARD);
+			set_opt(sbi, NORECOVERY);
 			if (!f2fs_readonly(sb))
 				return -EINVAL;
 			break;
@@ -601,10 +598,10 @@ static int parse_options(struct super_block *sb, char *options)
 					kvfree(name);
 					return -EINVAL;
 				}
-				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
 			} else if (strlen(name) == 3 &&
 					!strncmp(name, "lfs", 3)) {
-				set_opt_mode(sbi, F2FS_MOUNT_LFS);
+				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
 			} else {
 				kvfree(name);
 				return -EINVAL;
@@ -833,6 +830,10 @@ static int parse_options(struct super_block *sb, char *options)
 					!strcmp(name, "lz4")) {
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZ4;
+			} else if (strlen(name) == 4 &&
+					!strcmp(name, "zstd")) {
+				F2FS_OPTION(sbi).compress_algorithm =
+								COMPRESS_ZSTD;
 			} else {
 				kfree(name);
 				return -EINVAL;
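Alongside the new zstd support, the parser stores the chosen algorithm in the per-mount options. A sketch of the assumed algorithm enum after this series (defined in f2fs.h; zstd also depends on CONFIG_F2FS_FS_COMPRESSION and the kernel zstd library):

	/* Assumed set of supported compression algorithms after this merge. */
	enum {
		COMPRESS_LZO,
		COMPRESS_LZ4,
		COMPRESS_ZSTD,
		COMPRESS_MAX,
	};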
@@ -905,7 +906,7 @@ static int parse_options(struct super_block *sb, char *options)
 	}
 #endif
 
-	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
+	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
 		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
 			 F2FS_IO_SIZE_KB(sbi));
 		return -EINVAL;
@@ -935,7 +936,7 @@ static int parse_options(struct super_block *sb, char *options)
 		}
 	}
 
-	if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
+	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
 		f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
 		return -EINVAL;
 	}
@@ -961,6 +962,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 	/* Initialize f2fs-specific inode info */
 	atomic_set(&fi->dirty_pages, 0);
 	init_rwsem(&fi->i_sem);
+	spin_lock_init(&fi->i_size_lock);
 	INIT_LIST_HEAD(&fi->dirty_list);
 	INIT_LIST_HEAD(&fi->gdirty_list);
 	INIT_LIST_HEAD(&fi->inmem_ilist);
@@ -1173,7 +1175,7 @@ static void f2fs_put_super(struct super_block *sb)
 	/* our cp_error case, we can wait for any writeback page */
 	f2fs_flush_merged_writes(sbi);
 
-	f2fs_wait_on_all_pages_writeback(sbi);
+	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
 
 	f2fs_bug_on(sbi, sbi->fsync_node_num);
 
@@ -1205,6 +1207,7 @@ static void f2fs_put_super(struct super_block *sb)
 	kvfree(sbi->raw_super);
 
 	destroy_device_list(sbi);
+	f2fs_destroy_xattr_caches(sbi);
 	mempool_destroy(sbi->write_io_dummy);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
@@ -1421,6 +1424,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
 	case COMPRESS_LZ4:
 		algtype = "lz4";
 		break;
+	case COMPRESS_ZSTD:
+		algtype = "zstd";
+		break;
 	}
 	seq_printf(seq, ",compress_algorithm=%s", algtype);
 
@@ -1437,16 +1443,17 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
 
-	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
-		if (test_opt(sbi, FORCE_FG_GC))
-			seq_printf(seq, ",background_gc=%s", "sync");
-		else
-			seq_printf(seq, ",background_gc=%s", "on");
-	} else {
+	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
+		seq_printf(seq, ",background_gc=%s", "sync");
+	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
+		seq_printf(seq, ",background_gc=%s", "on");
+	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
 		seq_printf(seq, ",background_gc=%s", "off");
-	}
+
 	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
 		seq_puts(seq, ",disable_roll_forward");
+	if (test_opt(sbi, NORECOVERY))
+		seq_puts(seq, ",norecovery");
 	if (test_opt(sbi, DISCARD))
 		seq_puts(seq, ",discard");
 	else
@@ -1498,9 +1505,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 		seq_puts(seq, ",data_flush");
 
 	seq_puts(seq, ",mode=");
-	if (test_opt(sbi, ADAPTIVE))
+	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
 		seq_puts(seq, "adaptive");
-	else if (test_opt(sbi, LFS))
+	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
 		seq_puts(seq, "lfs");
 	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
 	if (test_opt(sbi, RESERVE_ROOT))
@@ -1571,11 +1578,11 @@ static void default_options(struct f2fs_sb_info *sbi)
 	F2FS_OPTION(sbi).test_dummy_encryption = false;
 	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
 	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZO;
+	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
 	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
 	F2FS_OPTION(sbi).compress_ext_cnt = 0;
+	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
 
-	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_XATTR);
 	set_opt(sbi, INLINE_DATA);
 	set_opt(sbi, INLINE_DENTRY);
@@ -1587,9 +1594,9 @@ static void default_options(struct f2fs_sb_info *sbi)
 	set_opt(sbi, FLUSH_MERGE);
 	set_opt(sbi, DISCARD);
 	if (f2fs_sb_has_blkzoned(sbi))
-		set_opt_mode(sbi, F2FS_MOUNT_LFS);
+		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
 	else
-		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
 
 #ifdef CONFIG_F2FS_FS_XATTR
 	set_opt(sbi, XATTR_USER);
@@ -1658,7 +1665,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 out_unlock:
 	up_write(&sbi->gc_lock);
 restore_flag:
-	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
+	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
 	return err;
 }
 
@@ -1781,7 +1788,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	 * or if background_gc = off is passed in mount
 	 * option. Also sync the filesystem.
 	 */
-	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
+	if ((*flags & SB_RDONLY) ||
+			F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) {
 		if (sbi->gc_thread) {
 			f2fs_stop_gc_thread(sbi);
 			need_restart_gc = true;
@@ -1886,7 +1894,8 @@ repeat:
 		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
 		if (IS_ERR(page)) {
 			if (PTR_ERR(page) == -ENOMEM) {
-				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				congestion_wait(BLK_RW_ASYNC,
+						DEFAULT_IO_TIMEOUT);
 				goto repeat;
 			}
 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -1928,6 +1937,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
 	int offset = off & (sb->s_blocksize - 1);
 	size_t towrite = len;
 	struct page *page;
+	void *fsdata = NULL;
 	char *kaddr;
 	int err = 0;
 	int tocopy;
@@ -1937,10 +1947,11 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
 								towrite);
 retry:
 		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
-							&page, NULL);
+							&page, &fsdata);
 		if (unlikely(err)) {
 			if (err == -ENOMEM) {
-				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				congestion_wait(BLK_RW_ASYNC,
+						DEFAULT_IO_TIMEOUT);
 				goto retry;
 			}
 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -1953,7 +1964,7 @@ retry:
 		flush_dcache_page(page);
 
 		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
-						page, NULL);
+						page, fsdata);
 		offset = 0;
 		towrite -= tocopy;
 		off += tocopy;
@@ -3457,12 +3468,17 @@ try_onemore:
 		}
 	}
 
+	/* init per sbi slab cache */
+	err = f2fs_init_xattr_caches(sbi);
+	if (err)
+		goto free_io_dummy;
+
 	/* get an inode for meta space */
 	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
 	if (IS_ERR(sbi->meta_inode)) {
 		f2fs_err(sbi, "Failed to read F2FS meta data inode");
 		err = PTR_ERR(sbi->meta_inode);
-		goto free_io_dummy;
+		goto free_xattr_cache;
 	}
 
 	err = f2fs_get_valid_checkpoint(sbi);
@@ -3590,7 +3606,7 @@ try_onemore:
 			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
 	}
 #endif
-	/* if there are nt orphan nodes free them */
+	/* if there are any orphan inodes, free them */
 	err = f2fs_recover_orphan_inodes(sbi);
 	if (err)
 		goto free_meta;
@@ -3599,7 +3615,8 @@ try_onemore:
 		goto reset_checkpoint;
 
 	/* recover fsynced data */
-	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
+	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
+			!test_opt(sbi, NORECOVERY)) {
 		/*
 		 * mount should be failed, when device has readonly mode, and
 		 * previous checkpoint was not done by clean system shutdown.
@@ -3665,7 +3682,7 @@ reset_checkpoint:
 	 * If filesystem is not mounted as read-only then
 	 * do start the gc_thread.
 	 */
-	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
+	if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
 		/* After POR, we can run background GC thread.*/
 		err = f2fs_start_gc_thread(sbi);
 		if (err)
@@ -3734,6 +3751,8 @@ free_meta_inode:
 	make_bad_inode(sbi->meta_inode);
 	iput(sbi->meta_inode);
 	sbi->meta_inode = NULL;
+free_xattr_cache:
+	f2fs_destroy_xattr_caches(sbi);
 free_io_dummy:
 	mempool_destroy(sbi->write_io_dummy);
 free_percpu:

fs/f2fs/sysfs.c
@@ -109,47 +109,47 @@ static ssize_t features_show(struct f2fs_attr *a,
 		return sprintf(buf, "0\n");
 
 	if (f2fs_sb_has_encrypt(sbi))
-		len += snprintf(buf, PAGE_SIZE - len, "%s",
+		len += scnprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
 	if (f2fs_sb_has_blkzoned(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "blkzoned");
 	if (f2fs_sb_has_extra_attr(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "extra_attr");
 	if (f2fs_sb_has_project_quota(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "projquota");
 	if (f2fs_sb_has_inode_chksum(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_checksum");
 	if (f2fs_sb_has_flexible_inline_xattr(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "flexible_inline_xattr");
 	if (f2fs_sb_has_quota_ino(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "quota_ino");
 	if (f2fs_sb_has_inode_crtime(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_crtime");
 	if (f2fs_sb_has_lost_found(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "lost_found");
 	if (f2fs_sb_has_verity(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "verity");
 	if (f2fs_sb_has_sb_chksum(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "sb_checksum");
 	if (f2fs_sb_has_casefold(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "casefold");
 	if (f2fs_sb_has_compression(sbi))
-		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "compression");
-	len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+	len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "pin_file");
-	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
 	return len;
 }
 
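scnprintf() returns the number of characters actually stored in the buffer (snprintf returns the length that would have been written), so len can no longer run past the PAGE_SIZE sysfs buffer when many feature names are present. A minimal stand-alone illustration of the accumulation idiom used above (hypothetical reduced example, not part of the patch):

	static ssize_t show_two_features(char *buf, bool a, bool b)
	{
		int len = 0;

		if (a)
			len += scnprintf(buf + len, PAGE_SIZE - len, "%s", "feature_a");
		if (b)
			len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
					 len ? ", " : "", "feature_b");
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
		return len;
	}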
|  | @ -185,6 +185,12 @@ static ssize_t encoding_show(struct f2fs_attr *a, | ||||||
| 	return sprintf(buf, "(none)"); | 	return sprintf(buf, "(none)"); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | static ssize_t mounted_time_sec_show(struct f2fs_attr *a, | ||||||
|  | 		struct f2fs_sb_info *sbi, char *buf) | ||||||
|  | { | ||||||
|  | 	return sprintf(buf, "%llu", SIT_I(sbi)->mounted_time); | ||||||
|  | } | ||||||
|  | 
 | ||||||
| #ifdef CONFIG_F2FS_STAT_FS | #ifdef CONFIG_F2FS_STAT_FS | ||||||
| static ssize_t moved_blocks_foreground_show(struct f2fs_attr *a, | static ssize_t moved_blocks_foreground_show(struct f2fs_attr *a, | ||||||
| 				struct f2fs_sb_info *sbi, char *buf) | 				struct f2fs_sb_info *sbi, char *buf) | ||||||
@@ -233,16 +239,16 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
 		int hot_count = sbi->raw_super->hot_ext_count;
 		int len = 0, i;
 
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 						"cold file extension:\n");
 		for (i = 0; i < cold_count; i++)
-			len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+			len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
 								extlist[i]);
 
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 						"hot file extension:\n");
 		for (i = cold_count; i < cold_count + hot_count; i++)
-			len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+			len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
 								extlist[i]);
 		return len;
 	}
@@ -544,6 +550,7 @@ F2FS_GENERAL_RO_ATTR(features);
 F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
 F2FS_GENERAL_RO_ATTR(unusable);
 F2FS_GENERAL_RO_ATTR(encoding);
+F2FS_GENERAL_RO_ATTR(mounted_time_sec);
 #ifdef CONFIG_F2FS_STAT_FS
 F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
 F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
@@ -573,7 +580,9 @@ F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
 #endif
 F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
 F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
 F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
+#endif
 
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -621,6 +630,7 @@ static struct attribute *f2fs_attrs[] = {
 	ATTR_LIST(reserved_blocks),
 	ATTR_LIST(current_reserved_blocks),
 	ATTR_LIST(encoding),
+	ATTR_LIST(mounted_time_sec),
 #ifdef CONFIG_F2FS_STAT_FS
 	ATTR_LIST(cp_foreground_calls),
 	ATTR_LIST(cp_background_calls),
@@ -654,7 +664,9 @@ static struct attribute *f2fs_feat_attrs[] = {
 #endif
 	ATTR_LIST(sb_checksum),
 	ATTR_LIST(casefold),
+#ifdef CONFIG_F2FS_FS_COMPRESSION
 	ATTR_LIST(compression),
+#endif
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs_feat);

@@ -23,6 +23,25 @@
 #include "xattr.h"
 #include "segment.h"
 
+static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
+{
+	if (likely(size == sbi->inline_xattr_slab_size)) {
+		*is_inline = true;
+		return kmem_cache_zalloc(sbi->inline_xattr_slab, GFP_NOFS);
+	}
+	*is_inline = false;
+	return f2fs_kzalloc(sbi, size, GFP_NOFS);
+}
+
+static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr,
+							bool is_inline)
+{
+	if (is_inline)
+		kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
+	else
+		kvfree(xattr_addr);
+}
+
 static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
 		struct dentry *unused, struct inode *inode,
 		const char *name, void *buffer, size_t size)
@@ -301,7 +320,8 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
 static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 				unsigned int index, unsigned int len,
 				const char *name, struct f2fs_xattr_entry **xe,
-				void **base_addr, int *base_size)
+				void **base_addr, int *base_size,
+				bool *is_inline)
 {
 	void *cur_addr, *txattr_addr, *last_txattr_addr;
 	void *last_addr = NULL;
@@ -312,12 +332,12 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 	if (!xnid && !inline_size)
 		return -ENODATA;
 
-	*base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
-	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
+	*base_size = XATTR_SIZE(inode) + XATTR_PADDING_SIZE;
+	txattr_addr = xattr_alloc(F2FS_I_SB(inode), *base_size, is_inline);
 	if (!txattr_addr)
 		return -ENOMEM;
 
-	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
+	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(inode);
 
 	/* read from inline xattr */
 	if (inline_size) {
@@ -362,7 +382,7 @@ check:
 	*base_addr = txattr_addr;
 	return 0;
 out:
-	kvfree(txattr_addr);
+	xattr_free(F2FS_I_SB(inode), txattr_addr, *is_inline);
 	return err;
 }
 
@@ -499,6 +519,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	unsigned int size, len;
 	void *base_addr = NULL;
 	int base_size;
+	bool is_inline;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -509,7 +530,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 
 	down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = lookup_all_xattrs(inode, ipage, index, len, name,
-				&entry, &base_addr, &base_size);
+				&entry, &base_addr, &base_size, &is_inline);
 	up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
@@ -532,14 +553,13 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	}
 	error = size;
 out:
-	kvfree(base_addr);
+	xattr_free(F2FS_I_SB(inode), base_addr, is_inline);
 	return error;
 }
 
 ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
 	struct inode *inode = d_inode(dentry);
-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	struct f2fs_xattr_entry *entry;
 	void *base_addr, *last_base_addr;
 	int error = 0;
@@ -551,7 +571,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 	if (error)
 		return error;
 
-	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+	last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
 
 	list_for_each_xattr(entry, base_addr) {
 		const struct xattr_handler *handler =
@@ -609,7 +629,6 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 {
 	struct f2fs_xattr_entry *here, *last;
 	void *base_addr, *last_base_addr;
-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	int found, newsize;
 	size_t len;
 	__u32 new_hsize;
@@ -633,7 +652,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	if (error)
 		return error;
 
-	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+	last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
 
 	/* find entry with wanted name. */
 	here = __find_xattr(base_addr, last_base_addr, index, len, name);
@@ -758,14 +777,34 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
 	f2fs_balance_fs(sbi, true);
 
 	f2fs_lock_op(sbi);
-	/* protect xattr_ver */
-	down_write(&F2FS_I(inode)->i_sem);
 	down_write(&F2FS_I(inode)->i_xattr_sem);
 	err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
 	up_write(&F2FS_I(inode)->i_xattr_sem);
-	up_write(&F2FS_I(inode)->i_sem);
 	f2fs_unlock_op(sbi);
 
 	f2fs_update_time(sbi, REQ_TIME);
 	return err;
 }
+
+int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
+{
+	dev_t dev = sbi->sb->s_bdev->bd_dev;
+	char slab_name[32];
+
+	sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));
+
+	sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
+					sizeof(__le32) + XATTR_PADDING_SIZE;
+
+	sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
+					sbi->inline_xattr_slab_size);
+	if (!sbi->inline_xattr_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
+{
+	kmem_cache_destroy(sbi->inline_xattr_slab);
+}

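The two helpers added at the end of this file size a per-device slab cache to the inline-xattr lookup buffer, so the common xattr_alloc() case is served from a dedicated kmem_cache instead of a generic f2fs_kzalloc(). The hunks shown here do not include the call sites; a sketch of how a caller would presumably pair them during mount and teardown (the example_* function names are placeholders, not f2fs code):

/* Sketch only: assumed pairing of the new cache helpers with the
 * mount/unmount paths; example_* names are illustrative placeholders.
 */
static int example_fill_super(struct f2fs_sb_info *sbi)
{
	int err;

	/* Create the per-device "f2fs_xattr_entry-MAJ:MIN" slab before
	 * any getxattr/listxattr call can allocate from it.
	 */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		return err;

	/* ... remainder of mount ... */
	return 0;
}

static void example_put_super(struct f2fs_sb_info *sbi)
{
	/* ... remainder of teardown ... */
	f2fs_destroy_xattr_caches(sbi);
}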
@@ -49,7 +49,7 @@ struct f2fs_xattr_entry {
 	__u8    e_name_index;
 	__u8    e_name_len;
 	__le16  e_value_size;   /* size of attribute value */
-	char    e_name[0];      /* attribute name */
+	char    e_name[];      /* attribute name */
 };
 
 #define XATTR_HDR(ptr)		((struct f2fs_xattr_header *)(ptr))
@@ -73,7 +73,8 @@ struct f2fs_xattr_entry {
 				entry = XATTR_NEXT_ENTRY(entry))
 #define VALID_XATTR_BLOCK_SIZE	(PAGE_SIZE - sizeof(struct node_footer))
 #define XATTR_PADDING_SIZE	(sizeof(__u32))
-#define XATTR_SIZE(x,i)		(((x) ? VALID_XATTR_BLOCK_SIZE : 0) +	\
+#define XATTR_SIZE(i)		((F2FS_I(i)->i_xattr_nid ?		\
+					VALID_XATTR_BLOCK_SIZE : 0) +	\
 						(inline_xattr_size(i)))
 #define MIN_OFFSET(i)		XATTR_ALIGN(inline_xattr_size(i) +	\
 						VALID_XATTR_BLOCK_SIZE)
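XATTR_SIZE() now derives the presence of an external xattr node from the inode itself instead of taking a separate xnid argument, which is why the local nid_t xnid variables in f2fs_listxattr() and __f2fs_setxattr() were dropped above. In caller terms (lines taken from the diff, shown back to back only for comparison):

/* Before: each caller fetched the xattr nid itself. */
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);

/* After: the macro reads i_xattr_nid internally. */
last_base_addr = (void *)base_addr + XATTR_SIZE(inode);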
@@ -130,6 +131,8 @@ extern int f2fs_setxattr(struct inode *, int, const char *,
 extern int f2fs_getxattr(struct inode *, int, const char *, void *,
 						size_t, struct page *);
 extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+extern int f2fs_init_xattr_caches(struct f2fs_sb_info *);
+extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
 #else
 
 #define f2fs_xattr_handlers	NULL
@@ -150,6 +153,8 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
 {
 	return -EOPNOTSUPP;
 }
+static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
+static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
 #endif
 
 #ifdef CONFIG_F2FS_FS_SECURITY

@@ -125,6 +125,7 @@ struct f2fs_super_block {
 /*
  * For checkpoint
  */
+#define CP_RESIZEFS_FLAG		0x00004000
 #define CP_DISABLED_QUICK_FLAG		0x00002000
 #define CP_DISABLED_FLAG		0x00001000
 #define CP_QUOTA_NEED_FSCK_FLAG		0x00000800

@@ -153,7 +153,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
 #define show_compress_algorithm(type)					\
 	__print_symbolic(type,						\
 		{ COMPRESS_LZO,		"LZO" },			\
-		{ COMPRESS_LZ4,		"LZ4" })
+		{ COMPRESS_LZ4,		"LZ4" },			\
+		{ COMPRESS_ZSTD,	"ZSTD" })
 
 struct f2fs_sb_info;
 struct f2fs_io_info;