From 32ebffd3bbb4162da5ff88f9a35dd32d0a28ea70 Mon Sep 17 00:00:00 2001
From: Jan Kara <jack@suse.com>
Date: Mon, 7 Dec 2015 14:31:11 -0500
Subject: [PATCH 3/4] ext4: fix races between buffered IO and collapse / insert
 range

Current code implementing FALLOC_FL_COLLAPSE_RANGE and
FALLOC_FL_INSERT_RANGE is prone to races with buffered writes and page
faults. If buffered write or write via mmap manages to squeeze between
filemap_write_and_wait_range() and truncate_pagecache() in the fallocate
implementations, the written data is simply discarded by
truncate_pagecache() although it should have been shifted.

Fix the problem by moving filemap_write_and_wait_range() call inside
i_mutex and i_mmap_sem. That way we are protected against races with
both buffered writes and page faults.

Signed-off-by: Jan Kara <jack@suse.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/extents.c | 59 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 31 insertions(+), 28 deletions(-)

diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 65b5ada2833f..4b105c96df08 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5487,21 +5487,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 			return ret;
 	}
 
-	/*
-	 * Need to round down offset to be aligned with page size boundary
-	 * for page size > block size.
-	 */
-	ioffset = round_down(offset, PAGE_SIZE);
-
-	/* Write out all dirty pages */
-	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
-					   LLONG_MAX);
-	if (ret)
-		return ret;
-
-	/* Take mutex lock */
 	mutex_lock(&inode->i_mutex);
-
 	/*
 	 * There is no need to overlap collapse range with EOF, in which case
 	 * it is effectively a truncate operation
@@ -5526,6 +5512,27 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	 * page cache.
 	 */
 	down_write(&EXT4_I(inode)->i_mmap_sem);
+	/*
+	 * Need to round down offset to be aligned with page size boundary
+	 * for page size > block size.
+	 */
+	ioffset = round_down(offset, PAGE_SIZE);
+	/*
+	 * Write tail of the last page before removed range since it will get
+	 * removed from the page cache below.
+	 */
+	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
+	if (ret)
+		goto out_mmap;
+	/*
+	 * Write data that will be shifted to preserve them when discarding
+	 * page cache below. We are also protected from pages becoming dirty
+	 * by i_mmap_sem.
+	 */
+	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
+					   LLONG_MAX);
+	if (ret)
+		goto out_mmap;
 	truncate_pagecache(inode, ioffset);
 
 	credits = ext4_writepage_trans_blocks(inode);
@@ -5626,21 +5633,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 			return ret;
 	}
 
-	/*
-	 * Need to round down to align start offset to page size boundary
-	 * for page size > block size.
-	 */
-	ioffset = round_down(offset, PAGE_SIZE);
-
-	/* Write out all dirty pages */
-	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
-					   LLONG_MAX);
-	if (ret)
-		return ret;
-
-	/* Take mutex lock */
 	mutex_lock(&inode->i_mutex);
-
 	/* Currently just for extent based files */
 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 		ret = -EOPNOTSUPP;
@@ -5668,6 +5661,16 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	 * page cache.
 	 */
 	down_write(&EXT4_I(inode)->i_mmap_sem);
+	/*
+	 * Need to round down to align start offset to page size boundary
+	 * for page size > block size.
+	 */
+	ioffset = round_down(offset, PAGE_SIZE);
+	/* Write out all dirty pages */
+	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+					   LLONG_MAX);
+	if (ret)
+		goto out_mmap;
 	truncate_pagecache(inode, ioffset);
 
 	credits = ext4_writepage_trans_blocks(inode);
-- 
2.5.5
