You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
4862 lines
132 KiB
4862 lines
132 KiB
/* |
|
* Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com |
|
* Written by Alex Tomas <alex@clusterfs.com> |
|
* |
|
* Architecture independence: |
|
* Copyright (c) 2005, Bull S.A. |
|
* Written by Pierre Peiffer <pierre.peiffer@bull.net> |
|
* |
|
* This program is free software; you can redistribute it and/or modify |
|
* it under the terms of the GNU General Public License version 2 as |
|
* published by the Free Software Foundation. |
|
* |
|
* This program is distributed in the hope that it will be useful, |
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
* GNU General Public License for more details. |
|
* |
|
* You should have received a copy of the GNU General Public Licens |
|
* along with this program; if not, write to the Free Software |
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- |
|
*/ |
|
|
|
/* |
|
* Extents support for EXT4 |
|
* |
|
* TODO: |
|
* - ext4*_error() should be used in some situations |
|
* - analyze all BUG()/BUG_ON(), use -EIO where appropriate |
|
* - smart tree reduction |
|
*/ |
|
|
|
#include <linux/fs.h> |
|
#include <linux/time.h> |
|
#include <linux/jbd2.h> |
|
#include <linux/highuid.h> |
|
#include <linux/pagemap.h> |
|
#include <linux/quotaops.h> |
|
#include <linux/string.h> |
|
#include <linux/slab.h> |
|
#include <linux/falloc.h> |
|
#include <asm/uaccess.h> |
|
#include <linux/fiemap.h> |
|
#include "ext4_jbd2.h" |
|
#include "ext4_extents.h" |
|
#include "xattr.h" |
|
|
|
#include <trace/events/ext4.h> |
|
|
|
/* |
|
* used by extent splitting. |
|
*/ |
|
#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ |
|
due to ENOSPC */ |
|
#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ |
|
#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ |
|
|
|
#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */ |
|
#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */ |
|
|
|
static __le32 ext4_extent_block_csum(struct inode *inode, |
|
struct ext4_extent_header *eh) |
|
{ |
|
struct ext4_inode_info *ei = EXT4_I(inode); |
|
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
|
__u32 csum; |
|
|
|
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, |
|
EXT4_EXTENT_TAIL_OFFSET(eh)); |
|
return cpu_to_le32(csum); |
|
} |
|
|
|
static int ext4_extent_block_csum_verify(struct inode *inode, |
|
struct ext4_extent_header *eh) |
|
{ |
|
struct ext4_extent_tail *et; |
|
|
|
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, |
|
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) |
|
return 1; |
|
|
|
et = find_ext4_extent_tail(eh); |
|
if (et->et_checksum != ext4_extent_block_csum(inode, eh)) |
|
return 0; |
|
return 1; |
|
} |
|
|
|
static void ext4_extent_block_csum_set(struct inode *inode, |
|
struct ext4_extent_header *eh) |
|
{ |
|
struct ext4_extent_tail *et; |
|
|
|
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, |
|
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) |
|
return; |
|
|
|
et = find_ext4_extent_tail(eh); |
|
et->et_checksum = ext4_extent_block_csum(inode, eh); |
|
} |
|
|
|
static int ext4_split_extent(handle_t *handle, |
|
struct inode *inode, |
|
struct ext4_ext_path *path, |
|
struct ext4_map_blocks *map, |
|
int split_flag, |
|
int flags); |
|
|
|
static int ext4_split_extent_at(handle_t *handle, |
|
struct inode *inode, |
|
struct ext4_ext_path *path, |
|
ext4_lblk_t split, |
|
int split_flag, |
|
int flags); |
|
|
|
static int ext4_find_delayed_extent(struct inode *inode, |
|
struct ext4_ext_cache *newex); |
|
|
|
static int ext4_ext_truncate_extend_restart(handle_t *handle, |
|
struct inode *inode, |
|
int needed) |
|
{ |
|
int err; |
|
|
|
if (!ext4_handle_valid(handle)) |
|
return 0; |
|
if (handle->h_buffer_credits > needed) |
|
return 0; |
|
err = ext4_journal_extend(handle, needed); |
|
if (err <= 0) |
|
return err; |
|
err = ext4_truncate_restart_trans(handle, inode, needed); |
|
if (err == 0) |
|
err = -EAGAIN; |
|
|
|
return err; |
|
} |
|
|
|
/* |
|
* could return: |
|
* - EROFS |
|
* - ENOMEM |
|
*/ |
|
static int ext4_ext_get_access(handle_t *handle, struct inode *inode, |
|
struct ext4_ext_path *path) |
|
{ |
|
if (path->p_bh) { |
|
/* path points to block */ |
|
return ext4_journal_get_write_access(handle, path->p_bh); |
|
} |
|
/* path points to leaf/index in inode body */ |
|
/* we use in-core data, no need to protect them */ |
|
return 0; |
|
} |
|
|
|
/* |
|
* could return: |
|
* - EROFS |
|
* - ENOMEM |
|
* - EIO |
|
*/ |
|
#define ext4_ext_dirty(handle, inode, path) \ |
|
__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path)) |
|
static int __ext4_ext_dirty(const char *where, unsigned int line, |
|
handle_t *handle, struct inode *inode, |
|
struct ext4_ext_path *path) |
|
{ |
|
int err; |
|
if (path->p_bh) { |
|
ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); |
|
/* path points to block */ |
|
err = __ext4_handle_dirty_metadata(where, line, handle, |
|
inode, path->p_bh); |
|
} else { |
|
/* path points to leaf/index in inode body */ |
|
err = ext4_mark_inode_dirty(handle, inode); |
|
} |
|
return err; |
|
} |
|
|
|
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, |
|
struct ext4_ext_path *path, |
|
ext4_lblk_t block) |
|
{ |
|
if (path) { |
|
int depth = path->p_depth; |
|
struct ext4_extent *ex; |
|
|
|
/* |
|
* Try to predict block placement assuming that we are |
|
* filling in a file which will eventually be |
|
* non-sparse --- i.e., in the case of libbfd writing |
|
* an ELF object sections out-of-order but in a way |
|
* the eventually results in a contiguous object or |
|
* executable file, or some database extending a table |
|
* space file. However, this is actually somewhat |
|
* non-ideal if we are writing a sparse file such as |
|
* qemu or KVM writing a raw image file that is going |
|
* to stay fairly sparse, since it will end up |
|
* fragmenting the file system's free space. Maybe we |
|
* should have some hueristics or some way to allow |
|
* userspace to pass a hint to file system, |
|
* especially if the latter case turns out to be |
|
* common. |
|
*/ |
|
ex = path[depth].p_ext; |
|
if (ex) { |
|
ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); |
|
ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); |
|
|
|
if (block > ext_block) |
|
return ext_pblk + (block - ext_block); |
|
else |
|
return ext_pblk - (ext_block - block); |
|
} |
|
|
|
/* it looks like index is empty; |
|
* try to find starting block from index itself */ |
|
if (path[depth].p_bh) |
|
return path[depth].p_bh->b_blocknr; |
|
} |
|
|
|
/* OK. use inode's group */ |
|
return ext4_inode_to_goal_block(inode); |
|
} |
|
|
|
/* |
|
* Allocation for a meta data block |
|
*/ |
|
static ext4_fsblk_t |
|
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, |
|
struct ext4_ext_path *path, |
|
struct ext4_extent *ex, int *err, unsigned int flags) |
|
{ |
|
ext4_fsblk_t goal, newblock; |
|
|
|
goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); |
|
newblock = ext4_new_meta_blocks(handle, inode, goal, flags, |
|
NULL, err); |
|
return newblock; |
|
} |
|
|
|
static inline int ext4_ext_space_block(struct inode *inode, int check) |
|
{ |
|
int size; |
|
|
|
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
|
/ sizeof(struct ext4_extent); |
|
#ifdef AGGRESSIVE_TEST |
|
if (!check && size > 6) |
|
size = 6; |
|
#endif |
|
return size; |
|
} |
|
|
|
static inline int ext4_ext_space_block_idx(struct inode *inode, int check) |
|
{ |
|
int size; |
|
|
|
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
|
/ sizeof(struct ext4_extent_idx); |
|
#ifdef AGGRESSIVE_TEST |
|
if (!check && size > 5) |
|
size = 5; |
|
#endif |
|
return size; |
|
} |
|
|
|
static inline int ext4_ext_space_root(struct inode *inode, int check) |
|
{ |
|
int size; |
|
|
|
size = sizeof(EXT4_I(inode)->i_data); |
|
size -= sizeof(struct ext4_extent_header); |
|
size /= sizeof(struct ext4_extent); |
|
#ifdef AGGRESSIVE_TEST |
|
if (!check && size > 3) |
|
size = 3; |
|
#endif |
|
return size; |
|
} |
|
|
|
static inline int ext4_ext_space_root_idx(struct inode *inode, int check) |
|
{ |
|
int size; |
|
|
|
size = sizeof(EXT4_I(inode)->i_data); |
|
size -= sizeof(struct ext4_extent_header); |
|
size /= sizeof(struct ext4_extent_idx); |
|
#ifdef AGGRESSIVE_TEST |
|
if (!check && size > 4) |
|
size = 4; |
|
#endif |
|
return size; |
|
} |
|
|
|
/* |
|
* Calculate the number of metadata blocks needed |
|
* to allocate @blocks |
|
* Worse case is one block per extent |
|
*/ |
|
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) |
|
{ |
|
struct ext4_inode_info *ei = EXT4_I(inode); |
|
int idxs; |
|
|
|
idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
|
/ sizeof(struct ext4_extent_idx)); |
|
|
|
/* |
|
* If the new delayed allocation block is contiguous with the |
|
* previous da block, it can share index blocks with the |
|
* previous block, so we only need to allocate a new index |
|
* block every idxs leaf blocks. At ldxs**2 blocks, we need |
|
* an additional index block, and at ldxs**3 blocks, yet |
|
* another index blocks. |
|
*/ |
|
if (ei->i_da_metadata_calc_len && |
|
ei->i_da_metadata_calc_last_lblock+1 == lblock) { |
|
int num = 0; |
|
|
|
if ((ei->i_da_metadata_calc_len % idxs) == 0) |
|
num++; |
|
if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) |
|
num++; |
|
if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { |
|
num++; |
|
ei->i_da_metadata_calc_len = 0; |
|
} else |
|
ei->i_da_metadata_calc_len++; |
|
ei->i_da_metadata_calc_last_lblock++; |
|
return num; |
|
} |
|
|
|
/* |
|
* In the worst case we need a new set of index blocks at |
|
* every level of the inode's extent tree. |
|
*/ |
|
ei->i_da_metadata_calc_len = 1; |
|
ei->i_da_metadata_calc_last_lblock = lblock; |
|
return ext_depth(inode) + 1; |
|
} |
|
|
|
static int |
|
ext4_ext_max_entries(struct inode *inode, int depth) |
|
{ |
|
int max; |
|
|
|
if (depth == ext_depth(inode)) { |
|
if (depth == 0) |
|
max = ext4_ext_space_root(inode, 1); |
|
else |
|
max = ext4_ext_space_root_idx(inode, 1); |
|
} else { |
|
if (depth == 0) |
|
max = ext4_ext_space_block(inode, 1); |
|
else |
|
max = ext4_ext_space_block_idx(inode, 1); |
|
} |
|
|
|
return max; |
|
} |
|
|
|
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) |
|
{ |
|
ext4_fsblk_t block = ext4_ext_pblock(ext); |
|
int len = ext4_ext_get_actual_len(ext); |
|
|
|
if (len == 0) |
|
return 0; |
|
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
|
} |
|
|
|
static int ext4_valid_extent_idx(struct inode *inode, |
|
struct ext4_extent_idx *ext_idx) |
|
{ |
|
ext4_fsblk_t block = ext4_idx_pblock(ext_idx); |
|
|
|
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); |
|
} |
|
|
|
static int ext4_valid_extent_entries(struct inode *inode, |
|
struct ext4_extent_header *eh, |
|
int depth) |
|
{ |
|
unsigned short entries; |
|
if (eh->eh_entries == 0) |
|
return 1; |
|
|
|
entries = le16_to_cpu(eh->eh_entries); |
|
|
|
if (depth == 0) { |
|
/* leaf entries */ |
|
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); |
|
while (entries) { |
|
if (!ext4_valid_extent(inode, ext)) |
|
return 0; |
|
ext++; |
|
entries--; |
|
} |
|
} else { |
|
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); |
|
while (entries) { |
|
if (!ext4_valid_extent_idx(inode, ext_idx)) |
|
return 0; |
|
ext_idx++; |
|
entries--; |
|
} |
|
} |
|
return 1; |
|
} |
|
|
|
static int __ext4_ext_check(const char *function, unsigned int line, |
|
struct inode *inode, struct ext4_extent_header *eh, |
|
int depth) |
|
{ |
|
const char *error_msg; |
|
int max = 0; |
|
|
|
if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { |
|
error_msg = "invalid magic"; |
|
goto corrupted; |
|
} |
|
if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { |
|
error_msg = "unexpected eh_depth"; |
|
goto corrupted; |
|
} |
|
if (unlikely(eh->eh_max == 0)) { |
|
error_msg = "invalid eh_max"; |
|
goto corrupted; |
|
} |
|
max = ext4_ext_max_entries(inode, depth); |
|
if (unlikely(le16_to_cpu(eh->eh_max) > max)) { |
|
error_msg = "too large eh_max"; |
|
goto corrupted; |
|
} |
|
if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { |
|
error_msg = "invalid eh_entries"; |
|
goto corrupted; |
|
} |
|
if (!ext4_valid_extent_entries(inode, eh, depth)) { |
|
error_msg = "invalid extent entries"; |
|
goto corrupted; |
|
} |
|
/* Verify checksum on non-root extent tree nodes */ |
|
if (ext_depth(inode) != depth && |
|
!ext4_extent_block_csum_verify(inode, eh)) { |
|
error_msg = "extent tree corrupted"; |
|
goto corrupted; |
|
} |
|
return 0; |
|
|
|
corrupted: |
|
ext4_error_inode(inode, function, line, 0, |
|
"bad header/extent: %s - magic %x, " |
|
"entries %u, max %u(%u), depth %u(%u)", |
|
error_msg, le16_to_cpu(eh->eh_magic), |
|
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), |
|
max, le16_to_cpu(eh->eh_depth), depth); |
|
|
|
return -EIO; |
|
} |
|
|
|
#define ext4_ext_check(inode, eh, depth) \ |
|
__ext4_ext_check(__func__, __LINE__, inode, eh, depth) |
|
|
|
int ext4_ext_check_inode(struct inode *inode) |
|
{ |
|
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); |
|
} |
|
|
|
static int __ext4_ext_check_block(const char *function, unsigned int line, |
|
struct inode *inode, |
|
struct ext4_extent_header *eh, |
|
int depth, |
|
struct buffer_head *bh) |
|
{ |
|
int ret; |
|
|
|
if (buffer_verified(bh)) |
|
return 0; |
|
ret = ext4_ext_check(inode, eh, depth); |
|
if (ret) |
|
return ret; |
|
set_buffer_verified(bh); |
|
return ret; |
|
} |
|
|
|
#define ext4_ext_check_block(inode, eh, depth, bh) \ |
|
__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh) |
|
|
|
#ifdef EXT_DEBUG |
|
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) |
|
{ |
|
int k, l = path->p_depth; |
|
|
|
ext_debug("path:"); |
|
for (k = 0; k <= l; k++, path++) { |
|
if (path->p_idx) { |
|
ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), |
|
ext4_idx_pblock(path->p_idx)); |
|
} else if (path->p_ext) { |
|
ext_debug(" %d:[%d]%d:%llu ", |
|
le32_to_cpu(path->p_ext->ee_block), |
|
ext4_ext_is_uninitialized(path->p_ext), |
|
ext4_ext_get_actual_len(path->p_ext), |
|
ext4_ext_pblock(path->p_ext)); |
|
} else |
|
ext_debug(" []"); |
|
} |
|
ext_debug("\n"); |
|
} |
|
|
|
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) |
|
{ |
|
int depth = ext_depth(inode); |
|
struct ext4_extent_header *eh; |
|
struct ext4_extent *ex; |
|
int i; |
|
|
|
if (!path) |
|
return; |
|
|
|
eh = path[depth].p_hdr; |
|
ex = EXT_FIRST_EXTENT(eh); |
|
|
|
ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); |
|
|
|
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { |
|
ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), |
|
ext4_ext_is_uninitialized(ex), |
|
ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); |
|
} |
|
ext_debug("\n"); |
|
} |
|
|
|
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, |
|
ext4_fsblk_t newblock, int level) |
|
{ |
|
int depth = ext_depth(inode); |
|
struct ext4_extent *ex; |
|
|
|
if (depth != level) { |
|
struct ext4_extent_idx *idx; |
|
idx = path[level].p_idx; |
|
while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { |
|
ext_debug("%d: move %d:%llu in new index %llu\n", level, |
|
le32_to_cpu(idx->ei_block), |
|
ext4_idx_pblock(idx), |
|
newblock); |
|
idx++; |
|
} |
|
|
|
return; |
|
} |
|
|
|
ex = path[depth].p_ext; |
|
while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { |
|
ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", |
|
le32_to_cpu(ex->ee_block), |
|
ext4_ext_pblock(ex), |
|
ext4_ext_is_uninitialized(ex), |
|
ext4_ext_get_actual_len(ex), |
|
newblock); |
|
ex++; |
|
} |
|
} |
|
|
|
#else |
|
#define ext4_ext_show_path(inode, path) |
|
#define ext4_ext_show_leaf(inode, path) |
|
#define ext4_ext_show_move(inode, path, newblock, level) |
|
#endif |
|
|
|
void ext4_ext_drop_refs(struct ext4_ext_path *path) |
|
{ |
|
int depth = path->p_depth; |
|
int i; |
|
|
|
for (i = 0; i <= depth; i++, path++) |
|
if (path->p_bh) { |
|
brelse(path->p_bh); |
|
path->p_bh = NULL; |
|
} |
|
} |
|
|
|
/* |
|
* ext4_ext_binsearch_idx: |
|
* binary search for the closest index of the given block |
|
* the header must be checked before calling this |
|
*/ |
|
static void |
|
ext4_ext_binsearch_idx(struct inode *inode, |
|
struct ext4_ext_path *path, ext4_lblk_t block) |
|
{ |
|
struct ext4_extent_header *eh = path->p_hdr; |
|
struct ext4_extent_idx *r, *l, *m; |
|
|
|
|
|
ext_debug("binsearch for %u(idx): ", block); |
|
|
|
l = EXT_FIRST_INDEX(eh) + 1; |
|
r = EXT_LAST_INDEX(eh); |
|
while (l <= r) { |
|
m = l + (r - l) / 2; |
|
if (block < le32_to_cpu(m->ei_block)) |
|
r = m - 1; |
|
else |
|
l = m + 1; |
|
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), |
|
m, le32_to_cpu(m->ei_block), |
|
r, le32_to_cpu(r->ei_block)); |
|
} |
|
|
|
path->p_idx = l - 1; |
|
ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), |
|
ext4_idx_pblock(path->p_idx)); |
|
|
|
#ifdef CHECK_BINSEARCH |
|
{ |
|
struct ext4_extent_idx *chix, *ix; |
|
int k; |
|
|
|
chix = ix = EXT_FIRST_INDEX(eh); |
|
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { |
|
if (k != 0 && |
|
le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { |
|
printk(KERN_DEBUG "k=%d, ix=0x%p, " |
|
"first=0x%p\n", k, |
|
ix, EXT_FIRST_INDEX(eh)); |
|
printk(KERN_DEBUG "%u <= %u\n", |
|
le32_to_cpu(ix->ei_block), |
|
le32_to_cpu(ix[-1].ei_block)); |
|
} |
|
BUG_ON(k && le32_to_cpu(ix->ei_block) |
|
<= le32_to_cpu(ix[-1].ei_block)); |
|
if (block < le32_to_cpu(ix->ei_block)) |
|
break; |
|
chix = ix; |
|
} |
|
BUG_ON(chix != path->p_idx); |
|
} |
|
#endif |
|
|
|
} |
|
|
|
/* |
|
* ext4_ext_binsearch: |
|
* binary search for closest extent of the given block |
|
* the header must be checked before calling this |
|
*/ |
|
static void |
|
ext4_ext_binsearch(struct inode *inode, |
|
struct ext4_ext_path *path, ext4_lblk_t block) |
|
{ |
|
struct ext4_extent_header *eh = path->p_hdr; |
|
struct ext4_extent *r, *l, *m; |
|
|
|
if (eh->eh_entries == 0) { |
|
/* |
|
* this leaf is empty: |
|
* we get such a leaf in split/add case |
|
*/ |
|
return; |
|
} |
|
|
|
ext_debug("binsearch for %u: ", block); |
|
|
|
l = EXT_FIRST_EXTENT(eh) + 1; |
|
r = EXT_LAST_EXTENT(eh); |
|
|
|
while (l <= r) { |
|
m = l + (r - l) / 2; |
|
if (block < le32_to_cpu(m->ee_block)) |
|
r = m - 1; |
|
else |
|
l = m + 1; |
|
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), |
|
m, le32_to_cpu(m->ee_block), |
|
r, le32_to_cpu(r->ee_block)); |
|
} |
|
|
|
path->p_ext = l - 1; |
|
ext_debug(" -> %d:%llu:[%d]%d ", |
|
le32_to_cpu(path->p_ext->ee_block), |
|
ext4_ext_pblock(path->p_ext), |
|
ext4_ext_is_uninitialized(path->p_ext), |
|
ext4_ext_get_actual_len(path->p_ext)); |
|
|
|
#ifdef CHECK_BINSEARCH |
|
{ |
|
struct ext4_extent *chex, *ex; |
|
int k; |
|
|
|
chex = ex = EXT_FIRST_EXTENT(eh); |
|
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { |
|
BUG_ON(k && le32_to_cpu(ex->ee_block) |
|
<= le32_to_cpu(ex[-1].ee_block)); |
|
if (block < le32_to_cpu(ex->ee_block)) |
|
break; |
|
chex = ex; |
|
} |
|
BUG_ON(chex != path->p_ext); |
|
} |
|
#endif |
|
|
|
} |
|
|
|
int ext4_ext_tree_init(handle_t *handle, struct inode *inode) |
|
{ |
|
struct ext4_extent_header *eh; |
|
|
|
eh = ext_inode_hdr(inode); |
|
eh->eh_depth = 0; |
|
eh->eh_entries = 0; |
|
eh->eh_magic = EXT4_EXT_MAGIC; |
|
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); |
|
ext4_mark_inode_dirty(handle, inode); |
|
ext4_ext_invalidate_cache(inode); |
|
return 0; |
|
} |
|
|
|
struct ext4_ext_path * |
|
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, |
|
struct ext4_ext_path *path) |
|
{ |
|
struct ext4_extent_header *eh; |
|
struct buffer_head *bh; |
|
short int depth, i, ppos = 0, alloc = 0; |
|
int ret; |
|
|
|
eh = ext_inode_hdr(inode); |
|
depth = ext_depth(inode); |
|
|
|
/* account possible depth increase */ |
|
if (!path) { |
|
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), |
|
GFP_NOFS); |
|
if (!path) |
|
return ERR_PTR(-ENOMEM); |
|
alloc = 1; |
|
} |
|
path[0].p_hdr = eh; |
|
path[0].p_bh = NULL; |
|
|
|
i = depth; |
|
/* walk through the tree */ |
|
while (i) { |
|
ext_debug("depth %d: num %d, max %d\n", |
|
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
|
|
|
ext4_ext_binsearch_idx(inode, path + ppos, block); |
|
path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); |
|
path[ppos].p_depth = i; |
|
path[ppos].p_ext = NULL; |
|
|
|
bh = sb_getblk(inode->i_sb, path[ppos].p_block); |
|
if (unlikely(!bh)) { |
|
ret = -ENOMEM; |
|
goto err; |
|
} |
|
if (!bh_uptodate_or_lock(bh)) { |
|
trace_ext4_ext_load_extent(inode, block, |
|
path[ppos].p_block); |
|
ret = bh_submit_read(bh); |
|
if (ret < 0) { |
|
put_bh(bh); |
|
goto err; |
|
} |
|
} |
|
eh = ext_block_hdr(bh); |
|
ppos++; |
|
if (unlikely(ppos > depth)) { |
|
put_bh(bh); |
|
EXT4_ERROR_INODE(inode, |
|
"ppos %d > depth %d", ppos, depth); |
|
ret = -EIO; |
|
goto err; |
|
} |
|
path[ppos].p_bh = bh; |
|
path[ppos].p_hdr = eh; |
|
i--; |
|
|
|
ret = ext4_ext_check_block(inode, eh, i, bh); |
|
if (ret < 0) |
|
goto err; |
|
} |
|
|
|
path[ppos].p_depth = i; |
|
path[ppos].p_ext = NULL; |
|
path[ppos].p_idx = NULL; |
|
|
|
/* find extent */ |
|
ext4_ext_binsearch(inode, path + ppos, block); |
|
/* if not an empty leaf */ |
|
if (path[ppos].p_ext) |
|
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); |
|
|
|
ext4_ext_show_path(inode, path); |
|
|
|
return path; |
|
|
|
err: |
|
ext4_ext_drop_refs(path); |
|
if (alloc) |
|
kfree(path); |
|
return ERR_PTR(ret); |
|
} |
|
|
|
/* |
|
* ext4_ext_insert_index: |
|
* insert new index [@logical;@ptr] into the block at @curp; |
|
* check where to insert: before @curp or after @curp |
|
*/ |
|
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, |
|
struct ext4_ext_path *curp, |
|
int logical, ext4_fsblk_t ptr) |
|
{ |
|
struct ext4_extent_idx *ix; |
|
int len, err; |
|
|
|
err = ext4_ext_get_access(handle, inode, curp); |
|
if (err) |
|
return err; |
|
|
|
if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { |
|
EXT4_ERROR_INODE(inode, |
|
"logical %d == ei_block %d!", |
|
logical, le32_to_cpu(curp->p_idx->ei_block)); |
|
return -EIO; |
|
} |
|
|
|
if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) |
|
>= le16_to_cpu(curp->p_hdr->eh_max))) { |
|
EXT4_ERROR_INODE(inode, |
|
"eh_entries %d >= eh_max %d!", |
|
le16_to_cpu(curp->p_hdr->eh_entries), |
|
le16_to_cpu(curp->p_hdr->eh_max)); |
|
return -EIO; |
|
} |
|
|
|
if (logical > le32_to_cpu(curp->p_idx->ei_block)) { |
|
/* insert after */ |
|
ext_debug("insert new index %d after: %llu\n", logical, ptr); |
|
ix = curp->p_idx + 1; |
|
} else { |
|
/* insert before */ |
|
ext_debug("insert new index %d before: %llu\n", logical, ptr); |
|
ix = curp->p_idx; |
|
} |
|
|
|
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; |
|
BUG_ON(len < 0); |
|
if (len > 0) { |
|
ext_debug("insert new index %d: " |
|
"move %d indices from 0x%p to 0x%p\n", |
|
logical, len, ix, ix + 1); |
|
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); |
|
} |
|
|
|
if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { |
|
EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); |
|
return -EIO; |
|
} |
|
|
|
ix->ei_block = cpu_to_le32(logical); |
|
ext4_idx_store_pblock(ix, ptr); |
|
le16_add_cpu(&curp->p_hdr->eh_entries, 1); |
|
|
|
if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { |
|
EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); |
|
return -EIO; |
|
} |
|
|
|
err = ext4_ext_dirty(handle, inode, curp); |
|
ext4_std_error(inode->i_sb, err); |
|
|
|
return err; |
|
} |
|
|
|
/* |
|
* ext4_ext_split: |
|
* inserts new subtree into the path, using free index entry |
|
* at depth @at: |
|
* - allocates all needed blocks (new leaf and all intermediate index blocks) |
|
* - makes decision where to split |
|
* - moves remaining extents and index entries (right to the split point) |
|
* into the newly allocated blocks |
|
* - initializes subtree |
|
*/ |
|
static int ext4_ext_split(handle_t *handle, struct inode *inode, |
|
unsigned int flags, |
|
struct ext4_ext_path *path, |
|
struct ext4_extent *newext, int at) |
|
{ |
|
struct buffer_head *bh = NULL; |
|
int depth = ext_depth(inode); |
|
struct ext4_extent_header *neh; |
|
struct ext4_extent_idx *fidx; |
|
int i = at, k, m, a; |
|
ext4_fsblk_t newblock, oldblock; |
|
__le32 border; |
|
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ |
|
int err = 0; |
|
|
|
/* make decision: where to split? */ |
|
/* FIXME: now decision is simplest: at current extent */ |
|
|
|
/* if current leaf will be split, then we should use |
|
* border from split point */ |
|
if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { |
|
EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); |
|
return -EIO; |
|
} |
|
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { |
|
border = path[depth].p_ext[1].ee_block; |
|
ext_debug("leaf will be split." |
|
" next leaf starts at %d\n", |
|
le32_to_cpu(border)); |
|
} else { |
|
border = newext->ee_block; |
|
ext_debug("leaf will be added." |
|
" next leaf starts at %d\n", |
|
le32_to_cpu(border)); |
|
} |
|
|
|
/* |
|
* If error occurs, then we break processing |
|
* and mark filesystem read-only. index won't |
|
* be inserted and tree will be in consistent |
|
* state. Next mount will repair buffers too. |
|
*/ |
|
|
|
/* |
|
* Get array to track all allocated blocks. |
|
* We need this to handle errors and free blocks |
|
* upon them. |
|
*/ |
|
ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); |
|
if (!ablocks) |
|
return -ENOMEM; |
|
|
|
/* allocate all needed blocks */ |
|
ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); |
|
for (a = 0; a < depth - at; a++) { |
|
newblock = ext4_ext_new_meta_block(handle, inode, path, |
|
newext, &err, flags); |
|
if (newblock == 0) |
|
goto cleanup; |
|
ablocks[a] = newblock; |
|
} |
|
|
|
/* initialize new leaf */ |
|
newblock = ablocks[--a]; |
|
if (unlikely(newblock == 0)) { |
|
EXT4_ERROR_INODE(inode, "newblock == 0!"); |
|
err = -EIO; |
|
goto cleanup; |
|
} |
|
bh = sb_getblk(inode->i_sb, newblock); |
|
if (unlikely(!bh)) { |
|
err = -ENOMEM; |
|
goto cleanup; |
|
} |
|
lock_buffer(bh); |
|
|
|
err = ext4_journal_get_create_access(handle, bh); |
|
if (err) |
|
goto cleanup; |
|
|
|
neh = ext_block_hdr(bh); |
|
neh->eh_entries = 0; |
|
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
|
neh->eh_magic = EXT4_EXT_MAGIC; |
|
neh->eh_depth = 0; |
|
|
|
/* move remainder of path[depth] to the new leaf */ |
|
if (unlikely(path[depth].p_hdr->eh_entries != |
|
path[depth].p_hdr->eh_max)) { |
|
EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", |
|
path[depth].p_hdr->eh_entries, |
|
path[depth].p_hdr->eh_max); |
|
err = -EIO; |
|
goto cleanup; |
|
} |
|
/* start copy from next extent */ |
|
m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; |
|
ext4_ext_show_move(inode, path, newblock, depth); |
|
if (m) { |
|
struct ext4_extent *ex; |
|
ex = EXT_FIRST_EXTENT(neh); |
|
memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); |
|
le16_add_cpu(&neh->eh_entries, m); |
|
} |
|
|
|
ext4_extent_block_csum_set(inode, neh); |
|
set_buffer_uptodate(bh); |
|
unlock_buffer(bh); |
|
|
|
err = ext4_handle_dirty_metadata(handle, inode, bh); |
|
if (err) |
|
goto cleanup; |
|
brelse(bh); |
|
bh = NULL; |
|
|
|
/* correct old leaf */ |
|
if (m) { |
|
err = ext4_ext_get_access(handle, inode, path + depth); |
|
if (err) |
|
goto cleanup; |
|
le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); |
|
err = ext4_ext_dirty(handle, inode, path + depth); |
|
if (err) |
|
goto cleanup; |
|
|
|
} |
|
|
|
/* create intermediate indexes */ |
|
k = depth - at - 1; |
|
if (unlikely(k < 0)) { |
|
EXT4_ERROR_INODE(inode, "k %d < 0!", k); |
|
err = -EIO; |
|
goto cleanup; |
|
} |
|
if (k) |
|
ext_debug("create %d intermediate indices\n", k); |
|
/* insert new index into current index block */ |
|
/* current depth stored in i var */ |
|
i = depth - 1; |
|
while (k--) { |
|
oldblock = newblock; |
|
newblock = ablocks[--a]; |
|
bh = sb_getblk(inode->i_sb, newblock); |
|
if (unlikely(!bh)) { |
|
err = -ENOMEM; |
|
goto cleanup; |
|
} |
|
lock_buffer(bh); |
|
|
|
err = ext4_journal_get_create_access(handle, bh); |
|
if (err) |
|
goto cleanup; |
|
|
|
neh = ext_block_hdr(bh); |
|
neh->eh_entries = cpu_to_le16(1); |
|
neh->eh_magic = EXT4_EXT_MAGIC; |
|
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); |
|
neh->eh_depth = cpu_to_le16(depth - i); |
|
fidx = EXT_FIRST_INDEX(neh); |
|
fidx->ei_block = border; |
|
ext4_idx_store_pblock(fidx, oldblock); |
|
|
|
ext_debug("int.index at %d (block %llu): %u -> %llu\n", |
|
i, newblock, le32_to_cpu(border), oldblock); |
|
|
|
/* move remainder of path[i] to the new index block */ |
|
if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != |
|
EXT_LAST_INDEX(path[i].p_hdr))) { |
|
EXT4_ERROR_INODE(inode, |
|
"EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", |
|
le32_to_cpu(path[i].p_ext->ee_block)); |
|
err = -EIO; |
|
goto cleanup; |
|
} |
|
/* start copy indexes */ |
|
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; |
|
ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, |
|
EXT_MAX_INDEX(path[i].p_hdr)); |
|
ext4_ext_show_move(inode, path, newblock, i); |
|
if (m) { |
|
memmove(++fidx, path[i].p_idx, |
|
sizeof(struct ext4_extent_idx) * m); |
|
le16_add_cpu(&neh->eh_entries, m); |
|
} |
|
ext4_extent_block_csum_set(inode, neh); |
|
set_buffer_uptodate(bh); |
|
unlock_buffer(bh); |
|
|
|
err = ext4_handle_dirty_metadata(handle, inode, bh); |
|
if (err) |
|
goto cleanup; |
|
brelse(bh); |
|
bh = NULL; |
|
|
|
/* correct old index */ |
|
if (m) { |
|
err = ext4_ext_get_access(handle, inode, path + i); |
|
if (err) |
|
goto cleanup; |
|
le16_add_cpu(&path[i].p_hdr->eh_entries, -m); |
|
err = ext4_ext_dirty(handle, inode, path + i); |
|
if (err) |
|
goto cleanup; |
|
} |
|
|
|
i--; |
|
} |
|
|
|
/* insert new index */ |
|
err = ext4_ext_insert_index(handle, inode, path + at, |
|
le32_to_cpu(border), newblock); |
|
|
|
cleanup: |
|
if (bh) { |
|
if (buffer_locked(bh)) |
|
unlock_buffer(bh); |
|
brelse(bh); |
|
} |
|
|
|
if (err) { |
|
/* free all allocated blocks in error case */ |
|
for (i = 0; i < depth; i++) { |
|
if (!ablocks[i]) |
|
continue; |
|
ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, |
|
EXT4_FREE_BLOCKS_METADATA); |
|
} |
|
} |
|
kfree(ablocks); |
|
|
|
return err; |
|
} |
|
|
|
/* |
|
* ext4_ext_grow_indepth: |
|
* implements tree growing procedure: |
|
* - allocates new block |
|
* - moves top-level data (index block or leaf) into the new block |
|
* - initializes new top-level, creating index that points to the |
|
* just created block |
|
*/ |
|
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, |
|
unsigned int flags, |
|
struct ext4_extent *newext) |
|
{ |
|
struct ext4_extent_header *neh; |
|
struct buffer_head *bh; |
|
ext4_fsblk_t newblock; |
|
int err = 0; |
|
|
|
newblock = ext4_ext_new_meta_block(handle, inode, NULL, |
|
newext, &err, flags); |
|
if (newblock == 0) |
|
return err; |
|
|
|
bh = sb_getblk(inode->i_sb, newblock); |
|
if (unlikely(!bh)) |
|
return -ENOMEM; |
|
lock_buffer(bh); |
|
|
|
err = ext4_journal_get_create_access(handle, bh); |
|
if (err) { |
|
unlock_buffer(bh); |
|
goto out; |
|
} |
|
|
|
/* move top-level index/leaf into new block */ |
|
memmove(bh->b_data, EXT4_I(inode)->i_data, |
|
sizeof(EXT4_I(inode)->i_data)); |
|
|
|
/* set size of new block */ |
|
neh = ext_block_hdr(bh); |
|
/* old root could have indexes or leaves |
|
* so calculate e_max right way */ |
|
if (ext_depth(inode)) |
|
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); |
|
else |
|
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
|
neh->eh_magic = EXT4_EXT_MAGIC; |
|
ext4_extent_block_csum_set(inode, neh); |
|
set_buffer_uptodate(bh); |
|
unlock_buffer(bh); |
|
|
|
err = ext4_handle_dirty_metadata(handle, inode, bh); |
|
if (err) |
|
goto out; |
|
|
|
/* Update top-level index: num,max,pointer */ |
|
neh = ext_inode_hdr(inode); |
|
neh->eh_entries = cpu_to_le16(1); |
|
ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); |
|
if (neh->eh_depth == 0) { |
|
/* Root extent block becomes index block */ |
|
neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); |
|
EXT_FIRST_INDEX(neh)->ei_block = |
|
EXT_FIRST_EXTENT(neh)->ee_block; |
|
} |
|
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", |
|
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), |
|
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), |
|
ext4_idx_pblock(EXT_FIRST_INDEX(neh))); |
|
|
|
le16_add_cpu(&neh->eh_depth, 1); |
|
ext4_mark_inode_dirty(handle, inode); |
|
out: |
|
brelse(bh); |
|
|
|
return err; |
|
} |
|
|
|
/* |
|
* ext4_ext_create_new_leaf: |
|
* finds empty index and adds new leaf. |
|
* if no free index is found, then it requests in-depth growing. |
|
*/ |
|
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, |
|
unsigned int flags, |
|
struct ext4_ext_path *path, |
|
struct ext4_extent *newext) |
|
{ |
|
struct ext4_ext_path *curp; |
|
int depth, i, err = 0; |
|
|
|
repeat: |
|
i = depth = ext_depth(inode); |
|
|
|
/* walk up to the tree and look for free index entry */ |
|
curp = path + depth; |
|
while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { |
|
i--; |
|
curp--; |
|
} |
|
|
|
/* we use already allocated block for index block, |
|
* so subsequent data blocks should be contiguous */ |
|
if (EXT_HAS_FREE_INDEX(curp)) { |
|
/* if we found index with free entry, then use that |
|
* entry: create all needed subtree and add new leaf */ |
|
err = ext4_ext_split(handle, inode, flags, path, newext, i); |
|
if (err) |
|
goto out; |
|
|
|
/* refill path */ |
|
ext4_ext_drop_refs(path); |
|
path = ext4_ext_find_extent(inode, |
|
(ext4_lblk_t)le32_to_cpu(newext->ee_block), |
|
path); |
|
if (IS_ERR(path)) |
|
err = PTR_ERR(path); |
|
} else { |
|
/* tree is full, time to grow in depth */ |
|
err = ext4_ext_grow_indepth(handle, inode, flags, newext); |
|
if (err) |
|
goto out; |
|
|
|
/* refill path */ |
|
ext4_ext_drop_refs(path); |
|
path = ext4_ext_find_extent(inode, |
|
(ext4_lblk_t)le32_to_cpu(newext->ee_block), |
|
path); |
|
if (IS_ERR(path)) { |
|
err = PTR_ERR(path); |
|
goto out; |
|
} |
|
|
|
/* |
|
* only first (depth 0 -> 1) produces free space; |
|
* in all other cases we have to split the grown tree |
|
*/ |
|
depth = ext_depth(inode); |
|
if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { |
|
/* now we need to split */ |
|
goto repeat; |
|
} |
|
} |
|
|
|
out: |
|
return err; |
|
} |
|
|
|
/* |
|
* search the closest allocated block to the left for *logical |
|
* and returns it at @logical + it's physical address at @phys |
|
* if *logical is the smallest allocated block, the function |
|
* returns 0 at @phys |
|
* return value contains 0 (success) or error code |
|
*/ |
|
static int ext4_ext_search_left(struct inode *inode, |
|
struct ext4_ext_path *path, |
|
ext4_lblk_t *logical, ext4_fsblk_t *phys) |
|
{ |
|
struct ext4_extent_idx *ix; |
|
struct ext4_extent *ex; |
|
int depth, ee_len; |
|
|
|
if (unlikely(path == NULL)) { |
|
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); |
|
return -EIO; |
|
} |
|
depth = path->p_depth; |
|
*phys = 0; |
|
|
|
if (depth == 0 && path->p_ext == NULL) |
|
return 0; |
|
|
|
/* usually extent in the path covers blocks smaller |
|
* then *logical, but it can be that extent is the |
|
* first one in the file */ |
|
|
|
ex = path[depth].p_ext; |
|
ee_len = ext4_ext_get_actual_len(ex); |
|
if (*logical < le32_to_cpu(ex->ee_block)) { |
|
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
|
EXT4_ERROR_INODE(inode, |
|
"EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", |
|
*logical, le32_to_cpu(ex->ee_block)); |
|
return -EIO; |
|
} |
|
while (--depth >= 0) { |
|
ix = path[depth].p_idx; |
|
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
|
EXT4_ERROR_INODE(inode, |
|
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", |
|
ix != NULL ? le32_to_cpu(ix->ei_block) : 0, |
|
EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? |
|
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, |
|
depth); |
|
return -EIO; |
|
} |
|
} |
|
return 0; |
|
} |
|
|
|
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
|
EXT4_ERROR_INODE(inode, |
|
"logical %d < ee_block %d + ee_len %d!", |
|
*logical, le32_to_cpu(ex->ee_block), ee_len); |
|
return -EIO; |
|
} |
|
|
|
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
|
*phys = ext4_ext_pblock(ex) + ee_len - 1; |
|
return 0; |
|
} |
|
|
|
/* |
|
* search the closest allocated block to the right for *logical |
|
* and returns it at @logical + it's physical address at @phys |
|
* if *logical is the largest allocated block, the function |
|
* returns 0 at @phys |
|
* return value contains 0 (success) or error code |
|
*/ |
|
static int ext4_ext_search_right(struct inode *inode, |
|
struct ext4_ext_path *path, |
|
ext4_lblk_t *logical, ext4_fsblk_t *phys, |
|
struct ext4_extent **ret_ex) |
|
{ |
|
struct buffer_head *bh = NULL; |
|
struct ext4_extent_header *eh; |
|
struct ext4_extent_idx *ix; |
|
struct ext4_extent *ex; |
|
ext4_fsblk_t block; |
|
int depth; /* Note, NOT eh_depth; depth from top of tree */ |
|
int ee_len; |
|
|
|
if (unlikely(path == NULL)) { |
|
EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); |
|
return -EIO; |
|
} |
|
depth = path->p_depth; |
|
*phys = 0; |
|
|
|
if (depth == 0 && path->p_ext == NULL) |
|
return 0; |
|
|
|
/* usually extent in the path covers blocks smaller |
|
* then *logical, but it can be that extent is the |
|
* first one in the file */ |
|
|
|
ex = path[depth].p_ext; |
|
ee_len = ext4_ext_get_actual_len(ex); |
|
if (*logical < le32_to_cpu(ex->ee_block)) { |
|
if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
|
EXT4_ERROR_INODE(inode, |
|
"first_extent(path[%d].p_hdr) != ex", |
|
depth); |
|
return -EIO; |
|
} |
|
while (--depth >= 0) { |
|
ix = path[depth].p_idx; |
|
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
|
EXT4_ERROR_INODE(inode, |
|
"ix != EXT_FIRST_INDEX *logical %d!", |
|
*logical); |
|
return -EIO; |
|
} |
|
} |
|
goto found_extent; |
|
} |
|
|
|
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
|
EXT4_ERROR_INODE(inode, |
|
"logical %d < ee_block %d + ee_len %d!", |
|
*logical, le32_to_cpu(ex->ee_block), ee_len); |
|
return -EIO; |
|
} |
|
|
|
if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { |
|
/* next allocated block in this leaf */ |
|
ex++; |
|
goto found_extent; |
|
} |
|
|
|
/* go up and search for index to the right */ |
|
while (--depth >= 0) { |
|
ix = path[depth].p_idx; |
|
if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) |
|
goto got_index; |
|
} |
|
|
|
/* we've gone up to the root and found no index to the right */ |
|
return 0; |
|
|
|
got_index: |
|
/* we've found index to the right, let's |
|
* follow it and find the closest allocated |
|
* block to the right */ |
|
ix++; |
|
block = ext4_idx_pblock(ix); |
|
while (++depth < path->p_depth) { |
|
bh = sb_bread(inode->i_sb, block); |
|
if (bh == NULL) |
|
return -EIO; |
|
eh = ext_block_hdr(bh); |
|
/* subtract from p_depth to get proper eh_depth */ |
|
if (ext4_ext_check_block(inode, eh, |
|
path->p_depth - depth, bh)) { |
|
put_bh(bh); |
|
return -EIO; |
|
} |
|
ix = EXT_FIRST_INDEX(eh); |
|
block = ext4_idx_pblock(ix); |
|
put_bh(bh); |
|
} |
|
|
|
bh = sb_bread(inode->i_sb, block); |
|
if (bh == NULL) |
|
return -EIO; |
|
eh = ext_block_hdr(bh); |
|
if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { |
|
put_bh(bh); |
|
return -EIO; |
|
} |
|
ex = EXT_FIRST_EXTENT(eh); |
|
found_extent: |
|
*logical = le32_to_cpu(ex->ee_block); |
|
*phys = ext4_ext_pblock(ex); |
|
*ret_ex = ex; |
|
if (bh) |
|
put_bh(bh); |
|
return 0; |
|
} |
|
|
|
/* |
|
* ext4_ext_next_allocated_block: |
|
* returns allocated block in subsequent extent or EXT_MAX_BLOCKS. |
|
* NOTE: it considers block number from index entry as |
|
* allocated block. Thus, index entries have to be consistent |
|
* with leaves. |
|
*/ |
|
static ext4_lblk_t |
|
ext4_ext_next_allocated_block(struct ext4_ext_path *path) |
|
{ |
|
int depth; |
|
|
|
BUG_ON(path == NULL); |
|
depth = path->p_depth; |
|
|
|
if (depth == 0 && path->p_ext == NULL) |
|
return EXT_MAX_BLOCKS; |
|
|
|
while (depth >= 0) { |
|
if (depth == path->p_depth) { |
|
/* leaf */ |
|
if (path[depth].p_ext && |
|
path[depth].p_ext != |
|
EXT_LAST_EXTENT(path[depth].p_hdr)) |
|
return le32_to_cpu(path[depth].p_ext[1].ee_block); |
|
} else { |
|
/* index */ |
|
if (path[depth].p_idx != |
|
EXT_LAST_INDEX(path[depth].p_hdr)) |
|
return le32_to_cpu(path[depth].p_idx[1].ei_block); |
|
} |
|
depth--; |
|
} |
|
|
|
return EXT_MAX_BLOCKS; |
|
} |
|
|
|
/* |
|
* ext4_ext_next_leaf_block: |
|
* returns first allocated block from next leaf or EXT_MAX_BLOCKS |
|
*/ |
|
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) |
|
{ |
|
int depth; |
|
|
|
BUG_ON(path == NULL); |
|
depth = path->p_depth; |
|
|
|
/* zero-tree has no leaf blocks at all */ |
|
if (depth == 0) |
|
return EXT_MAX_BLOCKS; |
|
|
|
/* go to index block */ |
|
depth--; |
|
|
|
while (depth >= 0) { |
|
if (path[depth].p_idx != |
|
EXT_LAST_INDEX(path[depth].p_hdr)) |
|
return (ext4_lblk_t) |
|
le32_to_cpu(path[depth].p_idx[1].ei_block); |
|
depth--; |
|
} |
|
|
|
return EXT_MAX_BLOCKS; |
|
} |
|
|
|
/* |
|
* ext4_ext_correct_indexes: |
|
* if leaf gets modified and modified extent is first in the leaf, |
|
* then we have to correct all indexes above. |
|
* TODO: do we need to correct tree in all cases? |
|
*/ |
|
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, |
|
struct ext4_ext_path *path) |
|
{ |
|
struct ext4_extent_header *eh; |
|
int depth = ext_depth(inode); |
|
struct ext4_extent *ex; |
|
__le32 border; |
|
int k, err = 0; |
|
|
|
eh = path[depth].p_hdr; |
|
ex = path[depth].p_ext; |
|
|
|
if (unlikely(ex == NULL || eh == NULL)) { |
|
EXT4_ERROR_INODE(inode, |
|
"ex %p == NULL or eh %p == NULL", ex, eh); |
|
return -EIO; |
|
} |
|
|
|
if (depth == 0) { |
|
/* there is no tree at all */ |
|
return 0; |
|
} |
|
|
|
if (ex != EXT_FIRST_EXTENT(eh)) { |
|
/* we correct tree if first leaf got modified only */ |
|
return 0; |
|
} |
|
|
|
/* |
|
* TODO: we need correction if border is smaller than current one |
|
*/ |
|
k = depth - 1; |
|
border = path[depth].p_ext->ee_block; |
|
err = ext4_ext_get_access(handle, inode, path + k); |
|
if (err) |
|
return err; |
|
path[k].p_idx->ei_block = border; |
|
err = ext4_ext_dirty(handle, inode, path + k); |
|
if (err) |
|
return err; |
|
|
|
while (k--) { |
|
/* change all left-side indexes */ |
|
if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) |
|
break; |
|
err = ext4_ext_get_access(handle, inode, path + k); |
|
if (err) |
|
break; |
|
path[k].p_idx->ei_block = border; |
|
err = ext4_ext_dirty(handle, inode, path + k); |
|
if (err) |
|
break; |
|
} |
|
|
|
return err; |
|
} |
|
|
|
int |
|
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, |
|
struct ext4_extent *ex2) |
|
{ |
|
unsigned short ext1_ee_len, ext2_ee_len, max_len; |
|
|
|
/* |
|
* Make sure that either both extents are uninitialized, or |
|
* both are _not_. |
|
*/ |
|
if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) |
|
return 0; |
|
|
|
if (ext4_ext_is_uninitialized(ex1)) |
|
max_len = EXT_UNINIT_MAX_LEN; |
|
else |
|
max_len = EXT_INIT_MAX_LEN; |
|
|
|
ext1_ee_len = ext4_ext_get_actual_len(ex1); |
|
ext2_ee_len = ext4_ext_get_actual_len(ex2); |
|
|
|
if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != |
|
le32_to_cpu(ex2->ee_block)) |
|
return 0; |
|
|
|
/* |
|
* To allow future support for preallocated extents to be added |
|
* as an RO_COMPAT feature, refuse to merge to extents if |
|
* this can result in the top bit of ee_len being set. |
|
*/ |
|
if (ext1_ee_len + ext2_ee_len > max_len) |
|
return 0; |
|
#ifdef AGGRESSIVE_TEST |
|
if (ext1_ee_len >= 4) |
|
return 0; |
|
#endif |
|
|
|
if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) |
|
return 1; |
|
return 0; |
|
} |
|
|
|
/* |
|
* This function tries to merge the "ex" extent to the next extent in the tree. |
|
* It always tries to merge towards right. If you want to merge towards |
|
* left, pass "ex - 1" as argument instead of "ex". |
|
* Returns 0 if the extents (ex and ex+1) were _not_ merged and returns |
|
* 1 if they got merged. |
|
*/ |
|
static int ext4_ext_try_to_merge_right(struct inode *inode, |
|
struct ext4_ext_path *path, |
|
struct ext4_extent *ex) |
|
{ |
|
struct ext4_extent_header *eh; |
|
unsigned int depth, len; |
|
int merge_done = 0; |
|
int uninitialized = 0; |
|
|
|
depth = ext_depth(inode); |
|
BUG_ON(path[depth].p_hdr == NULL); |
|
eh = path[depth].p_hdr; |
|
|
|
while (ex < EXT_LAST_EXTENT(eh)) { |
|
if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) |
|
break; |
|
/* merge with next extent! */ |
|
if (ext4_ext_is_uninitialized(ex)) |
|
uninitialized = 1; |
|
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
|
+ ext4_ext_get_actual_len(ex + 1)); |
|
if (uninitialized) |
|
ext4_ext_mark_uninitialized(ex); |
|
|
|
if (ex + 1 < EXT_LAST_EXTENT(eh)) { |
|
len = (EXT_LAST_EXTENT(eh) - ex - 1) |
|
* sizeof(struct ext4_extent); |
|
memmove(ex + 1, ex + 2, len); |
|
} |
|
le16_add_cpu(&eh->eh_entries, -1); |
|
merge_done = 1; |
|
WARN_ON(eh->eh_entries == 0); |
|
if (!eh->eh_entries) |
|
EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); |
|
} |
|
|
|
return merge_done; |
|
} |
|
|
|
/* |
|
* This function does a very simple check to see if we can collapse |
|
* an extent tree with a single extent tree leaf block into the inode. |
|
*/ |
|
static void ext4_ext_try_to_merge_up(handle_t *handle, |
|
struct inode *inode, |
|
struct ext4_ext_path *path) |
|
{ |
|
size_t s; |
|
unsigned max_root = ext4_ext_space_root(inode, 0); |
|
ext4_fsblk_t blk; |
|
|
|
if ((path[0].p_depth != 1) || |
|
(le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || |
|
(le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) |
|
return; |
|
|
|
/* |
|
* We need to modify the block allocation bitmap and the block |
|
* group descriptor to release the extent tree block. If we |
|
* can't get the journal credits, give up. |
|
*/ |
|
if (ext4_journal_extend(handle, 2)) |
|
return; |
|
|
|
/* |
|
* Copy the extent data up to the inode |
|
*/ |
|
blk = ext4_idx_pblock(path[0].p_idx); |
|
s = le16_to_cpu(path[1].p_hdr->eh_entries) * |
|
sizeof(struct ext4_extent_idx); |
|
s += sizeof(struct ext4_extent_header); |
|
|
|
memcpy(path[0].p_hdr, path[1].p_hdr, s); |
|
path[0].p_depth = 0; |
|
path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + |
|
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); |
|
path[0].p_hdr->eh_max = cpu_to_le16(max_root); |
|
|
|
brelse(path[1].p_bh); |
|
ext4_free_blocks(handle, inode, NULL, blk, 1, |
|
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
|
} |
|
|
|
/* |
|
* This function tries to merge the @ex extent to neighbours in the tree. |
|
* return 1 if merge left else 0. |
|
*/ |
|
static void ext4_ext_try_to_merge(handle_t *handle, |
|
struct inode *inode, |
|
struct ext4_ext_path *path, |
|
struct ext4_extent *ex) { |
|
struct ext4_extent_header *eh; |
|
unsigned int depth; |
|
int merge_done = 0; |
|
|
|
depth = ext_depth(inode); |
|
BUG_ON(path[depth].p_hdr == NULL); |
|
eh = path[depth].p_hdr; |
|
|
|
if (ex > EXT_FIRST_EXTENT(eh)) |
|
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); |
|
|
|
if (!merge_done) |
|
(void) ext4_ext_try_to_merge_right(inode, path, ex); |
|
|
|
ext4_ext_try_to_merge_up(handle, inode, path); |
|
} |
|
|
|
/* |
|
* check if a portion of the "newext" extent overlaps with an |
|
* existing extent. |
|
* |
|
* If there is an overlap discovered, it updates the length of the newext |
|
* such that there will be no overlap, and then returns 1. |
|
* If there is no overlap found, it returns 0. |
|
*/ |
|
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, |
|
struct inode *inode, |
|
struct ext4_extent *newext, |
|
struct ext4_ext_path *path) |
|
{ |
|
ext4_lblk_t b1, b2; |
|
unsigned int depth, len1; |
|
unsigned int ret = 0; |
|
|
|
b1 = le32_to_cpu(newext->ee_block); |
|
len1 = ext4_ext_get_actual_len(newext); |
|
depth = ext_depth(inode); |
|
if (!path[depth].p_ext) |
|
goto out; |
|
b2 = le32_to_cpu(path[depth].p_ext->ee_block); |
|
b2 &= ~(sbi->s_cluster_ratio - 1); |
|
|
|
/* |
|
* get the next allocated block if the extent in the path |
|
* is before the requested block(s) |
|
*/ |
|
if (b2 < b1) { |
|
b2 = ext4_ext_next_allocated_block(path); |
|
if (b2 == EXT_MAX_BLOCKS) |
|
goto out; |
|
b2 &= ~(sbi->s_cluster_ratio - 1); |
|
} |
|
|
|
/* check for wrap through zero on extent logical start block*/ |
|
if (b1 + len1 < b1) { |
|
len1 = EXT_MAX_BLOCKS - b1; |
|
newext->ee_len = cpu_to_le16(len1); |
|
ret = 1; |
|
} |
|
|
|
/* check for overlap */ |
|
if (b1 + len1 > b2) { |
|
newext->ee_len = cpu_to_le16(b2 - b1); |
|
ret = 1; |
|
} |
|
out: |
|
return ret; |
|
} |
|
|
|
/* |
|
* ext4_ext_insert_extent: |
|
* tries to merge requsted extent into the existing extent or |
|
* inserts requested extent as new one into the tree, |
|
* creating new leaf in the no-space case. |
|
*/ |
|
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, |
|
struct ext4_ext_path *path, |
|
struct ext4_extent *newext, int flag) |
|
{ |
|
struct ext4_extent_header *eh; |
|
struct ext4_extent *ex, *fex; |
|
struct ext4_extent *nearex; /* nearest extent */ |
|
struct ext4_ext_path *npath = NULL; |
|
int depth, len, err; |
|
ext4_lblk_t next; |
|
unsigned uninitialized = 0; |
|
int flags = 0; |
|
|
|
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
|
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); |
|
return -EIO; |
|
} |
|
depth = ext_depth(inode); |
|
ex = path[depth].p_ext; |
|
if (unlikely(path[depth].p_hdr == NULL)) { |
|
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
|
return -EIO; |
|
} |
|
|
|
/* try to insert block into found extent and return */ |
|
if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) |
|
&& ext4_can_extents_be_merged(inode, ex, newext)) { |
|
ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n", |
|
ext4_ext_is_uninitialized(newext), |
|
ext4_ext_get_actual_len(newext), |
|
le32_to_cpu(ex->ee_block), |
|
ext4_ext_is_uninitialized(ex), |
|
ext4_ext_get_actual_len(ex), |
|
ext4_ext_pblock(ex)); |
|
err = ext4_ext_get_access(handle, inode, path + depth); |
|
if (err) |
|
return err; |
|
|
|
/* |
|
* ext4_can_extents_be_merged should have checked that either |
|
* both extents are uninitialized, or both aren't. Thus we |
|
* need to check only one of them here. |
|
*/ |
|
if (ext4_ext_is_uninitialized(ex)) |
|
uninitialized = 1; |
|
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
|
+ ext4_ext_get_actual_len(newext)); |
|
if (uninitialized) |
|
ext4_ext_mark_uninitialized(ex); |
|
eh = path[depth].p_hdr; |
|
nearex = ex; |
|
goto merge; |
|
} |
|
|
|
depth = ext_depth(inode); |
|
eh = path[depth].p_hdr; |
|
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) |
|
goto has_space; |
|
|
|
/* probably next leaf has space for us? */ |
|
fex = EXT_LAST_EXTENT(eh); |
|
next = EXT_MAX_BLOCKS; |
|
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) |
|
next = ext4_ext_next_leaf_block(path); |
|
if (next != EXT_MAX_BLOCKS) { |
|
ext_debug("next leaf block - %u\n", next); |
|
BUG_ON(npath != NULL); |
|
npath = ext4_ext_find_extent(inode, next, NULL); |
|
if (IS_ERR(npath)) |
|
return PTR_ERR(npath); |
|
BUG_ON(npath->p_depth != path->p_depth); |
|
eh = npath[depth].p_hdr; |
|
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { |
|
ext_debug("next leaf isn't full(%d)\n", |
|
le16_to_cpu(eh->eh_entries)); |
|
path = npath; |
|
goto has_space; |
|
} |
|
ext_debug("next leaf has no free space(%d,%d)\n", |
|
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
|
} |
|
|
|
/* |
|
* There is no free space in the found leaf. |
|
* We're gonna add a new leaf in the tree. |
|
*/ |
|
if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) |
|
flags = EXT4_MB_USE_ROOT_BLOCKS; |
|
err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); |
|
if (err) |
|
goto cleanup; |
|
depth = ext_depth(inode); |
|
eh = path[depth].p_hdr; |
|
|
|
has_space: |
|
nearex = path[depth].p_ext; |
|
|
|
err = ext4_ext_get_access(handle, inode, path + depth); |
|
if (err) |
|
goto cleanup; |
|
|
|
if (!nearex) { |
|
/* there is no extent in this leaf, create first one */ |
|
ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", |
|
le32_to_cpu(newext->ee_block), |
|
ext4_ext_pblock(newext), |
|
ext4_ext_is_uninitialized(newext), |
|
ext4_ext_get_actual_len(newext)); |
|
nearex = EXT_FIRST_EXTENT(eh); |
|
} else { |
|
if (le32_to_cpu(newext->ee_block) |
|
> le32_to_cpu(nearex->ee_block)) { |
|
/* Insert after */ |
|
ext_debug("insert %u:%llu:[%d]%d before: " |
|
"nearest %p\n", |
|
le32_to_cpu(newext->ee_block), |
|
ext4_ext_pblock(newext), |
|
ext4_ext_is_uninitialized(newext), |
|
ext4_ext_get_actual_len(newext), |
|
nearex); |
|
nearex++; |
|
} else { |
|
/* Insert before */ |
|
BUG_ON(newext->ee_block == nearex->ee_block); |
|
ext_debug("insert %u:%llu:[%d]%d after: " |
|
"nearest %p\n", |
|
le32_to_cpu(newext->ee_block), |
|
ext4_ext_pblock(newext), |
|
ext4_ext_is_uninitialized(newext), |
|
ext4_ext_get_actual_len(newext), |
|
nearex); |
|
} |
|
len = EXT_LAST_EXTENT(eh) - nearex + 1; |
|
if (len > 0) { |
|
ext_debug("insert %u:%llu:[%d]%d: " |
|
"move %d extents from 0x%p to 0x%p\n", |
|
le32_to_cpu(newext->ee_block), |
|
ext4_ext_pblock(newext), |
|
ext4_ext_is_uninitialized(newext), |
|
ext4_ext_get_actual_len(newext), |
|
len, nearex, nearex + 1); |
|
memmove(nearex + 1, nearex, |
|
len * sizeof(struct ext4_extent)); |
|
} |
|
} |
|
|
|
le16_add_cpu(&eh->eh_entries, 1); |
|
path[depth].p_ext = nearex; |
|
nearex->ee_block = newext->ee_block; |
|
ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); |
|
nearex->ee_len = newext->ee_len; |
|
|
|
merge: |
|
/* try to merge extents */ |
|
if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) |
|
ext4_ext_try_to_merge(handle, inode, path, nearex); |
|
|
|
|
|
/* time to correct all indexes above */ |
|
err = ext4_ext_correct_indexes(handle, inode, path); |
|
if (err) |
|
goto cleanup; |
|
|
|
err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
|
|
|
cleanup: |
|
if (npath) { |
|
ext4_ext_drop_refs(npath); |
|
kfree(npath); |
|
} |
|
ext4_ext_invalidate_cache(inode); |
|
return err; |
|
} |
|
|
|
static int ext4_fill_fiemap_extents(struct inode *inode,
				    ext4_lblk_t block, ext4_lblk_t num,
				    struct fiemap_extent_info *fieinfo)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache newex;
	struct ext4_extent *ex;
	ext4_lblk_t next, next_del, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int exists, depth = 0, err = 0;
	unsigned int flags = 0;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);

		if (path && ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);
		ext4_ext_drop_refs(path);

		flags = 0;
		exists = 0;
		if (!ex) {
			/* there is no extent here, so the whole
			 * requested range is a hole */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* the range before the found extent is a hole */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* the range after the found extent (up to the
			 * next allocated block) is a hole */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of the requested range is covered
			 * by the found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			newex.ec_block = start;
			newex.ec_len = end - start;
			newex.ec_start = 0;
		} else {
			newex.ec_block = le32_to_cpu(ex->ee_block);
			newex.ec_len = ext4_ext_get_actual_len(ex);
			newex.ec_start = ext4_ext_pblock(ex);
			if (ext4_ext_is_uninitialized(ex))
				flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		/*
		 * Find delayed extent and update newex accordingly. We call
		 * it even in !exists case to find out whether newex is the
		 * last existing extent or not.
		 */
		next_del = ext4_find_delayed_extent(inode, &newex);
		if (!exists && next_del) {
			exists = 1;
			flags |= FIEMAP_EXTENT_DELALLOC;
		}
		up_read(&EXT4_I(inode)->i_data_sem);

		if (unlikely(newex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
			err = -EIO;
			break;
		}

		/* This is possible iff next == next_del == EXT_MAX_BLOCKS */
		if (next == next_del) {
			flags |= FIEMAP_EXTENT_LAST;
			if (unlikely(next_del != EXT_MAX_BLOCKS ||
				     next != EXT_MAX_BLOCKS)) {
				EXT4_ERROR_INODE(inode,
						 "next extent == %u, next "
						 "delalloc extent = %u",
						 next, next_del);
				err = -EIO;
				break;
			}
		}

		if (exists) {
			err = fiemap_fill_next_extent(fieinfo,
				(__u64)newex.ec_block << blksize_bits,
				(__u64)newex.ec_start << blksize_bits,
				(__u64)newex.ec_len << blksize_bits,
				flags);
			if (err < 0)
				break;
			if (err == 1) {
				err = 0;
				break;
			}
		}

		block = newex.ec_block + newex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
|
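/*
 * ext4_ext_put_in_cache:
 * Store a single extent (or, when @start == 0, a gap) in the inode's
 * one-entry extent cache.  i_block_reservation_lock is borrowed to
 * protect i_cached_extent, matching ext4_ext_in_cache() below.
 */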
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	trace_ext4_ext_put_in_cache(inode, block, len, start);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}
|
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCKS;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
}
|
/*
 * ext4_ext_in_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * extent pointer.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if the cache is invalid; 1 if the cache is valid
 */
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
		  struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = 0;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* does the cache hold valid data? */
	if (cex->ec_len == 0)
		goto errout;

	if (in_range(block, cex->ec_block, cex->ec_len)) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = 1;
	}
errout:
	trace_ext4_ext_in_cache(inode, block, ret);
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}
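/*
 * Illustrative example (hypothetical numbers): if an inode has extents
 * covering blocks [0, 10) and [50, 60), a lookup of logical block 20
 * reaches ext4_ext_put_gap_in_cache(), which caches the gap as 10:40
 * with ec_start == 0.  A later ext4_ext_in_cache(inode, 25, &ex) then
 * returns 1 with a zero physical block, so the caller can recognize
 * the hole without walking the tree again.
 */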
|
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path, int depth)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	depth--;
	path = path + depth;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);

	while (--depth >= 0) {
		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
			break;
		path--;
		err = ext4_ext_get_access(handle, inode, path);
		if (err)
			break;
		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
		err = ext4_ext_dirty(handle, inode, path);
		if (err)
			break;
	}
	return err;
}
|
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When the actual path is passed, the caller should calculate the
 * credits under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in the leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 * There is some space in the leaf, so there is
			 * no need to account for a leaf block credit.
			 *
			 * Bitmaps, block group descriptor blocks and
			 * other metadata blocks still need to be
			 * accounted for.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
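/*
 * In other words: if the target leaf still has a free slot, inserting a
 * single extent is charged 2 credits (1 bitmap + 1 group descriptor)
 * plus EXT4_META_TRANS_BLOCKS() worth of other metadata credits;
 * otherwise the generic ext4_chunk_trans_blocks() worst-case estimate
 * is used.
 */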
|
/*
 * How many index/leaf blocks need to be changed/allocated to modify
 * nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then in the
 * worst case one index/leaf block per tree level needs to be changed;
 * if the tree splits due to the insertion of a new extent, the old
 * index/leaf blocks need to be updated as well.
 *
 * If the nrblocks are discontiguous, they could cause the whole tree
 * to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth;

	/* If we are converting the inline data, only one is needed here. */
	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
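/*
 * For example, with a tree of depth 2: a request that fits in one
 * contiguous chunk (chunk == 1) is charged 2 * 2 = 4 index/leaf blocks,
 * while a discontiguous request (chunk == 0) is charged 2 * 3 = 6 to
 * leave headroom for the tree splitting more than once.
 */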
|
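/*
 * ext4_remove_blocks:
 * Free the on-disk blocks backing the logical range [from, to] of
 * extent @ex.  On bigalloc file systems, a cluster shared with blocks
 * outside the range is not freed immediately; it is remembered in
 * *partial_cluster so that a later call (or the end of the truncate
 * operation) can release it once it is known to be unused.
 */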
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      ext4_fsblk_t *partial_cluster,
			      ext4_lblk_t from, ext4_lblk_t to)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	ext4_fsblk_t pblk;
	int flags = 0;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we make a note
	 * that we tried freeing the cluster, and check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of the ext4_truncate() operation.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;

	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
	/*
	 * If we have a partial cluster, and it's different from the
	 * cluster of the last block, we need to explicitly free the
	 * partial cluster here.
	 */
	pblk = ext4_ext_pblock(ex) + ee_len - 1;
	if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
	}

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode