
Commit df2825e

btrfs: always pass readahead state to defrag
Defrag ioctl passes the readahead state from the file, but autodefrag does not have a file, so the readahead state is allocated when needed. The autodefrag loop in the cleaner thread iterates over inodes, so we can simply provide an on-stack readahead state and will not need to allocate it in btrfs_defrag_file(). The size is 32 bytes, which is acceptable.

Reviewed-by: Qu Wenruo <[email protected]>
Signed-off-by: David Sterba <[email protected]>
1 parent 11e3107
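For readers who prefer the resulting code to the diff, the snippet below condenses the new calling convention from the hunks that follow: the cleaner-thread loop keeps one struct file_ra_state on its stack, re-initializes it for each inode with file_ra_state_init(), and passes it down so that btrfs_defrag_file() never has to allocate one. This is a simplified sketch assembled from the diff, not a compilable excerpt of fs/btrfs/defrag.c; locking, error handling and most declarations are elided.

	int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
	{
		atomic_inc(&fs_info->defrag_running);
		while (1) {
			/* 32-byte readahead state on the cleaner thread's stack. */
			struct file_ra_state ra = { 0 };
			struct inode_defrag *defrag;

			/* ... pick the next inode_defrag entry, honor remount/pause ... */

			btrfs_run_defrag_inode(fs_info, defrag, &ra);
		}
		atomic_dec(&fs_info->defrag_running);
		/* ... */
	}

	static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
					  struct inode_defrag *defrag,
					  struct file_ra_state *ra)
	{
		/* ... look up the inode and fill in the defrag range ... */

		/* Reset the caller-provided readahead state for this inode. */
		file_ra_state_init(ra, inode->i_mapping);

		sb_start_write(fs_info->sb);
		ret = btrfs_defrag_file(inode, ra, &range, defrag->transid,
					BTRFS_DEFRAG_BATCH);
		sb_end_write(fs_info->sb);
		iput(inode);
		/* ... */
	}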

1 file changed: +11, -21 lines

fs/btrfs/defrag.c

Lines changed: 11 additions & 21 deletions
@@ -219,7 +219,8 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 #define BTRFS_DEFRAG_BATCH	1024
 
 static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
-				  struct inode_defrag *defrag)
+				  struct inode_defrag *defrag,
+				  struct file_ra_state *ra)
 {
 	struct btrfs_root *inode_root;
 	struct inode *inode;
@@ -258,9 +259,10 @@ static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 	range.len = (u64)-1;
 	range.start = cur;
 	range.extent_thresh = defrag->extent_thresh;
+	file_ra_state_init(ra, inode->i_mapping);
 
 	sb_start_write(fs_info->sb);
-	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+	ret = btrfs_defrag_file(inode, ra, &range, defrag->transid,
 				BTRFS_DEFRAG_BATCH);
 	sb_end_write(fs_info->sb);
 	iput(inode);
@@ -287,6 +289,8 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 
 	atomic_inc(&fs_info->defrag_running);
 	while (1) {
+		struct file_ra_state ra = { 0 };
+
 		/* Pause the auto defragger. */
 		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
 			break;
@@ -309,7 +313,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 		first_ino = defrag->ino + 1;
 		root_objectid = defrag->root;
 
-		btrfs_run_defrag_inode(fs_info, defrag);
+		btrfs_run_defrag_inode(fs_info, defrag, &ra);
 	}
 	atomic_dec(&fs_info->defrag_running);
 
@@ -1302,8 +1306,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
 		if (entry->start + range_len <= *last_scanned_ret)
 			continue;
 
-		if (ra)
-			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
+		page_cache_sync_readahead(inode->vfs_inode.i_mapping,
 				ra, NULL, entry->start >> PAGE_SHIFT,
 				((entry->start + range_len - 1) >> PAGE_SHIFT) -
 				(entry->start >> PAGE_SHIFT) + 1);
@@ -1335,7 +1338,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
  * Entry point to file defragmentation.
  *
  * @inode:	   inode to be defragged
- * @ra:		   readahead state (can be NULL)
+ * @ra:		   readahead state
  * @range:	   defrag options including range and flags
  * @newer_than:	   minimum transid to defrag
  * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
@@ -1357,12 +1360,13 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	u64 cur;
 	u64 last_byte;
 	bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
-	bool ra_allocated = false;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	int ret = 0;
 	u32 extent_thresh = range->extent_thresh;
 	pgoff_t start_index;
 
+	ASSERT(ra);
+
 	if (isize == 0)
 		return 0;
 
@@ -1391,18 +1395,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	cur = round_down(range->start, fs_info->sectorsize);
 	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
 
-	/*
-	 * If we were not given a ra, allocate a readahead context. As
-	 * readahead is just an optimization, defrag will work without it so
-	 * we don't error out.
-	 */
-	if (!ra) {
-		ra_allocated = true;
-		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
-		if (ra)
-			file_ra_state_init(ra, inode->i_mapping);
-	}
-
 	/*
 	 * Make writeback start from the beginning of the range, so that the
 	 * defrag range can be written sequentially.
@@ -1457,8 +1449,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 		cond_resched();
 	}
 
-	if (ra_allocated)
-		kfree(ra);
 	/*
 	 * Update range.start for autodefrag, this will indicate where to start
 	 * in next run.
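On the callee side, the change removes the NULL handling: btrfs_defrag_file() now asserts that a readahead state was supplied, and defrag_one_cluster() uses it unconditionally. The fragment below restates that contract for both callers; the ioctl call site is not part of this diff, so its arguments are shown only as implied by the commit message (the readahead state comes from the file itself) and should be treated as illustrative.

	/* Callee: a readahead state is now mandatory. */
	int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
			      struct btrfs_ioctl_defrag_range_args *range,
			      u64 newer_than, unsigned long max_to_defrag)
	{
		ASSERT(ra);	/* both callers provide one; no kzalloc fallback */
		/* ... */
	}

	/* Caller 1: autodefrag passes the on-stack state shown earlier. */
	/* Caller 2 (sketch, not in this diff): the defrag ioctl passes the
	 * readahead state embedded in its struct file, roughly:
	 *
	 *	ret = btrfs_defrag_file(file_inode(file), &file->f_ra, range,
	 *				newer_than, max_to_defrag);
	 */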
