22
22
#include "ctree.h"
23
23
#include "delayed-ref.h"
24
24
#include "transaction.h"
25
+ #include "qgroup.h"
25
26
26
27
struct kmem_cache * btrfs_delayed_ref_head_cachep ;
27
28
struct kmem_cache * btrfs_delayed_tree_ref_cachep ;
@@ -420,12 +421,14 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
420
421
static noinline struct btrfs_delayed_ref_head *
421
422
add_delayed_ref_head (struct btrfs_fs_info * fs_info ,
422
423
struct btrfs_trans_handle * trans ,
423
- struct btrfs_delayed_ref_node * ref , u64 bytenr ,
424
- u64 num_bytes , int action , int is_data )
424
+ struct btrfs_delayed_ref_node * ref ,
425
+ struct btrfs_qgroup_extent_record * qrecord ,
426
+ u64 bytenr , u64 num_bytes , int action , int is_data )
425
427
{
426
428
struct btrfs_delayed_ref_head * existing ;
427
429
struct btrfs_delayed_ref_head * head_ref = NULL ;
428
430
struct btrfs_delayed_ref_root * delayed_refs ;
431
+ struct btrfs_qgroup_extent_record * qexisting ;
429
432
int count_mod = 1 ;
430
433
int must_insert_reserved = 0 ;
431
434
@@ -474,6 +477,18 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
474
477
head_ref -> processing = 0 ;
475
478
head_ref -> total_ref_mod = count_mod ;
476
479
480
+ /* Record qgroup extent info if provided */
481
+ if (qrecord ) {
482
+ qrecord -> bytenr = bytenr ;
483
+ qrecord -> num_bytes = num_bytes ;
484
+ qrecord -> old_roots = NULL ;
485
+
486
+ qexisting = btrfs_qgroup_insert_dirty_extent (delayed_refs ,
487
+ qrecord );
488
+ if (qexisting )
489
+ kfree (qrecord );
490
+ }
491
+
477
492
spin_lock_init (& head_ref -> lock );
478
493
mutex_init (& head_ref -> mutex );
479
494
@@ -624,6 +639,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
624
639
struct btrfs_delayed_tree_ref * ref ;
625
640
struct btrfs_delayed_ref_head * head_ref ;
626
641
struct btrfs_delayed_ref_root * delayed_refs ;
642
+ struct btrfs_qgroup_extent_record * record = NULL ;
627
643
628
644
if (!is_fstree (ref_root ) || !fs_info -> quota_enabled )
629
645
no_quota = 0 ;
@@ -639,6 +655,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
639
655
return - ENOMEM ;
640
656
}
641
657
658
+ if (fs_info -> quota_enabled && is_fstree (ref_root )) {
659
+ record = kmalloc (sizeof (* record ), GFP_NOFS );
660
+ if (!record ) {
661
+ kmem_cache_free (btrfs_delayed_tree_ref_cachep , ref );
662
kmem_cache_free (btrfs_delayed_ref_head_cachep , head_ref );
663
+ return - ENOMEM ;
664
+ }
665
+ }
666
+
642
667
head_ref -> extent_op = extent_op ;
643
668
644
669
delayed_refs = & trans -> transaction -> delayed_refs ;
@@ -648,7 +673,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
648
673
* insert both the head node and the new ref without dropping
649
674
* the spin lock
650
675
*/
651
- head_ref = add_delayed_ref_head (fs_info , trans , & head_ref -> node ,
676
+ head_ref = add_delayed_ref_head (fs_info , trans , & head_ref -> node , record ,
652
677
bytenr , num_bytes , action , 0 );
653
678
654
679
add_delayed_tree_ref (fs_info , trans , head_ref , & ref -> node , bytenr ,
@@ -673,6 +698,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
673
698
struct btrfs_delayed_data_ref * ref ;
674
699
struct btrfs_delayed_ref_head * head_ref ;
675
700
struct btrfs_delayed_ref_root * delayed_refs ;
701
+ struct btrfs_qgroup_extent_record * record = NULL ;
676
702
677
703
if (!is_fstree (ref_root ) || !fs_info -> quota_enabled )
678
704
no_quota = 0 ;
@@ -688,6 +714,16 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
688
714
return - ENOMEM ;
689
715
}
690
716
717
+ if (fs_info -> quota_enabled && is_fstree (ref_root )) {
718
+ record = kmalloc (sizeof (* record ), GFP_NOFS );
719
+ if (!record ) {
720
+ kmem_cache_free (btrfs_delayed_data_ref_cachep , ref );
721
+ kmem_cache_free (btrfs_delayed_ref_head_cachep ,
722
+ head_ref );
723
+ return - ENOMEM ;
724
+ }
725
+ }
726
+
691
727
head_ref -> extent_op = extent_op ;
692
728
693
729
delayed_refs = & trans -> transaction -> delayed_refs ;
@@ -697,7 +733,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
697
733
* insert both the head node and the new ref without dropping
698
734
* the spin lock
699
735
*/
700
- head_ref = add_delayed_ref_head (fs_info , trans , & head_ref -> node ,
736
+ head_ref = add_delayed_ref_head (fs_info , trans , & head_ref -> node , record ,
701
737
bytenr , num_bytes , action , 1 );
702
738
703
739
add_delayed_data_ref (fs_info , trans , head_ref , & ref -> node , bytenr ,
@@ -725,9 +761,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
725
761
delayed_refs = & trans -> transaction -> delayed_refs ;
726
762
spin_lock (& delayed_refs -> lock );
727
763
728
- add_delayed_ref_head (fs_info , trans , & head_ref -> node , bytenr ,
729
- num_bytes , BTRFS_UPDATE_DELAYED_HEAD ,
730
- extent_op -> is_data );
764
+ add_delayed_ref_head (fs_info , trans , & head_ref -> node , NULL , bytenr ,
765
+ num_bytes , BTRFS_UPDATE_DELAYED_HEAD ,
766
+ extent_op -> is_data );
731
767
732
768
spin_unlock (& delayed_refs -> lock );
733
769
return 0 ;
0 commit comments