@@ -202,6 +202,7 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
 
 	ce->ce_flags = (ce->ce_flags & ~clear) | set;
 	return add_index_entry(&o->result, ce,
+			       o->extra_add_index_flags |
 			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
 }
 
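The hunk above routes a new per-operation field into every index add. To see the pattern outside git, here is a minimal standalone C sketch — the struct and flag names are hypothetical stand-ins, not git's: a fast path sets extra flags once, and each add-style call ORs them into its fixed flags.

#include <stdio.h>

/* Hypothetical stand-ins for git's ADD_CACHE_* bits */
#define ADD_OK_TO_ADD     (1 << 0)
#define ADD_OK_TO_REPLACE (1 << 1)
#define ADD_SKIP_DFCHECK  (1 << 2)

struct opts {
	unsigned extra_add_flags; /* set once, picked up by every add */
};

static void add_entry(struct opts *o, const char *name)
{
	unsigned flags = o->extra_add_flags | ADD_OK_TO_ADD | ADD_OK_TO_REPLACE;
	printf("add %s with flags 0x%x\n", name, flags);
}

int main(void)
{
	struct opts o = { 0 };
	add_entry(&o, "normal");            /* flags 0x3 */
	o.extra_add_flags = ADD_SKIP_DFCHECK;
	add_entry(&o, "fast-path");         /* flags 0x7 */
	return 0;
}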
@@ -344,6 +345,7 @@ static int check_updates(struct unpack_trees_options *o)
 	struct progress *progress = NULL;
 	struct index_state *index = &o->result;
 	struct checkout state = CHECKOUT_INIT;
+	uint64_t start = getnanotime();
 	int i;
 
 	state.force = 1;
@@ -414,6 +416,7 @@ static int check_updates(struct unpack_trees_options *o)
 	errs |= finish_delayed_checkout(&state);
 	if (o->update)
 		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
+	trace_performance_since(start, "update worktree after a merge");
 	return errs != 0;
 }
 
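Both timing hunks follow the same shape: record getnanotime() on entry, report the elapsed time with trace_performance_since() on the way out. A minimal sketch of that pattern in portable C, assuming clock_gettime(CLOCK_MONOTONIC) as a stand-in for git's trace API:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for git's getnanotime(): monotonic nanoseconds */
static uint64_t nanotime(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000u + ts.tv_nsec;
}

int main(void)
{
	uint64_t start = nanotime();
	/* ... the work being measured goes here ... */
	printf("performance: %.9f s: %s\n",
	       (nanotime() - start) / 1e9, "update worktree after a merge");
	return 0;
}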
@@ -633,6 +636,132 @@ static inline int are_same_oid(struct name_entry *name_j, struct name_entry *nam
 	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
 }
 
+static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
+					struct name_entry *names,
+					struct traverse_info *info)
+{
+	struct unpack_trees_options *o = info->data;
+	int i;
+
+	if (!o->merge || dirmask != ((1 << n) - 1))
+		return 0;
+
+	for (i = 1; i < n; i++)
+		if (!are_same_oid(names, names + i))
+			return 0;
+
+	return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
+}
+
+static int index_pos_by_traverse_info(struct name_entry *names,
+				      struct traverse_info *info)
+{
+	struct unpack_trees_options *o = info->data;
+	int len = traverse_path_len(info, names);
+	char *name = xmalloc(len + 1 /* slash */ + 1 /* NUL */);
+	int pos;
+
+	make_traverse_path(name, info, names);
+	name[len++] = '/';
+	name[len] = '\0';
+	pos = index_name_pos(o->src_index, name, len);
+	if (pos >= 0)
+		BUG("This is a directory and should not exist in index");
+	pos = -pos - 1;
+	if (!starts_with(o->src_index->cache[pos]->name, name) ||
+	    (pos > 0 && starts_with(o->src_index->cache[pos - 1]->name, name)))
+		BUG("pos must point at the first entry in this directory");
+	free(name);
+	return pos;
+}
+
+/*
+ * Fast path if we detect that all trees are the same as cache-tree at this
+ * path. We'll walk these trees recursively using cache-tree/index instead of
+ * ODB since we already know what these trees contain.
+ */
+static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
+				  struct name_entry *names,
+				  struct traverse_info *info)
+{
+	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
+	struct unpack_trees_options *o = info->data;
+	struct cache_entry *tree_ce = NULL;
+	int ce_len = 0;
+	int i, d;
+
+	if (!o->merge)
+		BUG("We need cache-tree to do this optimization");
+
+	/*
+	 * Try to keep add_index_entry() as fast as possible since
+	 * we're going to do a lot of them.
+	 *
+	 * Skipping verify_path() should totally be safe because these
+	 * paths are from the source index, which must have been
+	 * verified.
+	 *
+	 * Skipping D/F and cache-tree validation checks is trickier
+	 * because it assumes what the n-merge code would do when all
+	 * trees and the index are the same. We probably could just
+	 * optimize that code instead (e.g. we don't invalidate that
+	 * many cache-tree entries, but searching for them is very
+	 * expensive).
+	 */
+	o->extra_add_index_flags = ADD_CACHE_SKIP_DFCHECK;
+	o->extra_add_index_flags |= ADD_CACHE_SKIP_VERIFY_PATH;
+
+	/*
+	 * Do what unpack_callback() and unpack_nondirectories() normally
+	 * do. But we walk all paths recursively in just one loop instead.
+	 *
+	 * D/F conflicts and staged entries are not a concern because
+	 * cache-tree would be invalidated and we would never get here
+	 * in the first place.
+	 */
+	for (i = 0; i < nr_entries; i++) {
+		int new_ce_len, len, rc;
+
+		src[0] = o->src_index->cache[pos + i];
+
+		len = ce_namelen(src[0]);
+		new_ce_len = cache_entry_size(len);
+
+		if (new_ce_len > ce_len) {
+			new_ce_len <<= 1;
+			tree_ce = xrealloc(tree_ce, new_ce_len);
+			memset(tree_ce, 0, new_ce_len);
+			ce_len = new_ce_len;
+
+			tree_ce->ce_flags = create_ce_flags(0);
+
+			for (d = 1; d <= nr_names; d++)
+				src[d] = tree_ce;
+		}
+
+		tree_ce->ce_mode = src[0]->ce_mode;
+		tree_ce->ce_namelen = len;
+		oidcpy(&tree_ce->oid, &src[0]->oid);
+		memcpy(tree_ce->name, src[0]->name, len + 1);
+
+		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
+		if (rc < 0) {
+			free(tree_ce);
+			return rc;
+		}
+
+		mark_ce_used(src[0], o);
+	}
+	o->extra_add_index_flags = 0;
+	free(tree_ce);
+	if (o->debug_unpack)
+		printf("Unpacked %d entries from %s to %s using cache-tree\n",
+		       nr_entries,
+		       o->src_index->cache[pos]->name,
+		       o->src_index->cache[pos + nr_entries - 1]->name);
+	return 0;
+}
+
 static int traverse_trees_recursive(int n, unsigned long dirmask,
 				    unsigned long df_conflicts,
 				    struct name_entry *names,
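The guard in all_trees_same_as_cache_tree() combines two checks: the dirmask must have all n low bits set (every tree has a directory at this path), and all n directory entries must name the same tree object. A compilable toy version of just that predicate, with a simplified entry type standing in for git's name_entry:

#include <string.h>

/* Simplified stand-in: git's real name_entry carries an object_id. */
struct entry { const char *oid; };

static int same_oid(const struct entry *a, const struct entry *b)
{
	return a->oid && b->oid && !strcmp(a->oid, b->oid);
}

/* Nonzero only if the low n bits of dirmask are all set and every
 * one of the n entries names the same tree object. */
static int all_trees_identical(int n, unsigned long dirmask,
			       const struct entry *names)
{
	int i;
	if (dirmask != ((1UL << n) - 1))
		return 0;
	for (i = 1; i < n; i++)
		if (!same_oid(names, names + i))
			return 0;
	return 1;
}

int main(void)
{
	struct entry names[2] = { { "abc123" }, { "abc123" } };
	return !all_trees_identical(2, 0x3, names); /* exits 0: fast path taken */
}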
@@ -644,6 +773,17 @@ static int traverse_trees_recursive(int n, unsigned long dirmask,
 	void *buf[MAX_UNPACK_TREES];
 	struct traverse_info newinfo;
 	struct name_entry *p;
+	int nr_entries;
+
+	nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
+	if (nr_entries > 0) {
+		struct unpack_trees_options *o = info->data;
+		int pos = index_pos_by_traverse_info(names, info);
+
+		if (!o->merge || df_conflicts)
+			BUG("Wrong condition to get here buddy");
+		return traverse_by_cache_tree(pos, nr_entries, n, names, info);
+	}
 
 	p = names;
 	while (!p->mode)
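index_pos_by_traverse_info() leans on index_name_pos()'s return convention: a negative value of -insert_pos - 1 when the name is absent, so that `-pos - 1` recovers where the first entry under the directory would sit. A self-contained sketch of that convention over a plain sorted string array (the array and names here are illustrative, not git's index):

#include <stdio.h>
#include <string.h>

/* Binary search mirroring index_name_pos(): returns the match index
 * if found, otherwise -(insertion point) - 1. */
static int name_pos(const char **arr, int nr, const char *name)
{
	int lo = 0, hi = nr;
	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;
		int cmp = strcmp(arr[mid], name);
		if (!cmp)
			return mid;
		if (cmp < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return -lo - 1;
}

int main(void)
{
	const char *index[] = { "dir/a", "dir/b", "lib/x" };
	int pos = name_pos(index, 3, "dir/"); /* "dir/" itself is absent */
	if (pos < 0)
		pos = -pos - 1;               /* first entry under dir/ */
	printf("first entry in dir/: %s\n", index[pos]); /* dir/a */
	return 0;
}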
@@ -810,6 +950,11 @@ static struct cache_entry *create_ce_entry(const struct traverse_info *info,
 	return ce;
 }
 
+/*
+ * Note that traverse_by_cache_tree() duplicates some logic in this function
+ * without actually calling it. If you change the logic here you may need to
+ * check and change there as well.
+ */
 static int unpack_nondirectories(int n, unsigned long mask,
 				 unsigned long dirmask,
 				 struct cache_entry **src,
@@ -1002,6 +1147,11 @@ static void debug_unpack_callback(int n,
 		debug_name_entry(i, names + i);
 }
 
+/*
+ * Note that traverse_by_cache_tree() duplicates some logic in this function
+ * without actually calling it. If you change the logic here you may need to
+ * check and change there as well.
+ */
 static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
 {
 	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
@@ -1281,9 +1431,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	int i, ret;
 	static struct cache_entry *dfc;
 	struct exclude_list el;
+	uint64_t start = getnanotime();
 
 	if (len > MAX_UNPACK_TREES)
-		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
+		die(_("unpack_trees takes at most %d trees"), MAX_UNPACK_TREES);
 
 	memset(&el, 0, sizeof(el));
 	if (!core_apply_sparse_checkout || !o->update)
@@ -1431,12 +1582,20 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 			goto done;
 		}
 	}
+	trace_performance_since(start, "unpack trees");
 
 	ret = check_updates(o) ? (-2) : 0;
 	if (o->dst_index) {
 		if (!ret) {
 			if (!o->result.cache_tree)
 				o->result.cache_tree = cache_tree();
+			/*
+			 * TODO: Walk o->src_index->cache_tree, quickly check
+			 * if o->result.cache has the exact same content for
+			 * any valid cache-tree in o->src_index, then we can
+			 * just copy the cache-tree over instead of hashing a
+			 * new tree object.
+			 */
 			if (!cache_tree_fully_valid(o->result.cache_tree))
 				cache_tree_update(&o->result,
 						  WRITE_TREE_SILENT |