@@ -176,13 +176,13 @@
 --- }
 --- }

-local json = require('json')
 local utils = require('graphql.utils')
 local core_util = require('graphql.core.util')
 local core_types = require('graphql.core.types')
 local core_query_util = require('graphql.core.query_util')
 local core_validate_variables = require('graphql.core.validate_variables')
 local core_introspection = require('graphql.core.introspection')
+local request_batch = require('graphql.request_batch')

 -- XXX: Possible cache_only requests refactoring. Maybe just set
 -- `is_cache_only` flag in prepared_object, find such requests within the
@@ -201,13 +201,9 @@ local core_introspection = require('graphql.core.introspection')
 -- Maybe we also can remove separate `is_list = true` case processing over the
 -- code, which can heavily simplify things.

--- XXX: Currently the executor cannot batch similar requests in case when a
--- collection has two connections and so the requests to the different
--- connected (destination) collections placed in the list each after each. The
--- simplest way to mitigate this issue is to consume similar requests (in
--- `fetch_first_same`) w/o respect to the order (traverse until end of the
--- `open_set` or a count limit). The alternative is to introduce resorting
--- prepared requests step.
+-- XXX: It would be more natural to have a list of tables with field_info
+-- content + field_name instead of fields_info, to handle each prepared
+-- resolve separately and add the ability to reorder it.

 local bfs_executor = {}

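A minimal illustration of the reshaping the new XXX note proposes. The field names and resolve stubs below are hypothetical, not taken from the patch; the point is only the difference between the two shapes:

-- Assumed current shape: a map keyed by field name. pairs() iteration
-- order over a map is unspecified, so prepared resolves cannot be
-- deliberately reordered.
local fields_info_map = {
    user = {prepared_resolve = {is_calculated = false}},
    orders = {prepared_resolve = {is_calculated = false}},
}

-- Proposed shape: a list of tables carrying field_name alongside the
-- field info. The order is explicit, so a resorting step could place
-- similar requests next to each other.
local fields_info_list = {
    {field_name = 'user', prepared_resolve = {is_calculated = false}},
    {field_name = 'orders', prepared_resolve = {is_calculated = false}},
}
table.sort(fields_info_list, function(a, b)
    return a.field_name < b.field_name
end)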
@@ -784,34 +780,19 @@ fetch_first_same = function(open_set, opts)
             size = i
             break
         end
-        local prepared_select = prepared_resolve.prepared_select
-        local request_opts = prepared_select.request_opts
-        local collection_name = prepared_select.collection_name
-        local index_name = request_opts.index_name
-        local key = request_opts.index_value or box.NULL
-        local iterator_opts = request_opts.iterator_opts
+        local batch = request_batch.from_prepared_resolve(prepared_resolve)

         if i == 1 then
             assert(batches[field_name] == nil,
                 ('internal error: %s: field names "%s" clash'):format(
                 func_name, field_name))
-            batches[field_name] = {
-                collection_name = collection_name,
-                index_name = index_name,
-                keys = {key},
-                iterator_opts = iterator_opts,
-            }
+            batches[field_name] = batch
             size = i
         else
-            local ok =
-                batches[field_name] ~= nil and
-                batches[field_name].collection_name == collection_name and
-                batches[field_name].index_name == index_name and
-                utils.are_tables_same(batches[field_name].iterator_opts or
-                    {}, iterator_opts or {})
-            if not ok then break end -- XXX: continue here and return first
-                                     -- non-match instead of size?
-            table.insert(batches[field_name].keys, key)
+            local ok = batches[field_name] ~= nil and
+                batches[field_name]:compare_bins(batch)
+            if not ok then break end
+            table.insert(batches[field_name].keys, batch.keys[1])
             size = i
         end
     end
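The graphql.request_batch module itself is not part of this diff. The sketch below only reconstructs the interface its call sites imply: from_prepared_resolve() capturing the same four fields the deleted inline table held, bin() yielding a comparable request signature, and compare_bins()/compare_bins_extra() built on top of it. Treat it as an assumption about the module, not its actual code:

local json = require('json')

local request_batch_sketch = {}
request_batch_sketch.__index = request_batch_sketch

function request_batch_sketch.from_prepared_resolve(prepared_resolve)
    local prepared_select = prepared_resolve.prepared_select
    local request_opts = prepared_select.request_opts
    return setmetatable({
        collection_name = prepared_select.collection_name,
        index_name = request_opts.index_name,
        -- box.NULL is Tarantool's NULL placeholder for a missing key.
        keys = {request_opts.index_value or box.NULL},
        iterator_opts = request_opts.iterator_opts,
    }, request_batch_sketch)
end

-- A string identifying everything about the request except its keys;
-- requests with equal bins can be merged into one batch.
function request_batch_sketch:bin()
    return ('%s:%s:%s'):format(self.collection_name,
        tostring(self.index_name), json.encode(self.iterator_opts or {}))
end

function request_batch_sketch:compare_bins(other)
    return self:bin() == other:bin()
end

-- Like compare_bins(), but reports what differs, for error messages.
function request_batch_sketch:compare_bins_extra(other)
    if self:bin() == other:bin() then
        return true
    end
    return false, ('request bins differ: "%s" vs "%s"'):format(
        self:bin(), other:bin())
end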
@@ -850,45 +831,20 @@ fetch_resolve_list = function(prepared_object_list, opts)
             size = i
             break
         end
-        local prepared_select = prepared_resolve.prepared_select
-        local request_opts = prepared_select.request_opts
-        local collection_name = prepared_select.collection_name
-        local index_name = request_opts.index_name
-        local key = request_opts.index_value or box.NULL
-        local iterator_opts = request_opts.iterator_opts
+        local batch = request_batch.from_prepared_resolve(prepared_resolve)

         if i == 1 then
             assert(batches[field_name] == nil,
                 ('internal error: %s: field names "%s" clash'):format(
                 func_name, field_name))
-            batches[field_name] = {
-                collection_name = collection_name,
-                index_name = index_name,
-                keys = {key},
-                iterator_opts = iterator_opts,
-            }
+            batches[field_name] = batch
             size = i
         else
-            assert(batches[field_name].collection_name == collection_name,
-                ('internal error: %s: prepared object list has ' ..
-                'different collection names: "%s" and "%s"'):format(
-                func_name, batches[field_name].collection_name,
-                collection_name))
-            assert(batches[field_name].index_name == index_name,
-                ('internal error: %s: prepared object list has ' ..
-                'different index names: "%s" and "%s"'):format(func_name,
-                tostring(batches[field_name].index_name),
-                tostring(index_name)))
-            local ok = utils.are_tables_same(batches[field_name].iterator_opts or
-                {}, iterator_opts or {})
-            if not ok then -- avoid extra json.encode()
-                assert(ok, ('internal error: %s: prepared object list ' ..
-                    'has different iterator options: "%s" and "%s"'):format(
-                    func_name,
-                    json.encode(batches[field_name].iterator_opts),
-                    json.encode(iterator_opts)))
+            local ok, err = batches[field_name]:compare_bins_extra(batch)
+            if not ok then
+                error(('internal error: %s: %s'):format(func_name, err))
             end
-            table.insert(batches[field_name].keys, key)
+            table.insert(batches[field_name].keys, batch.keys[1])
             size = i
         end
     end
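Note the asymmetry between the two call sites: fetch_first_same() simply stops extending a batch when compare_bins() fails, while fetch_resolve_list() escalates a compare_bins_extra() mismatch to an internal error, since a prepared object list is expected to hold homogeneous requests. A tiny sketch of the shared pattern (the helper name is invented, not from the patch):

-- 'strict' mirrors the fetch_resolve_list() behaviour; non-strict
-- mirrors fetch_first_same().
local function try_extend_batch(batch, candidate, strict)
    local ok, err = batch:compare_bins_extra(candidate)
    if ok then
        -- candidate always carries exactly one key, because
        -- from_prepared_resolve() builds it from a single prepared resolve.
        table.insert(batch.keys, candidate.keys[1])
    elseif strict then
        error(('internal error: %s'):format(err))
    end
    return ok
end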
@@ -905,6 +861,69 @@

 -- }}}

+-- Reorder requests before adding them to open_set {{{
+
+local function expand_open_set(open_set, child_open_set, opts)
+    local opts = opts or {}
+    local accessor = opts.accessor
+
+    if not accessor:cache_is_supported() then
+        utils.expand_list(open_set, child_open_set)
+        return
+    end
+
+    local item_bin_to_ordinal = {}
+    local items_per_ordinal = {}
+    local next_ordinal = 1
+
+    -- Create a histogram-like 'items_per_ordinal' structure with lists of
+    -- items. Each list contains items of the same kind (with the same bin
+    -- value). Ordinals of the bins are assigned in order of appearance.
+    for _, item in ipairs(child_open_set) do
+        if item.prepared_object_list ~= nil then
+            local ordinal = next_ordinal
+            assert(items_per_ordinal[ordinal] == nil)
+            items_per_ordinal[ordinal] = {}
+            next_ordinal = next_ordinal + 1
+            table.insert(items_per_ordinal[ordinal], item)
+        else
+            local prepared_object = item.prepared_object
+            assert(prepared_object ~= nil)
+            assert(prepared_object.fields_info ~= nil)
+
+            local batch_bins = {}
+            for field_name, field_info in pairs(prepared_object.fields_info) do
+                local prepared_resolve = field_info.prepared_resolve
+                if prepared_resolve.is_calculated then
+                    table.insert(batch_bins, field_name .. ':<calculated>')
+                else
+                    local batch = request_batch.from_prepared_resolve(
+                        prepared_resolve)
+                    table.insert(batch_bins, field_name .. ':' .. batch:bin())
+                end
+            end
+
+            local item_bin = table.concat(batch_bins, ';')
+            local ordinal = item_bin_to_ordinal[item_bin]
+            if ordinal == nil then
+                item_bin_to_ordinal[item_bin] = next_ordinal
+                ordinal = next_ordinal
+                assert(items_per_ordinal[ordinal] == nil)
+                items_per_ordinal[ordinal] = {}
+                next_ordinal = next_ordinal + 1
+            end
+            table.insert(items_per_ordinal[ordinal], item)
+        end
+    end
+
+    -- Add items from child_open_set to open_set in the order of ordinals.
+    for _, items in ipairs(items_per_ordinal) do
+        utils.expand_list(open_set, items)
+    end
+end
+
+-- }}}
+
 -- Debugging {{{

 local function prepared_object_digest(prepared_object)
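The effect of the new reordering is easiest to see on plain data. Below is a self-contained rerun of the ordinal bucketing with strings standing in for item bins (the real code computes bins via request_batch): items sharing a bin end up adjacent in the output, which is exactly what lets fetch_first_same() merge them into one batch.

-- Bucket items by bin, assigning ordinals in order of first appearance.
local bin_to_ordinal = {}
local items_per_ordinal = {}
local next_ordinal = 1
for _, bin in ipairs({'B', 'A', 'A', 'B'}) do
    local ordinal = bin_to_ordinal[bin]
    if ordinal == nil then
        bin_to_ordinal[bin] = next_ordinal
        ordinal = next_ordinal
        items_per_ordinal[ordinal] = {}
        next_ordinal = next_ordinal + 1
    end
    table.insert(items_per_ordinal[ordinal], bin)
end
-- Flatten the buckets back into one list.
local result = {}
for _, items in ipairs(items_per_ordinal) do
    for _, item in ipairs(items) do
        table.insert(result, item)
    end
end
print(table.concat(result, ' ')) -- 'B B A A': same-bin items are adjacent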
@@ -1056,19 +1075,21 @@ function bfs_executor.execute(schema, query_ast, variables, operation_name, opts
             local child = invoke_resolve(item.prepared_object, context,
                 {qcontext = qcontext,
                 is_item_cache_only = is_item_cache_only})
-            local child_open_set = child.open_set
             local child_cache_only_open_set = child.cache_only_open_set
-            utils.expand_list(cache_only_open_set, child_cache_only_open_set)
-            utils.expand_list(open_set, child_open_set)
+            local child_open_set = child.open_set
+            expand_open_set(cache_only_open_set, child_cache_only_open_set,
+                {accessor = accessor})
+            expand_open_set(open_set, child_open_set, {accessor = accessor})
         elseif item.prepared_object_list ~= nil then
             local child = invoke_resolve_list(item.prepared_object_list,
                 context, {qcontext = qcontext, accessor = accessor,
                 is_item_cache_only = is_item_cache_only,
                 max_batch_size = max_batch_size})
-            local child_open_set = child.open_set
             local child_cache_only_open_set = child.cache_only_open_set
-            utils.expand_list(cache_only_open_set, child_cache_only_open_set)
-            utils.expand_list(open_set, child_open_set)
+            local child_open_set = child.open_set
+            expand_open_set(cache_only_open_set, child_cache_only_open_set,
+                {accessor = accessor})
+            expand_open_set(open_set, child_open_set, {accessor = accessor})
         elseif item.squash_marker ~= nil then
             local open_set_to_fetch = is_item_cache_only and
                 cache_only_open_set or open_set