diff --git a/.luacheckrc b/.luacheckrc index 837371a..797d09a 100644 --- a/.luacheckrc +++ b/.luacheckrc @@ -5,6 +5,6 @@ std = { 'getmetatable', 'setmetatable', 'rawget', 'print', 'shard_status', 'loadstring', 'arg', }, - globals = {'package'} + globals = {'package', '_G'} } redefined = False diff --git a/README.md b/README.md index 7e7e90e..db1512e 100644 --- a/README.md +++ b/README.md @@ -124,6 +124,12 @@ local compiled_query = graphql_lib.compile(query) local result = compiled_query:execute(variables) ``` +### Shard + +One need to call `require('graphql.storage').init()` on each storage server to +use graphql with shard and BFS executor. Alternatively one can use +`graphql.new({..., use_bfs_executor = 'never'})` on a frontend server. + ### Multi-head connections A parent object is matching against a multi-head connection variants in the order of the variants. The parent object should match with a determinant of @@ -404,6 +410,14 @@ TEST_RUN_TESTS=common/mutation make test * For building apidoc (additionally to 'for use'): * ldoc. +## Hacking + +Enable debug log: + +```sh +export TARANTOOL_GRAPHQL_DEBUG=1 +``` + ## License Consider LICENSE file for details. In brief: diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua index ecdf79d..50563c0 100644 --- a/graphql/accessor_general.lua +++ b/graphql/accessor_general.lua @@ -14,6 +14,7 @@ local rex, is_pcre2 = utils.optional_require_rex() local avro_helpers = require('graphql.avro_helpers') local db_schema_helpers = require('graphql.db_schema_helpers') local error_codes = require('graphql.error_codes') +local statistics = require('graphql.statistics') local check = utils.check local e = error_codes @@ -816,6 +817,21 @@ local function match_using_re(obj, pcre) return true end +--- Check whether we meet deadline time. +--- +--- The functions raises an exception in the case. +--- +--- @tparam table qcontext +--- +--- @return nothing +local function check_deadline_clock(qcontext) + if clock.monotonic64() > qcontext.deadline_clock then + error(e.timeout_exceeded(( + 'query execution timeout exceeded timeout_ms limit (%s ms)'):format( + tostring(qcontext.query_settings.timeout_ms)))) + end +end + --- Perform unflatten, skipping, filtering, limiting of objects. This is the --- core of the `select_internal` function. --- @@ -840,8 +856,6 @@ end --- objects), --- * `pivot_filter` (table, set of fields to match the objected pointed by --- `offset` arqument of the GraphQL query), ---- * `resulting_object_cnt_max` (number), ---- * `fetched_object_cnt_max` (number), --- * `resolveField` (function) for subrequests, see @{impl.new}. --- --- @return nil @@ -854,21 +868,23 @@ local function process_tuple(self, state, tuple, opts) local do_filter = opts.do_filter local pivot_filter = opts.pivot_filter local qcontext = state.qcontext - local qstats = qcontext.statistics - local resulting_object_cnt_max = opts.resulting_object_cnt_max - local fetched_object_cnt_max = opts.fetched_object_cnt_max - qstats.fetched_object_cnt = qstats.fetched_object_cnt + 1 - if qstats.fetched_object_cnt > fetched_object_cnt_max then - error(e.fetched_objects_limit_exceeded( - ('fetched objects count (%d) exceeds fetched_object_cnt_max ' .. 
- 'limit (%d)'):format(qstats.fetched_object_cnt, - fetched_object_cnt_max))) - end - if clock.monotonic64() > qcontext.deadline_clock then - error(e.timeout_exceeded(( - 'query execution timeout exceeded timeout_ms limit (%s ms)'):format( - tostring(state.qcontext.query_settings.timeout_ms)))) - end + + local full_scan_cnt = opts.is_full_scan and opts.fetches_cnt or 0 + local index_lookup_cnt = opts.is_full_scan and 0 or opts.fetches_cnt + qcontext.statistics:objects_fetched({ + fetches_cnt = opts.fetches_cnt, + fetched_objects_cnt = opts.fetched_tuples_cnt, + full_scan_cnt = full_scan_cnt, + index_lookup_cnt = index_lookup_cnt, + }) + + qcontext.statistics:cache_lookup({ + cache_hits_cnt = opts.cache_hits_cnt, + cache_hit_objects_cnt = opts.cache_hit_tuples_cnt, + }) + + check_deadline_clock(qcontext) + local collection_name = opts.collection_name local pcre = opts.pcre local resolveField = opts.resolveField @@ -885,12 +901,6 @@ local function process_tuple(self, state, tuple, opts) return true -- skip pivot item too end - -- Don't count subrequest resulting objects (needed for filtering) into - -- count of object we show to an user as a result. - -- XXX: It is better to have an option to control whether selected objects - -- will be counted as resulting ones. - local saved_resulting_object_cnt = qstats.resulting_object_cnt - -- make subrequests if needed for k, v in pairs(filter) do if obj[k] == nil then @@ -904,8 +914,6 @@ local function process_tuple(self, state, tuple, opts) end end - qstats.resulting_object_cnt = saved_resulting_object_cnt - -- filter out non-matching objects local match = utils.is_subtable(obj, filter) and match_using_re(obj, pcre) @@ -919,13 +927,13 @@ local function process_tuple(self, state, tuple, opts) -- add the matching object, update count and check limit state.objs[#state.objs + 1] = obj state.count = state.count + 1 - qstats.resulting_object_cnt = qstats.resulting_object_cnt + 1 - if qstats.resulting_object_cnt > resulting_object_cnt_max then - error(e.resulting_objects_limit_exceeded( - ('resulting objects count (%d) exceeds resulting_object_cnt_max ' .. - 'limit (%d)'):format(qstats.resulting_object_cnt, - resulting_object_cnt_max))) + + if not opts.is_hidden then + qcontext.statistics:objects_retired({ + retired_objects_cnt = 1, + }) end + if limit ~= nil and state.count >= limit then return false end @@ -975,8 +983,8 @@ local function perform_primary_key_operation(self, collection_name, schema_name, return new_objects end ---- The function is core of this module and implements logic of fetching and ---- filtering requested objects. +--- The function prepares context for tuples selection, postprocessing and +--- filtering. 
--- --- @tparam table self the data accessor created by the `new` function --- (directly or indirectly using the `accessor_space.new` or the @@ -996,8 +1004,10 @@ end --- @tparam table extra table which contains extra information related to --- current select and the whole query --- ---- @treturn table list of matching objects -local function select_internal(self, collection_name, from, filter, args, extra) +--- @treturn table `res` with `request_opts`, `select_state`, `select_opts` and +--- `args` fields +local function prepare_select_internal(self, collection_name, from, filter, + args, extra) check(self, 'self', 'table') check(collection_name, 'collection_name', 'string') check(from, 'from', 'table') @@ -1028,7 +1038,7 @@ local function select_internal(self, collection_name, from, filter, args, extra) index_name, collection_name)) end - -- lookup functions for unflattening + -- lookup function for unflattening local schema_name = collection.schema_name assert(type(schema_name) == 'string', 'schema_name must be a string, got ' .. type(schema_name)) @@ -1047,20 +1057,18 @@ local function select_internal(self, collection_name, from, filter, args, extra) } -- read only process_tuple options - local query_settings = qcontext.query_settings local select_opts = { limit = args.limit, filter = filter, do_filter = not full_match, pivot_filter = nil, -- filled later if needed - resulting_object_cnt_max = query_settings.resulting_object_cnt_max, - fetched_object_cnt_max = query_settings.fetched_object_cnt_max, collection_name = collection_name, unflatten_tuple = self.funcs.unflatten_tuple, use_tomap = self.collection_use_tomap[collection_name] or false, default_unflatten_tuple = default_unflatten_tuple, pcre = args.pcre, resolveField = extra.resolveField, + is_hidden = extra.is_hidden, } -- assert that connection constraint applied only to objects got from the @@ -1075,27 +1083,17 @@ local function select_internal(self, collection_name, from, filter, args, extra) pivot.filter ~= nil), err) end + local iterator_opts = nil + local is_full_scan + if index == nil then - -- fullscan - local primary_index = self.funcs.get_primary_index(self, - collection_name) - - -- count full scan select request - qcontext.statistics.select_requests_cnt = - qcontext.statistics.select_requests_cnt + 1 - qcontext.statistics.full_scan_select_requests_cnt = - qcontext.statistics.full_scan_select_requests_cnt + 1 - - for _, tuple in primary_index:pairs() do - assert(pivot == nil, - 'offset for top-level objects must use a primary index') - local continue = process_tuple(self, select_state, tuple, - select_opts) - if not continue then break end - end + assert(pivot == nil, + 'offset for top-level objects must use a primary index') + index = self.funcs.get_primary_index(self, collection_name) + index_value = nil + is_full_scan = true else - -- select by index - local iterator_opts = {} + iterator_opts = iterator_opts or {} if pivot ~= nil then -- handle case when there is pivot item (offset was passed) if pivot.value_list ~= nil then @@ -1126,33 +1124,96 @@ local function select_internal(self, collection_name, from, filter, args, extra) iterator_opts.limit = args.limit end - local tuple_count = 0 - - -- count index select request - qcontext.statistics.select_requests_cnt = - qcontext.statistics.select_requests_cnt + 1 - qcontext.statistics.index_select_requests_cnt = - qcontext.statistics.index_select_requests_cnt + 1 - - for _, tuple in index:pairs(index_value, iterator_opts) do - tuple_count = tuple_count + 1 - -- check 
full match constraint - if extra.exp_tuple_count ~= nil and - tuple_count > extra.exp_tuple_count then - error(('FULL MATCH constraint was failed: we got more then ' .. - '%d tuples'):format(extra.exp_tuple_count)) - end - local continue = process_tuple(self, select_state, tuple, - select_opts) - if not continue then break end - end + is_full_scan = false + end + + -- request options can be changed below + local request_opts = { + index = index, + index_name = index_name, + index_value = index_value, + iterator_opts = iterator_opts, + is_full_scan = is_full_scan, + } + + return { + request_opts = request_opts, + select_state = select_state, + select_opts = select_opts, + collection_name = collection_name, + from = from, + filter = filter, + args = args, + extra = extra, + } +end + +--- XXX +local function invoke_select_internal(self, prepared_select) + local request_opts = prepared_select.request_opts + local select_state = prepared_select.select_state + local select_opts = prepared_select.select_opts + local collection_name = prepared_select.collection_name + local args = prepared_select.args + local extra = prepared_select.extra + + local index = request_opts.index + local index_name = request_opts.index_name + local index_value = request_opts.index_value + local iterator_opts = request_opts.iterator_opts + local is_full_scan = request_opts.is_full_scan + + local tuple_count = 0 + local out = {} + + -- lookup for needed data in the cache if it is supported + local iterable + if self:cache_is_supported() then + iterable = self.funcs.cache_lookup(self, collection_name, index_name, + index_value, iterator_opts) + end + if iterable == nil then + iterable = index + end + + for _, tuple in iterable:pairs(index_value, iterator_opts, out) do + local fetches_cnt = out.fetches_cnt or 0 + local fetched_tuples_cnt = out.fetched_tuples_cnt or 0 + local cache_hits_cnt = out.cache_hits_cnt or 0 + local cache_hit_tuples_cnt = out.cache_hit_tuples_cnt or 0 + check(fetches_cnt, 'fetches_cnt', 'number') + check(fetched_tuples_cnt, 'fetched_tuples_cnt', 'number') + check(cache_hits_cnt, 'cache_hits_cnt', 'number') + check(cache_hit_tuples_cnt, 'cache_hit_tuples_cnt', 'number') + out.fetches_cnt = 0 + out.fetched_tuples_cnt = 0 + out.cache_hits_cnt = 0 + out.cache_hit_tuples_cnt = 0 + + tuple_count = tuple_count + 1 -- check full match constraint if extra.exp_tuple_count ~= nil and - tuple_count ~= extra.exp_tuple_count then - error(('FULL MATCH constraint was failed: we expect %d tuples, ' .. - 'got %d'):format(extra.exp_tuple_count, tuple_count)) + tuple_count > extra.exp_tuple_count then + error(('FULL MATCH constraint was failed: we got more then ' .. + '%d tuples'):format(extra.exp_tuple_count)) end + + select_opts.is_full_scan = is_full_scan + select_opts.fetches_cnt = fetches_cnt + select_opts.fetched_tuples_cnt = fetched_tuples_cnt + select_opts.cache_hits_cnt = cache_hits_cnt + select_opts.cache_hit_tuples_cnt = cache_hit_tuples_cnt + local continue = process_tuple(self, select_state, tuple, + select_opts) + if not continue then break end + end + + -- check full match constraint + if extra.exp_tuple_count ~= nil and + tuple_count ~= extra.exp_tuple_count then + error(('FULL MATCH constraint was failed: we expect %d tuples, ' .. + 'got %d'):format(extra.exp_tuple_count, tuple_count)) end local count = select_state.count @@ -1307,6 +1368,10 @@ local function validate_funcs(funcs) assert(type(funcs.delete_tuple) == 'function', 'funcs.delete_tuple must be a function, got ' .. 
type(funcs.delete_tuple)) + check(funcs.cache_fetch, 'funcs.cache_fetch', 'function', 'nil') + -- check(funcs.cache_delete, 'funcs.cache_delete', 'function', 'nil') + check(funcs.cache_truncate, 'funcs.cache_truncate', 'function', 'nil') + check(funcs.cache_lookup, 'funcs.cache_lookup', 'function', 'nil') end local function validate_query_settings(query_settings, opts) @@ -1344,6 +1409,8 @@ end --- all neccessary initialization of this parameter should be performed by this -- function local function init_qcontext(accessor, qcontext) + if qcontext.initialized then return end + for k, v in pairs(accessor.query_settings_default) do if qcontext.query_settings[k] == nil then qcontext.query_settings[k] = v @@ -1354,13 +1421,13 @@ local function init_qcontext(accessor, qcontext) qcontext.deadline_clock = clock.monotonic64() + qcontext.query_settings.timeout_ms * 1000 * 1000 - qcontext.statistics = { - resulting_object_cnt = 0, - fetched_object_cnt = 0, - select_requests_cnt = 0, - full_scan_select_requests_cnt = 0, - index_select_requests_cnt = 0, - } + local query_settings = qcontext.query_settings + qcontext.statistics = statistics.new({ + resulting_object_cnt_max = query_settings.resulting_object_cnt_max, + fetched_object_cnt_max = query_settings.fetched_object_cnt_max, + }) + + qcontext.initialized = true end --- Create default unflatten/flatten/xflatten functions, that can be called @@ -1438,7 +1505,9 @@ end --- both)_, --- * `timeout_ms` _(default is 1000)_, --- * `enable_mutations`: boolean flag _(default is `false` for avro-schema-2* ---- and `true` for avro-schema-3*)_. +--- and `true` for avro-schema-3*)_, +--- * `name` is 'space' or 'shard', +--- * `data_cache` (optional) is accessor_shard_cache instance. --- --- For examples of `opts.schemas` and `opts.collections` consider the --- @{impl.new} function description. @@ -1472,16 +1541,19 @@ end --- * `get_primary_index`, --- * `unflatten_tuple`, --- * `flatten_object`, ---- * `insert_tuple`. +--- * `insert_tuple`, +--- * `cache_fetch` (optional), +--- -- * `cache_delete` (optional), +--- * `cache_truncate` (optional), +--- * `cache_lookup` (optional). --- --- They allows this abstract data accessor behaves in the certain way (say, --- like space data accessor or shard data accessor); consider the --- `accessor_space` and the `accessor_shard` modules documentation for these --- functions description. --- ---- @treturn table data accessor instance, a table with the two methods ---- (`select` and `arguments`) as described in the @{impl.new} function ---- description. +--- @treturn table data accessor instance, a table with the methods as +--- described in the @{impl.new} function description. 
--- --- Brief explanation of some select function parameters: --- @@ -1539,6 +1611,11 @@ function accessor_general.new(opts, funcs) end check(enable_mutations, 'enable_mutations', 'boolean') + local name = opts.name + local data_cache = opts.data_cache + check(name, 'name', 'string') + check(data_cache, 'data_cache', 'table', 'nil') + local models, service_fields_defaults = compile_schemas(schemas, service_fields) validate_collections(collections, schemas, indexes) @@ -1568,25 +1645,19 @@ function accessor_general.new(opts, funcs) enable_mutations = enable_mutations, }, query_settings_default = query_settings_default, + name = name, + data_cache = data_cache, }, { __index = { select = function(self, parent, collection_name, from, filter, args, extra) - check(parent, 'parent', 'table') - validate_from_parameter(from) - - --`qcontext` initialization - if extra.qcontext.initialized ~= true then - init_qcontext(self, extra.qcontext) - extra.qcontext.initialized = true - end - local inserted = insert_internal(self, collection_name, from, filter, args, extra) if inserted ~= nil then return inserted end - local selected = select_internal(self, collection_name, from, filter, - args, extra) + local prepared_select = self:prepare_select(parent, + collection_name, from, filter, args, extra) + local selected = self:invoke_select(prepared_select) local updated = update_internal(self, collection_name, extra, selected) @@ -1598,6 +1669,66 @@ function accessor_general.new(opts, funcs) return selected end, + prepare_select = function(self, parent, collection_name, from, + filter, args, extra) + check(parent, 'parent', 'table') + validate_from_parameter(from) + + init_qcontext(self, extra.qcontext) + + return prepare_select_internal(self, collection_name, from, + filter, args, extra) + end, + invoke_select = invoke_select_internal, + cache_is_supported = function(self) + return self.data_cache ~= nil + end, + cache_fetch = function(self, batches, qcontext) + if not self:cache_is_supported() then + return nil + end + + local res = self.funcs.cache_fetch(self, batches) + if res == nil then + return nil + end + + local fetch_id = res.fetch_id + local stat = res.stat + check(fetch_id, 'fetch_id', 'number') + check(stat, 'stat', 'table') + check(stat.fetches_cnt, 'stat.fetches_cnt', 'number') + check(stat.fetched_tuples_cnt, 'stat.fetched_tuples_cnt', + 'number') + check(stat.full_scan_cnt, 'stat.full_scan_cnt', 'number') + check(stat.index_lookup_cnt, 'stat.index_lookup_cnt', 'number') + + -- update statistics + init_qcontext(self, qcontext) + qcontext.statistics:objects_fetched({ + fetches_cnt = stat.fetches_cnt, + fetched_objects_cnt = stat.fetched_tuples_cnt, + full_scan_cnt = stat.full_scan_cnt, + index_lookup_cnt = stat.index_lookup_cnt + }) + + check_deadline_clock(qcontext) + + return fetch_id + end, + -- Unused for now. + -- cache_delete = function(self, fetch_id) + -- if not self:cache_is_supported() then + -- return + -- end + -- self.funcs.cache_delete(self, fetch_id) + -- end, + cache_truncate = function(self) + if not self:cache_is_supported() then + return + end + self.funcs.cache_truncate(self) + end, } }) end diff --git a/graphql/accessor_shard.lua b/graphql/accessor_shard.lua index 5b67675..c01192a 100644 --- a/graphql/accessor_shard.lua +++ b/graphql/accessor_shard.lua @@ -3,11 +3,13 @@ --- `accessor_shard.new` function to create a new shard data accessor instance. 
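The `cache_fetch` accessor method defined in `accessor_general.new` above enforces a contract on `funcs.cache_fetch`: it must return either `nil` or a table with a numeric `fetch_id` and a `stat` table whose counters are fed into `qcontext.statistics`. Below is a minimal sketch of a custom override that satisfies those checks (not part of the diff itself); `my_fetch_batch` is a hypothetical stand-in for real data access, and `accessor_shard` already ships a real implementation backed by `accessor_shard_cache`.

```lua
-- Sketch of a funcs.cache_fetch override; it only demonstrates the return
-- shape that accessor_general.new() checks. my_fetch_batch() is hypothetical.
local function my_fetch_batch(batch)
    -- batch carries collection_name, index_name, keys and iterator_opts
    return {} -- a real implementation would return a list of tuples
end

local next_fetch_id = 1

local function my_cache_fetch(self, batches)
    local stat = {
        fetches_cnt = 0,
        fetched_tuples_cnt = 0,
        full_scan_cnt = 0,
        index_lookup_cnt = 0,
    }

    for _, batch in pairs(batches) do
        local tuples = my_fetch_batch(batch)
        stat.fetches_cnt = stat.fetches_cnt + 1
        stat.fetched_tuples_cnt = stat.fetched_tuples_cnt + #tuples
        -- an empty key counts as a full scan, as in accessor_shard_cache
        for _, key in ipairs(batch.keys) do
            if key == nil or (type(key) == 'table' and next(key) == nil) then
                stat.full_scan_cnt = stat.full_scan_cnt + 1
            else
                stat.index_lookup_cnt = stat.index_lookup_cnt + 1
            end
        end
    end

    local fetch_id = next_fetch_id
    next_fetch_id = next_fetch_id + 1
    return {fetch_id = fetch_id, stat = stat}
end
```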
local json = require('json') -local yaml = require('yaml') local digest = require('digest') local utils = require('graphql.utils') local shard = utils.optional_require('shard') local accessor_general = require('graphql.accessor_general') +local accessor_shard_helpers = require('graphql.accessor_shard_helpers') +local accessor_shard_index_info = require('graphql.accessor_shard_index_info') +local accessor_shard_cache = require('graphql.accessor_shard_cache') local check = utils.check @@ -17,27 +19,8 @@ local LIMIT = 100000 -- XXX: we need to raise an error when a limit reached -- shard module calculates sharding key by the first field of a tuple local SHARD_KEY_FIELD_NO = 1 -local index_info_cache = {} - -- {{{ helpers -local function shard_check_error(func_name, result, err) - if result ~= nil then return end - - -- avoid json encoding of an error message (when the error is in the known - -- format) - if type(err) == 'table' and type(err.error) == 'string' then - error({ - message = err.error, - extensions = { - shard_error = err, - } - }) - end - - error(('%s: %s'):format(func_name, json.encode(err))) -end - -- Should work for shard-1.2 and shard-2.1 both. local function shard_check_status(func_name) if box.space._shard == nil then return end @@ -52,89 +35,6 @@ local function shard_check_status(func_name) end end ---- Determines whether certain fields of two tables are the same. ---- ---- Table fields of t1 and t2 are compared recursively by all its fields. ---- ---- @tparam table t1 ---- ---- @tparam table t2 ---- ---- @tparam table fields list of fields like {'a', 'b', 'c'} ---- ---- @treturn boolean -local function compare_table_by_fields(t1, t2, fields) - for _, field_name in ipairs(fields) do - local v1 = t1[field_name] - local v2 = t2[field_name] - if type(v1) ~= type(v2) then - return false - elseif type(v1) == 'table' then - local ok = utils.is_subtable(v1, v2) - ok = ok and utils.is_subtable(v2, v1) - if not ok then return false end - else - if v1 ~= v2 then return false end - end - end - return true -end - ---- Get index object from net_box under the shard module. ---- ---- The function performs some optimistic consistency checks and raises an ---- error in the case. It caches results and returns a result from the cache ---- for succeeding calls. ---- ---- XXX: Implement some cache clean up strategy and a way to manual cache ---- purge. ---- ---- @tparam string collection_name ---- ---- @tparam string index_name ---- ---- @return index object -local function get_index_info(collection_name, index_name) - local func_name = 'accessor_shard.get_index_info' - local index_info - - -- get from the cache if exists - if index_info_cache[collection_name] ~= nil then - index_info = index_info_cache[collection_name][index_name] - if index_info ~= nil then - return index_info - end - end - - local fields_to_compare = {'unique', 'parts', 'id', 'type', 'name'} - - for _, zone in ipairs(shard.shards) do - for _, node in ipairs(zone) do - local result, err = shard:space_call(collection_name, node, - function(space_obj) - return space_obj.index[index_name] - end) - shard_check_error(func_name, result, err) - if index_info == nil then - index_info = result - end - local ok = compare_table_by_fields(index_info, result, - fields_to_compare) - assert(ok, ('index %s of space "%s" is different between ' .. 
- 'nodes:\n%s\n%s'):format(json.encode(index_name), - collection_name, yaml.encode(index_info), yaml.encode(result))) - end - end - - -- write to the cache - if index_info_cache[collection_name] == nil then - index_info_cache[collection_name] = {} - end - index_info_cache[collection_name][index_name] = index_info - - return index_info -end - --- Internal function to use in @{get_index}; it is necessary because --- determining whether the index exists within a shard cluster is --- not-so-trivial as for local spaces. @@ -147,7 +47,7 @@ local function is_index_exists(collection_name, index_name) function(space_obj) return space_obj.index[index_name] ~= nil end) - shard_check_error(func_name, cur, err) + accessor_shard_helpers.shard_check_error(func_name, cur, err) assert(exists == nil or cur == exists, ('index "%s" of space "%s" exists on some shards, ' .. 'but does not on others'):format(index_name, collection_name)) @@ -178,9 +78,11 @@ local function get_tuple(self, collection_name, key) check(self, 'self', 'table') local index = self.funcs.get_primary_index(self, collection_name) local tuples = {} - for _, t in index:pairs(key, {limit = 2}) do + local out = {} -- XXX: count fetched_tuples_cnt in statistics + for _, t in index:pairs(key, {limit = 2}, out) do table.insert(tuples, t) end + check(out.fetched_tuples_cnt, 'out.fetched_tuples_cnt', 'number') assert(#tuples ~= 0, ('%s: expected one tuple by the primary key %s, got 0'):format( func_name, json.encode(key))) @@ -217,7 +119,7 @@ local function space_operation(collection_name, nodes, operation, ...) for _, node in ipairs(nodes) do local result, err = shard:single_call(collection_name, node, operation, ...) - shard_check_error(func_name, result, err) + accessor_shard_helpers.shard_check_error(func_name, result, err) if master_result == nil then master_result = result end @@ -253,7 +155,7 @@ local function is_collection_exists(self, collection_name) function(space_obj) return space_obj ~= nil end) - shard_check_error(func_name, cur, err) + accessor_shard_helpers.shard_check_error(func_name, cur, err) assert(exists == nil or cur == exists, ('space "%s" exists on some shards, ' .. 
'but does not on others'):format(collection_name)) @@ -278,15 +180,23 @@ local function get_index(self, collection_name, index_name) return nil end + -- XXX: wrap all data into the table, don't create the capture local index = setmetatable({}, { __index = { - pairs = function(self, value, opts) + pairs = function(_, value, opts, out) local func_name = 'accessor_shard.get_index..pairs' + + -- perform select local opts = opts or {} opts.limit = opts.limit or LIMIT local tuples, err = shard:secondary_select(collection_name, index_name, opts, value, 0) - shard_check_error(func_name, tuples, err) + accessor_shard_helpers.shard_check_error(func_name, + tuples, err) + out.fetches_cnt = 1 + out.fetched_tuples_cnt = #tuples + + -- create iterator local cur = 1 local function gen() if cur > #tuples then return nil end @@ -294,6 +204,7 @@ local function get_index(self, collection_name, index_name) cur = cur + 1 return cur, res end + return gen, nil, nil end } @@ -398,7 +309,7 @@ local function insert_tuple(self, collection_name, tuple) shard_check_status(func_name) local result, err = shard:insert(collection_name, tuple) - shard_check_error(func_name, result, err) + accessor_shard_helpers.shard_check_error(func_name, result, err) if major_shard_version() == 2 then -- result is the inserted tuple @@ -480,7 +391,8 @@ local function update_tuple(self, collection_name, key, statements, opts) shard_check_status(func_name) -- We follow tarantool convention and disallow update of primary key parts. - local primary_index_info = get_index_info(collection_name, 0) + local primary_index_info = accessor_shard_index_info.get_index_info( + collection_name, 0) for _, statement in ipairs(statements) do -- statement is {operator, field_no, value} local field_no = statement[2] @@ -565,6 +477,57 @@ local function delete_tuple(self, collection_name, key, opts) return tuple end +--- Fetch data to the cache. +--- +--- @tparam table self accessor_general instance +--- +--- @tparam table batches see @{accessor_shard_cache.cache_fetch} +--- +--- @treturn table see @{accessor_shard_cache.cache_fetch} +local function cache_fetch(self, batches) + return self.data_cache:fetch(batches) +end + +-- Unused for now. +-- --- Delete fetched data by fetch_id. +-- --- +-- --- @tparam table self accessor_general instance +-- --- +-- --- @tparam number fetch_id identifier of the fetched data +-- --- +-- --- @return nothing +-- local function cache_delete(self, fetch_id) +-- self.data_cache:delete(fetch_id) +-- end + +--- Delete all fetched data. +--- +--- @tparam table self accessor_general instance +--- +--- @return nothing +local function cache_truncate(self) + self.data_cache:truncate() +end + +--- Lookup for data in the cache. +--- +--- @tparam table self accessor_general instance +--- +--- @tparam string collection_name +--- +--- @tparam string index_name +--- +--- @param key +--- +--- @tparam table iterator_opts e.g. {} or {iterator = 'GT'} +--- +--- @return luafun iterator (one value) to fetched data or nil +local function cache_lookup(self, collection_name, index_name, + key, iterator_opts) + return self.data_cache:lookup(collection_name, index_name, key, + iterator_opts) +end + --- Create a new shard data accessor instance. 
function accessor_shard.new(opts, funcs) local funcs = funcs or {} @@ -588,8 +551,15 @@ function accessor_shard.new(opts, funcs) insert_tuple = funcs.insert_tuple or insert_tuple, update_tuple = funcs.update_tuple or update_tuple, delete_tuple = funcs.delete_tuple or delete_tuple, + cache_fetch = funcs.cache_fetch or cache_fetch, + -- cache_delete = funcs.cache_delete or cache_delete, + cache_truncate = funcs.cache_truncate or cache_truncate, + cache_lookup = funcs.cache_lookup or cache_lookup, } + local opts = table.copy(opts) + opts.name = 'shard' + opts.data_cache = accessor_shard_cache.new() return accessor_general.new(opts, res_funcs) end diff --git a/graphql/accessor_shard_cache.lua b/graphql/accessor_shard_cache.lua new file mode 100644 index 0000000..8d767bd --- /dev/null +++ b/graphql/accessor_shard_cache.lua @@ -0,0 +1,315 @@ +local json = require('json') +local shard = require('shard') +local utils = require('graphql.utils') +local request_batch = require('graphql.request_batch') +local accessor_shard_index_info = require('graphql.accessor_shard_index_info') + +local check = utils.check + +local accessor_shard_cache = {} + +local function get_select_ids(self, batch, opts) + local opts = opts or {} + local skip_cached = opts.skip_cached or false + + local skip_function + if skip_cached then + skip_function = function(id) + return self.cache[id] ~= nil + end + end + + return batch:select_ids(skip_function) +end + +local function net_box_call_wrapper(conn, func_name, call_args) + local ok, result = pcall(conn.call, conn, func_name, call_args, + {is_async = true}) + + if not ok then + return nil, { + message = ('%s: %s'):format(func_name, json.encode(result)), + extensions = { + net_box_error = result, + } + } + end + + return result +end + +local function is_future(result) + return type(result) == 'table' and type(result.wait_result) == 'function' +end + +local function cache_fetch_batch(self, batch, fetch_id, stat) + local ids = get_select_ids(self, batch, {skip_cached = true}) + if next(ids) == nil then return end + + -- perform requests + local results_per_replica_set = {} + for i, replica_set in ipairs(shard.shards) do + local first_err + + -- perform the request on each node in a replica set starting from + -- a master node until success or end of the nodes + for n = #replica_set, 1, -1 do + local node = replica_set[n] + local conn = node.conn + local call_args = { + batch.collection_name, + batch.index_name, + batch.keys, + batch.iterator_opts + } + local node_results, node_err = net_box_call_wrapper(conn, + 'batch_select', call_args) + + if node_results == nil then + if first_err == nil then + first_err = node_err + end + else + results_per_replica_set[i] = node_results + break -- go to the next replica_set + end + end + + -- no successful requests, return the error from the master node + if first_err ~= nil then + error(first_err) + end + end + + -- merge results without sorting: transform + -- results_per_replica_set[replica_set_num][request_num] 2d-array into + -- results[request_num] 1d-array + local results = {} + for _, node_results in ipairs(results_per_replica_set) do + if is_future(node_results) then + node_results = node_results:wait_result()[1] + end + -- we cannot use C merger for now, because buffers will contain + -- list of list of tuples instead of list of tuples + for j, node_result in ipairs(node_results) do + results[j] = results[j] or {} + for _, tuple in ipairs(node_result) do + table.insert(results[j], tuple) + end + end + end + + -- count fetches: 
assume a fetch as one request across cluster (don't + -- count individulal requests to storages) + stat.fetches_cnt = stat.fetches_cnt + 1 + -- count full scan and index lookup counts + for i, key in ipairs(batch.keys) do + if key == nil or (type(key) == 'table' and + next(key) == nil) then + stat.full_scan_cnt = stat.full_scan_cnt + 1 + else + stat.index_lookup_cnt = stat.index_lookup_cnt + 1 + end + end + + -- sort by a primary key + local primary_index_info = accessor_shard_index_info.get_index_info( + batch.collection_name, 0) + for _, result in ipairs(results) do + -- count fetched tuples + stat.fetched_tuples_cnt = stat.fetched_tuples_cnt + #result + table.sort(result, function(a, b) + for i, part in pairs(primary_index_info.parts) do + if a[part.fieldno] ~= b[part.fieldno] then + return a[part.fieldno] < b[part.fieldno] + end + end + return false + end) + end + + -- write to cache + assert(#results == #batch.keys, + ('results count %d is not the same as requests count %d'):format( + #results, #batch.keys)) + for i = 1, #ids do + if self.cache[ids[i]] == nil then + self.cache[ids[i]] = { + result = results[i], + fetch_ids = {fetch_id} + } + else + -- XXX: we can skip this key from a request + for j, tuple in ipairs(self.cache[ids[i]].result) do + if not utils.are_tables_same(results[i][j], tuple) then + error(('fetched tuple %s is not the same as one in ' .. + 'the cache: %s'):format(json.encode(tuple), + json.encode(results[i][j]))) + end + end + table.insert(self.cache[ids[i]].fetch_ids, fetch_id) + end + end +end + +--- Fetch data to the cache. +--- +--- @tparam table self accessor_shard_cache instance +--- +--- @tparam table batches requests batches in the following format: +--- +--- batches = { +--- [field_name] = { +--- collection_name = , +--- index_name = , +--- keys = <...>, +--- iterator_opts = <...>, +--- }, +--- ... +--- } +--- +--- @treturn table the following structure or nil: +--- +--- { +--- fetch_id = , -- identifier of the fetched data +--- stat = { -- data to update statistics +--- fetches_cnt = , +--- fetched_tuples_cnt = , +--- full_scan_cnt = , +--- index_lookup_cnt = , +--- } +--- } +local function cache_fetch(self, batches) + local fetch_id = self.fetch_id_next + self.fetch_id_next = self.fetch_id_next + 1 + + local stat = { + fetches_cnt = 0, + fetched_tuples_cnt = 0, + full_scan_cnt = 0, + index_lookup_cnt = 0, + } + + for _, batch in pairs(batches) do + cache_fetch_batch(self, batch, fetch_id, stat) + end + + return { + fetch_id = fetch_id, + stat = stat, + } +end + +-- Unused for now. +-- --- Delete fetched data by fetch_id. +-- --- +-- --- @tparam table self accessor_shard_cache instance +-- --- +-- --- @tparam number fetch_id identifier of the fetched data +-- --- +-- --- @return nothing +-- local function cache_delete(self, fetch_id) +-- local ids_to_remove = {} +-- +-- for id, item in pairs(self.cache) do +-- if #item.fetch_ids == 1 and item.fetch_ids[1] == fetch_id then +-- table.insert(ids_to_remove, id) +-- elseif #item.fetch_ids > 1 then +-- local fetch_ids_to_remove = {} +-- for i, fid in ipairs(item.fetch_ids) do +-- if fid == fetch_id then +-- table.insert(fetch_ids_to_remove, i) +-- end +-- end +-- table.sort(fetch_ids_to_remove, function(a, b) return a > b end) +-- for _, i in ipairs(fetch_ids_to_remove) do +-- table.remove(item.fetch_ids, i) +-- end +-- end +-- end +-- +-- for _, id in ipairs(ids_to_remove) do +-- self.cache[id] = nil +-- end +-- end + +--- Delete all fetched data. 
+--- +--- @tparam table self accessor_shard_cache instance +--- +--- @return nothing +local function cache_truncate(self) + self.cache = {} +end + +--- Lookup for data in the cache. +--- +--- @tparam table self accessor_shard_cache instance +--- +--- @tparam string collection_name +--- +--- @tparam string index_name +--- +--- @param key +--- +--- @tparam table iterator_opts e.g. {} or {iterator = 'GT'} +--- +--- @return luafun iterator (one value) to fetched data or nil +local function cache_lookup(self, collection_name, index_name, key, + iterator_opts) + local batch = request_batch.new(collection_name, index_name, + {key or box.NULL}, iterator_opts) + local id = get_select_ids(self, batch)[1] + if self.cache[id] == nil then + return nil + end + + local tuples = self.cache[id].result + check(tuples, 'tuples', 'table') + -- XXX: wrap all data into the table, don't create the capture + return setmetatable({tuples = tuples}, { + __index = { + pairs = function(self, value, opts, out) + assert(value == key, 'expected the same key in ' .. + 'cache_lookup call and :pairs() on returned iterable') + assert(opts == iterator_opts, 'expected the same ' .. + 'iterator_opts in cache_lookup call and :pairs() on ' .. + 'returned iterable') + + out.cache_hits_cnt = 1 + out.cache_hit_tuples_cnt = #self.tuples + + -- create iterator + local cur = 1 + local function gen() + if cur > #self.tuples then return nil end + local res = tuples[cur] + cur = cur + 1 + return cur, res + end + + return gen, nil, nil + end + } + }) +end + +--- Create new accessor_shard_cache instance. +--- +--- @treturn table accessor_shard_cache instance +function accessor_shard_cache.new() + return setmetatable({ + cache = {}, + fetch_id_next = 1, + }, { + __index = { + fetch = cache_fetch, + -- Unused for now. + -- delete = cache_delete, + truncate = cache_truncate, + lookup = cache_lookup, + }, + }) +end + +return accessor_shard_cache diff --git a/graphql/accessor_shard_helpers.lua b/graphql/accessor_shard_helpers.lua new file mode 100644 index 0000000..8080fb1 --- /dev/null +++ b/graphql/accessor_shard_helpers.lua @@ -0,0 +1,24 @@ +--- Auxiliary functions for shard module usage needed across several modules. + +local json = require('json') + +local accessor_shard_helpers = {} + +function accessor_shard_helpers.shard_check_error(func_name, result, err) + if result ~= nil then return end + + -- avoid json encoding of an error message (when the error is in the known + -- format) + if type(err) == 'table' and type(err.error) == 'string' then + error({ + message = err.error, + extensions = { + shard_error = err, + } + }) + end + + error(('%s: %s'):format(func_name, json.encode(err))) +end + +return accessor_shard_helpers diff --git a/graphql/accessor_shard_index_info.lua b/graphql/accessor_shard_index_info.lua new file mode 100644 index 0000000..0c5b7f4 --- /dev/null +++ b/graphql/accessor_shard_index_info.lua @@ -0,0 +1,98 @@ +--- Implements cache of index information (parts and its properties). + +local json = require('json') +local yaml = require('yaml') +local utils = require('graphql.utils') +local shard = utils.optional_require('shard') +local accessor_shard_helpers = require('graphql.accessor_shard_helpers') + +local accessor_shard_index_info = {} + +-- XXX: accessor_shard_index_info.new() + +local index_info_cache = {} + +--- Determines whether certain fields of two tables are the same. +--- +--- Table fields of t1 and t2 are compared recursively by all its fields. 
+--- +--- @tparam table t1 +--- +--- @tparam table t2 +--- +--- @tparam table fields list of fields like {'a', 'b', 'c'} +--- +--- @treturn boolean +local function compare_table_by_fields(t1, t2, fields) + for _, field_name in ipairs(fields) do + local v1 = t1[field_name] + local v2 = t2[field_name] + if type(v1) ~= type(v2) then + return false + elseif type(v1) == 'table' then + local ok = utils.is_subtable(v1, v2) + ok = ok and utils.is_subtable(v2, v1) + if not ok then return false end + else + if v1 ~= v2 then return false end + end + end + return true +end + +--- Get index object from net_box under the shard module. +--- +--- The function performs some optimistic consistency checks and raises an +--- error in the case. It caches results and returns a result from the cache +--- for succeeding calls. +--- +--- XXX: Implement some cache clean up strategy and a way to manual cache +--- purge. +--- +--- @tparam string collection_name +--- +--- @tparam string index_name +--- +--- @return index object +function accessor_shard_index_info.get_index_info(collection_name, index_name) + local func_name = 'accessor_shard.get_index_info' + local index_info + + -- get from the cache if exists + if index_info_cache[collection_name] ~= nil then + index_info = index_info_cache[collection_name][index_name] + if index_info ~= nil then + return index_info + end + end + + local fields_to_compare = {'unique', 'parts', 'id', 'type', 'name'} + + for _, zone in ipairs(shard.shards) do + for _, node in ipairs(zone) do + local result, err = shard:space_call(collection_name, node, + function(space_obj) + return space_obj.index[index_name] + end) + accessor_shard_helpers.shard_check_error(func_name, result, err) + if index_info == nil then + index_info = result + end + local ok = compare_table_by_fields(index_info, result, + fields_to_compare) + assert(ok, ('index %s of space "%s" is different between ' .. + 'nodes:\n%s\n%s'):format(json.encode(index_name), + collection_name, yaml.encode(index_info), yaml.encode(result))) + end + end + + -- write to the cache + if index_info_cache[collection_name] == nil then + index_info_cache[collection_name] = {} + end + index_info_cache[collection_name][index_name] = index_info + + return index_info +end + +return accessor_shard_index_info diff --git a/graphql/accessor_space.lua b/graphql/accessor_space.lua index c10cf87..8eaf79e 100644 --- a/graphql/accessor_space.lua +++ b/graphql/accessor_space.lua @@ -32,7 +32,27 @@ end --- @return index or nil local function get_index(self, collection_name, index_name) check(self, 'self', 'table') - return box.space[collection_name].index[index_name] + local index = box.space[collection_name].index[index_name] + if index == nil then + return nil + end + return setmetatable({}, { + __index = { + pairs = function(_, value, opts, out) + out.fetches_cnt = 1 + out.fetched_tuples_cnt = 0 + local gen, param, state = index:pairs(value, opts) + local function new_gen(param, state) + local new_state, tuple = gen(param, state) + if tuple ~= nil then + out.fetched_tuples_cnt = out.fetched_tuples_cnt + 1 + end + return new_state, tuple + end + return new_gen, param, state + end + } + }) end --- Get primary index to perform `:pairs()` (fullscan). @@ -44,7 +64,7 @@ end --- @return index or nil local function get_primary_index(self, collection_name) check(self, 'self', 'table') - return box.space[collection_name].index[0] + return self.funcs.get_index(self, collection_name, 0) end --- Convert a tuple to an object. 
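The `get_index` wrapper added to `accessor_space.lua` above changes the `:pairs()` calling convention: the caller passes an extra `out` table and reads `fetches_cnt` and `fetched_tuples_cnt` from it after iteration, which is the pattern `invoke_select_internal` relies on. A self-contained sketch of that contract, with a plain Lua table standing in for a Tarantool index (names and data are illustrative only):

```lua
-- Sketch of the `out` statistics contract used by the wrapped :pairs() above.
-- A plain Lua table stands in for a Tarantool index here.
local fake_index = {
    tuples = {{1, 'Alice'}, {2, 'Bob'}},
    pairs = function(self, value, opts, out)
        out.fetches_cnt = 1
        out.fetched_tuples_cnt = 0
        local i = 0
        return function()
            i = i + 1
            local tuple = self.tuples[i]
            if tuple ~= nil then
                out.fetched_tuples_cnt = out.fetched_tuples_cnt + 1
                return i, tuple
            end
            return nil
        end
    end,
}

-- caller side: the same pattern invoke_select_internal() uses
local out = {}
for _, tuple in fake_index:pairs(nil, {}, out) do
    assert(type(tuple) == 'table') -- process the tuple
end
print(out.fetches_cnt, out.fetched_tuples_cnt) --> 1    2
```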
@@ -192,8 +212,14 @@ function accessor_space.new(opts, funcs)
         insert_tuple = funcs.insert_tuple or insert_tuple,
         update_tuple = funcs.update_tuple or update_tuple,
         delete_tuple = funcs.delete_tuple or delete_tuple,
+        cache_fetch = funcs.cache_fetch or nil,
+        -- cache_delete = funcs.cache_delete or nil,
+        cache_truncate = funcs.cache_truncate or nil,
+        cache_lookup = funcs.cache_lookup or nil,
     }
+    local opts = table.copy(opts)
+    opts.name = 'space'
     return accessor_general.new(opts, res_funcs)
 end
diff --git a/graphql/bfs_executor.lua b/graphql/bfs_executor.lua
new file mode 100644
index 0000000..473759b
--- /dev/null
+++ b/graphql/bfs_executor.lua
@@ -0,0 +1,1120 @@
+--- Breadth-first executor with support of batching of similar requests.
+---
+--- ## The execution loop
+---
+--- The execution loop comprises a series of two main steps: preparing
+--- get/select requests for a data accessor and actually performing these
+--- requests. The executor traverses the query tree in breadth-first order,
+--- so it keeps the list of requests prepared on the previous iteration,
+--- called `open_set`.
+---
+--- An iteration starts with extracting a prepared request (this data
+--- structure is called `prepared_object`) from `open_set`; then the executor
+--- resolves it (performs the actual data access request using
+--- `prepared_resolve:invoke()`), filters the needed fields (inside
+--- `filter_object()`) and 'opens' child nodes by preparing further
+--- requests (forming the `fields_info` structure in `filter_object()`). The
+--- prepared requests are then added to the end of `open_set` to be processed
+--- on the next iterations.
+---
+--- The filtering of an object (`filter_object()`) performs deep filtering of
+--- inner fields which are requested by a user in a query (see
+--- `filter_value()`). This auxiliary traversal does not involve data
+--- accessor calls and is made in depth-first order.
+---
+--- Additional steps are made to minimize the actual count of network
+--- requests; they are described in the following sections.
+---
+--- ## Batching similar requests
+---
+--- The executor analyzes prepared requests (`open_set`) and forms so-called
+--- 'batches' to pull the results of similar data accessor requests within
+--- one network request. This is why the prepared request stage is needed.
+--- Similar requests are ones which have the same collection, the same index
+--- and the same iterator options (say, 'GT'), but different keys. The
+--- analysis is performed for each level of the traversed tree, so a
+--- `squash_marker` item is added to `open_set` to distinguish one level from
+--- the following one.
+---
+--- The analysis generates 'batches' (in `fetch_first_same()`) and passes them
+--- to the `fetch()` function of the `accessor_shard_cache` instance, which
+--- performs the requests using a stored procedure on storage servers and
+--- saves the results into the cache. Then the executor continues the loop as
+--- usual. Data access requests performed inside `prepared_resolve:invoke()`
+--- will use the cached data (when `accessor_shard` is used). The cached data
+--- are cleared at the end of the query execution (because of the cache only
+--- requests described later).
+---
+--- This approach reduces the overall count of network requests (but makes
+--- them heavier), so the time spent stalled on awaiting network requests is
+--- reduced too. The gain is bigger when the round trip time (the delay to
+--- the first byte of a request result) is larger.
+---
+--- The analysis and the prefetching are performed separately for the
+--- so-called `prepared_object_list` structure (in `fetch_resolve_list()`),
+--- because `prepared_object`s from one 'list' request are processed inside
+--- one iteration of the execution loop (in `invoke_resolve_list()`). This is
+--- tech debt and hopefully will be changed in the future.
+---
+--- ## Filtering over a connection
+---
+--- Normally a request fetches objects from one collection and the prefetching
+--- effectively reduces the count of network requests. But there is a special
+--- kind of request that involves fetching objects from other collections:
+--- requests with an argument named after a connection, with the semantics
+--- 'select such objects from the current collection for which the connected
+--- object matches the argument'.
+---
+--- The resolving function (`prepared_resolve:invoke()`) of a prepared request
+--- performs such auxiliary requests itself, but we need to prefetch the
+--- connected objects to still use the network effectively.
+---
+--- The executor generates so-called cache only prepared requests which are
+--- needed only to analyze and prefetch the needed objects from connected
+--- collections using the `fetch_first_same()` function. Such requests are
+--- added to the separate queue `cache_only_open_set`, which has precedence
+--- over the `open_set` one. So the needed objects will be prefetched in a
+--- batch and cached before the 'real' request tries to access them.
+---
+--- The important detail here is that we need an actual object to fetch its
+--- connected object. So the executor generates a request without
+--- 'connection arguments' for the current object and places it into
+--- `cache_only_open_set`. The request is formed in such a way that it leads
+--- to deep fetching of its connected objects on further iterations of the
+--- execution loop (see `generate_cache_only_request()`). The child requests
+--- of a cache only request are added to the end of the `cache_only_open_set`
+--- queue.
+---
+--- The cache only requests are the reason why we cannot clear the data cache
+--- at the end of processing the current tree level: the executor goes down
+--- the tree with cache only requests and then continues processing the
+--- current level with 'real' requests. This is why the cached data are
+--- cleared at the end of the query execution.
+---
+--- ## Data structures
+---
+--- The main data structure of this module is the list of requests to be
+--- processed:
+---
+--- open_set = {
+---     prepared_object_list = ,
+---     ... or ...
+---     prepared_object = {
+---         filtered_object = <...>,
+---         fields_info = {
+---             [field_name] = {
+---                 is_list =
+---                 kind = <...>,
+---                 prepared_resolve = {
+---                     is_calculated = true,
+---                     objs = <...>,
+---                     ... or ...
+---                     is_calculated = false,
+---                     prepared_select = {
+---                         request_opts = {
+---                             index = ,
+---                             index_name = ,
+---                             index_value = ,
+---                             iterator_opts = <...>,
+---                             is_full_scan = ,
+---                         }
+---                         select_state = {
+---                             count = ,
+---                             objs = ,
+---                             pivot_found = ,
+---                             qcontext = ,
+---                         },
+---                         select_opts = {
+---                             model = ,
+---                             limit = ,
+---                             filter =
, +--- do_filter = , +--- pivot_filter =
, +--- resolveField = , +--- is_hidden = , +--- }, +--- collection_name = , +--- from = <...>, +--- filter = <...>, +--- args = <...>, +--- extra = { +--- qcontext =
, +--- resolveField = , -- for +--- -- subrequests +--- extra_args =
, +--- exp_tuple_count = , +--- }, +--- }, +--- accessor = <...>, +--- connection = <...>, +--- invoke = , +--- }, +--- selections = { +--- { +--- name = { +--- value = , -- field name +--- }, +--- kind = 'field', +--- selectionSet = { +--- selections = <...>, +--- }, +--- arguments = { +--- { +--- name = { +--- value = , -- argument name +--- }, +--- value = <...>, +--- }, +--- ... +--- }, +--- coerced_arguments = <...>, +--- }, +--- ... +--- } +--- }, +--- ... +--- } +--- } +--- } + +local utils = require('graphql.utils') +local core_util = require('graphql.core.util') +local core_types = require('graphql.core.types') +local core_query_util = require('graphql.core.query_util') +local core_validate_variables = require('graphql.core.validate_variables') +local core_introspection = require('graphql.core.introspection') +local request_batch = require('graphql.request_batch') + +-- XXX: Possible cache_only requests refactoring. Maybe just set +-- `is_cache_only` flag in prepared_object, find such request in within the +-- execution loop and perform before others. This can heavily simplify the +-- code, because allows to avoid separate `cache_only_*` structures, but +-- involves extra `open_set` scan. + +-- XXX: Possible singleton/list requests refactoring. We can add +-- `prepared_object`s of a list request to `open_set` as separate items and +-- work with them as with singleton ones. It allows to remove +-- `fetch_resolve_list` function and just use `fetch_first_same`, but requires +-- to 'open' all list requests up to the marker to saturate +-- `cache_only_open_set` and only then switch between cache_only / plain +-- requests (make it on the marker). +-- +-- Maybe we also can remove separate `is_list = true` case processing over the +-- code that can havily simplify things. + +-- XXX: It would be more natural to have list of tables with field_info content +-- + field_name instead of fields_info to handle each prepared resolve +-- separatelly and add ability to reorder it. + +local bfs_executor = {} + +-- forward declarations +local fetch_first_same +local fetch_resolve_list + +-- Generate cache only requests for filters over connections {{{ + +--- Convert per-connection filters to selections (recursively). +local function filters_to_selections(bare_object_type, filters) + local selections = {} + for k, v in pairs(filters) do + local field_type = bare_object_type.fields[k] + assert(field_type ~= nil, 'field_type must not be nil') + if field_type.prepare_resolve ~= nil then + -- per-connection field + assert(type(v) == 'table', + 'per-connection field filter must be a table, got ' .. 
type(v)) + + local new_selection = { + name = { + value = k + }, + kind = 'field', + -- selectionSet is nil + -- coerced_arguments is nil + } + table.insert(selections, new_selection) + + local bare_inner_type = core_types.bare(field_type.kind) + local child_selections = filters_to_selections(bare_inner_type, v) + if next(child_selections) ~= nil then + new_selection.selectionSet = {} + new_selection.selectionSet.selections = child_selections + end + + -- add arguments on local collection fields + for child_k, child_v in pairs(v) do + local is_sel_found = false + for _, sel in ipairs(child_selections) do + if sel.name.value == child_k then + is_sel_found = true + end + end + if not is_sel_found then + if new_selection.coerced_arguments == nil then + new_selection.coerced_arguments = {} + end + assert(new_selection.coerced_arguments[child_k] == nil, + 'found two selections with the same name') + new_selection.coerced_arguments[child_k] = child_v + end + end + end + end + return selections +end + +--- Generate a cache only request for a filter over a connection. +local function generate_cache_only_request(prepared_resolve, field_type, + object, is_list, args, info) + if prepared_resolve.is_calculated then + return nil + end + + local inner_type = field_type.kind + local bare_inner_type = core_types.bare(inner_type) + local connection_filters = {} + local local_args = {} + + local filter = prepared_resolve.prepared_select.filter + for k, v in pairs(filter) do + local connection_field_type = bare_inner_type.fields[k] + assert(connection_field_type ~= nil, + 'internal error: connection_field_type should not be nil') + local is_connection_field = + connection_field_type.prepare_resolve ~= nil + if is_connection_field then + connection_filters[k] = v + end + end + + for k, v in pairs(args) do + if connection_filters[k] == nil then + local_args[k] = v + end + end + + -- create cache only requests for requests with 'by connection' filters and + -- without extra arguments (like mutation arguments) + local extra_args = prepared_resolve.prepared_select.extra.extra_args or {} + if next(connection_filters) == nil or next(extra_args) ~= nil then + return nil + end + + -- cache_only_prepared_resolve is the same as prepared_resolve, but it does + -- not require fetching connected objects to fetch and return this one + local cache_only_prepared_resolve = field_type.prepare_resolve( + object, local_args, info, {is_hidden = true}) + local cache_only_selections = filters_to_selections( + bare_inner_type, filter) + return { + is_list = is_list, + kind = inner_type, + prepared_resolve = cache_only_prepared_resolve, + selections = cache_only_selections, + } +end + +-- }}} + +-- Filter object / create prepared resolve {{{ + +local function get_argument_values(field_type, selection, variables) + local args = {} + for argument_name, value in pairs(selection.coerced_arguments or {}) do + args[argument_name] = value + end + for _, argument in ipairs(selection.arguments or {}) do + local argument_name = argument.name.value + assert(argument_name ~= nil, 'argument_name must not be nil') + local argument_type = field_type.arguments[argument_name] + assert(argument_type ~= nil, + ('cannot find argument "%s"'):format(argument_name)) + local value = core_util.coerceValue(argument.value, argument_type, + variables, {strict_non_null = true}) + args[argument_name] = value + end + return args +end + +local function evaluate_selections(object_type, selections, context) + assert(object_type.__type == 'Object') + + local 
selections_per_fields = {} + + local fields = core_query_util.collectFields(object_type, selections, {}, + {}, context) + + for _, field in ipairs(fields) do + assert(selections_per_fields[field.name] == nil, + 'two selections into the one field: ' .. field.name) + assert(field.selection ~= nil) + selections_per_fields[field.name] = field.selection + end + + return selections_per_fields +end + +--- Select fields from an object value, preprocess an other value +--- appropriately. +local function filter_value(value, value_type, selections, context) + if value_type.__type == 'NonNull' then + if value == nil then + error('No value provided for non-null ' .. + (value_type.name or value_type.__type)) + end + value_type = core_types.nullable(value_type) + end + + if value == nil then + return nil + end + + if value_type.__type == 'Scalar' or value_type.__type == 'Enum' then + return value_type.serialize(value) + elseif value_type.__type == 'List' then + local child_type = value_type.ofType + assert(child_type ~= nil) + assert(type(value) == 'table') + assert(utils.is_array(value)) + + local res = {} + for _, child_value in ipairs(value) do + table.insert(res, filter_value(child_value, child_type, selections, + context)) + end + + return res + elseif value_type.__type == 'Object' then + -- note: the code is pretty same as filter_object, but forbid + -- prepare_resolve attribute (because we have no such nested objects) + -- and avoid construction of fields_info and similar structures + assert(type(value) == 'table') + local selections_per_fields = evaluate_selections(value_type, + selections, context) + local res = {} + + for field_name, selection in pairs(selections_per_fields) do + local field_type = core_introspection.fieldMap[field_name] or + value_type.fields[field_name] + assert(field_type ~= nil) + assert(field_type.prepare_resolve == nil, + 'resolving inside nested records in not supported') + + local child_value = value[field_name] + local child_type = field_type.kind + + if field_type.resolve ~= nil then + local info = { + schema = context.schema, + parentObject = value_type, + } + -- args parameter is always empty list + child_value = field_type.resolve(value, {}, info) + end + + local child_selections = selection.selectionSet ~= nil and + selection.selectionSet.selections or {} + + assert(res[field_name] == nil) + res[field_name] = filter_value(child_value, child_type, + child_selections, context) + end + + return res + elseif value_type.__type == 'Union' then + local resolved_type = value_type.resolveType(value) + return filter_value(value, resolved_type, selections, context) + end + + error('Unknown type: ' .. tostring(value_type.__type)) +end + +--- Select fields from fetched object and create prepared resolve functions for +--- connection fields. 
+--- +--- @tparam table object (can be nil) +--- +--- @tparam table object_type GraphQL type +--- +--- @tparam table selections structure describing fields should be shown in the +--- query result and arguments to pass to these fields +--- +--- @tparam table context the following structure: +--- +--- { +--- schema = schema, +--- variables = variables, +--- fragmentMap = fragmentMap, +--- } +--- +--- @tparam table qcontext the following options: +--- +--- * is_item_cache_only +--- * qcontext: query-local storage for various purposes +--- +--- @treturn table `prepared` of the following format: +--- +--- prepared = { +-- cache_only_prepared_object = <...>, +-- prepared_object = <...>, +--- } +--- +--- `cache_only_prepared_object` and `prepared_object` has the following +--- structure: +--- +--- [cache_only_]prepared_object = { +--- filtered_object = <...fields from ...>, +--- fields_info = +--- [field_name] = { +--- is_list = , +--- kind = <...>, +--- prepared_resolve = <...>, +--- selections = <...>, +--- }, +--- ... +--- } +--- } +local function filter_object(object, object_type, selections, context, opts) + local opts = opts or {} + local qcontext = opts.qcontext + local is_item_cache_only = opts.is_item_cache_only or false + + local nullable_object_type = core_types.nullable(object_type) + + if object_type.__type == 'NonNull' then + if object == nil then + error('No value provided for non-null ' .. + (nullable_object_type.name or nullable_object_type.__type)) + end + object_type = nullable_object_type + end + + if object == nil then + return { + -- cache_only_prepared_object is nil + -- prepared_object is nil + } + end + + assert(object_type.__type == 'Object') + + local selections_per_fields = evaluate_selections(object_type, selections, + context) + + local filtered_object = {} + local cache_only_fields_info = {} + local fields_info = {} + + for field_name, selection in pairs(selections_per_fields) do + local field_type = core_introspection.fieldMap[field_name] or + object_type.fields[field_name] + assert(field_type ~= nil) + local child_selections = selection.selectionSet ~= nil and + selection.selectionSet.selections or {} + + local inner_type = field_type.kind + local nullable_inner_type = core_types.nullable(inner_type) + local is_list = nullable_inner_type.__type == 'List' + + local args + local info + + if field_type.prepare_resolve ~= nil or field_type.resolve ~= nil then + args = get_argument_values(field_type, selection, context.variables) + info = { + qcontext = qcontext, + schema = context.schema, + parentObject = object_type, + } + end + + if field_type.prepare_resolve then + local prepared_resolve = field_type.prepare_resolve(object, args, + info, {is_hidden = is_item_cache_only}) + + fields_info[field_name] = { + is_list = is_list, + kind = inner_type, + prepared_resolve = prepared_resolve, + selections = child_selections, + } + + local cache_only_fields_info_item = + generate_cache_only_request(prepared_resolve, field_type, + object, is_list, args, info) + if cache_only_fields_info_item ~= nil then + cache_only_fields_info[field_name] = cache_only_fields_info_item + end + else + local value = object[field_name] + if field_type.resolve ~= nil then + value = field_type.resolve(object, args, info) + end + assert(filtered_object[field_name] == nil) + filtered_object[field_name] = filter_value( + value, inner_type, child_selections, context) + end + end + + local cache_only_prepared_object + if next(cache_only_fields_info) ~= nil then + cache_only_prepared_object = { + 
filtered_object = filtered_object, + fields_info = cache_only_fields_info, + } + end + local prepared_object = { + filtered_object = filtered_object, + fields_info = fields_info, + } + return { + cache_only_prepared_object = cache_only_prepared_object, + prepared_object = prepared_object, + } +end + +local function filter_object_list(object_list, object_type, selections, context, + opts) + local opts = opts or {} + local qcontext = opts.qcontext + local is_item_cache_only = opts.is_item_cache_only or false + + local nullable_object_type = core_types.nullable(object_type) + assert(nullable_object_type.__type == 'List') + + if object_type.__type == 'NonNull' then + if object_list == nil then + error('No value provided for non-null ' .. + (nullable_object_type.name or nullable_object_type.__type)) + end + object_type = nullable_object_type + end + + assert(object_type.__type == 'List') + object_type = object_type.ofType + assert(object_type ~= nil) + + local prepared_object_list = {} + local cache_only_prepared_object_list = {} + + if object_list == nil then + object_list = nil -- box.NULL -> nil + end + + for _, object in ipairs(object_list or {}) do + local prepared = filter_object(object, object_type, selections, context, + {qcontext = qcontext, is_item_cache_only = is_item_cache_only}) + local cache_only_prepared_object = prepared.cache_only_prepared_object + local prepared_object = prepared.prepared_object + table.insert(cache_only_prepared_object_list, + cache_only_prepared_object) + table.insert(prepared_object_list, prepared_object) + end + + return { + prepared_object_list = prepared_object_list, + cache_only_prepared_object_list = cache_only_prepared_object_list, + } +end + +-- }}} + +-- Resolve prepared requests and call object filtering {{{ + +local function invoke_resolve(prepared_object, context, opts) + local opts = opts or {} + local qcontext = opts.qcontext + local is_item_cache_only = opts.is_item_cache_only or false + + local cache_only_open_set = {} + local open_set = {} + + for field_name, field_info in pairs(prepared_object.fields_info) do + local object_or_list + local object_type + if field_info.prepared_resolve.is_calculated then + object_or_list = field_info.prepared_resolve.objs + else + object_or_list, object_type = field_info.prepared_resolve:invoke() + end + object_type = object_type or field_info.kind + local selections = field_info.selections + + local child_cache_only + local child + + if field_info.is_list then + local child_prepared_list = filter_object_list( + object_or_list, object_type, selections, context, + {qcontext = qcontext, is_item_cache_only = is_item_cache_only}) + -- don't perform construction for cache_only objects + local child_cache_only_prepared_object_list = + child_prepared_list.cache_only_prepared_object_list + local child_prepared_object_list = + child_prepared_list.prepared_object_list + + -- construction + if not is_item_cache_only then + prepared_object.filtered_object[field_name] = {} + for _, child_prepared_object in + ipairs(child_prepared_object_list) do + table.insert(prepared_object.filtered_object[field_name], + child_prepared_object.filtered_object) + end + end + + if next(child_prepared_object_list) ~= nil then + child = { + prepared_object_list = child_prepared_object_list, + } + end + + if next(child_cache_only_prepared_object_list) ~= nil then + child_cache_only = { + prepared_object_list = child_cache_only_prepared_object_list, + } + end + else + local child_prepared = filter_object(object_or_list, + object_type, 
selections, context, {qcontext = qcontext, + is_item_cache_only = is_item_cache_only}) + -- don't perform construction for cache_only objects + local child_cache_only_prepared_object = + child_prepared.cache_only_prepared_object + local child_prepared_object = child_prepared.prepared_object + + if child_prepared_object ~= nil then + -- construction + if not is_item_cache_only then + prepared_object.filtered_object[field_name] = + child_prepared_object.filtered_object + end + + child = { + prepared_object = child_prepared_object, + } + end + + if child_cache_only_prepared_object ~= nil then + child_cache_only = { + prepared_object = child_cache_only_prepared_object, + } + end + end + + -- add to cache_only_open_set when we catch the object from it + if is_item_cache_only then + table.insert(cache_only_open_set, child) + else + table.insert(open_set, child) + end + table.insert(cache_only_open_set, child_cache_only) + end + + return { + cache_only_open_set = cache_only_open_set, + open_set = open_set, + } +end + +local function invoke_resolve_list(prepared_object_list, context, opts) + local opts = opts or {} + local qcontext = opts.qcontext + local accessor = opts.accessor + local is_item_cache_only = opts.is_item_cache_only or false + local max_batch_size = opts.max_batch_size + + local open_set = {} + local cache_only_open_set = {} + + local last_fetched_object_num = 0 + for i, prepared_object in ipairs(prepared_object_list) do + if i > last_fetched_object_num then + local _, size = fetch_resolve_list(prepared_object_list, + {accessor = accessor, qcontext = qcontext, + max_batch_size = max_batch_size, start_from = i, + force_caching = is_item_cache_only}) + last_fetched_object_num = last_fetched_object_num + size + end + + local child = invoke_resolve(prepared_object, context, + {qcontext = qcontext, is_item_cache_only = is_item_cache_only}) + local child_open_set = child.open_set + local child_cache_only_open_set = child.cache_only_open_set + + utils.expand_list(open_set, child_open_set) + utils.expand_list(cache_only_open_set, child_cache_only_open_set) + end + + return { + open_set = open_set, + cache_only_open_set = cache_only_open_set, + } +end + +-- }}} + +-- Analyze prepared requests and prefetch in batches {{{ + +fetch_first_same = function(open_set, opts) + local func_name = 'bfs_executor.fetch_first_same' + local opts = opts or {} + local accessor = opts.accessor + local qcontext = opts.qcontext + local max_batch_size = opts.max_batch_size + local force_caching = opts.force_caching or false + + if not accessor:cache_is_supported() then return nil, 0 end + + local size = 0 + + local batches = {} + for i, item in ipairs(open_set) do + if i > max_batch_size then break end + if item.prepared_object == nil then break end + local prepared_object = item.prepared_object + + for field_name, field_info in pairs(prepared_object.fields_info) do + local prepared_resolve = field_info.prepared_resolve + if prepared_resolve.is_calculated then + size = i + break + end + local batch = request_batch.from_prepared_resolve(prepared_resolve) + + if i == 1 then + assert(batches[field_name] == nil, + ('internal error: %s: field names "%s" clash'):format( + func_name, field_name)) + batches[field_name] = batch + size = i + else + local ok = batches[field_name] ~= nil and + batches[field_name]:compare_bins(batch) + if not ok then break end + table.insert(batches[field_name].keys, batch.keys[1]) + size = i + end + end + end + + -- don't flood cache with single-key (non-batch) select results + if not 
force_caching and size <= 1 then + return nil, size + end + + local fetch_id = accessor:cache_fetch(batches, qcontext) + return fetch_id, size +end + +fetch_resolve_list = function(prepared_object_list, opts) + local func_name = 'bfs_executor.fetch_resolve_list' + local opts = opts or {} + local accessor = opts.accessor + local qcontext = opts.qcontext + local max_batch_size = opts.max_batch_size + local start_from = opts.start_from or 1 + local force_caching = opts.force_caching or false + + if not accessor:cache_is_supported() then return nil, 0 end + + local size = 0 + + local batches = {} + for i = 1, #prepared_object_list - start_from + 1 do + if i > max_batch_size then break end + local prepared_object = prepared_object_list[i + start_from - 1] + + for field_name, field_info in pairs(prepared_object.fields_info) do + local prepared_resolve = field_info.prepared_resolve + if prepared_resolve.is_calculated then + size = i + break + end + local batch = request_batch.from_prepared_resolve(prepared_resolve) + + if i == 1 then + assert(batches[field_name] == nil, + ('internal error: %s: field names "%s" clash'):format( + func_name, field_name)) + batches[field_name] = batch + size = i + else + local ok, err = batches[field_name]:compare_bins_extra(batch) + if not ok then + error(('internal error: %s: %s'):format(func_name, err)) + end + table.insert(batches[field_name].keys, batch.keys[1]) + size = i + end + end + end + + -- don't flood cache with single-key (non-batch) select results + if not force_caching and size <= 1 then + return nil, size + end + + local fetch_id = accessor:cache_fetch(batches, qcontext) + return fetch_id, size +end + +-- }}} + +-- Reorder requests before add to open_set {{{ + +local function expand_open_set(open_set, child_open_set, opts) + local opts = opts or {} + local accessor = opts.accessor + + if not accessor:cache_is_supported() then + utils.expand_list(open_set, child_open_set) + return + end + + local item_bin_to_ordinal = {} + local items_per_ordinal = {} + local next_ordinal = 1 + + -- Create histogram-like 'items_per_ordinal' structure with lists of items. + -- Each list contain items of the same kind (with the same bin value). + -- Ordinals of the bins are assigned in order of appear in child_open_set. + for _, item in ipairs(child_open_set) do + if item.prepared_object_list ~= nil then + local ordinal = next_ordinal + assert(items_per_ordinal[ordinal] == nil) + items_per_ordinal[ordinal] = {} + next_ordinal = next_ordinal + 1 + table.insert(items_per_ordinal[ordinal], item) + else + local prepared_object = item.prepared_object + assert(prepared_object ~= nil) + assert(prepared_object.fields_info ~= nil) + + local batch_bins = {} + for field_name, field_info in pairs(prepared_object.fields_info) do + local prepared_resolve = field_info.prepared_resolve + if prepared_resolve.is_calculated then + table.insert(batch_bins, field_name .. ':') + else + local batch = request_batch.from_prepared_resolve( + prepared_resolve) + table.insert(batch_bins, field_name .. ':' .. 
batch:bin()) + end + end + + local item_bin = table.concat(batch_bins, ';') + local ordinal = item_bin_to_ordinal[item_bin] + if ordinal == nil then + item_bin_to_ordinal[item_bin] = next_ordinal + ordinal = next_ordinal + assert(items_per_ordinal[ordinal] == nil) + items_per_ordinal[ordinal] = {} + next_ordinal = next_ordinal + 1 + end + table.insert(items_per_ordinal[ordinal], item) + end + end + + -- add items from child_open_set in ordinals order to open_set + for _, items in ipairs(items_per_ordinal) do + utils.expand_list(open_set, items) + end +end + +-- }}} + +-- Debugging {{{ + +local function prepared_object_digest(prepared_object) + local json = require('json') + local digest = { + ['='] = prepared_object.filtered_object, + } + for k, v in pairs(prepared_object.fields_info) do + if v.prepared_resolve.is_calculated then + digest[k] = '' + else + local prepared_select = v.prepared_resolve.prepared_select + local collection_name = prepared_select.collection_name + local request_opts = prepared_select.request_opts + local key = request_opts.index_value or box.NULL + local filter = prepared_select.filter + digest[k] = { + c = collection_name, + k = key, + f = filter, + } + end + end + return json.encode(digest) +end + +local function open_set_tostring(open_set, name) + local res = ('\n==== %s ====\n'):format(name) + for _, item in ipairs(open_set) do + if item.prepared_object ~= nil then + local digest = prepared_object_digest(item.prepared_object) + res = res .. '\nprepared_object: ' .. digest + elseif item.prepared_object_list ~= nil then + res = res .. '\nprepared_object_list:' + for _, prepared_object in ipairs(item.prepared_object_list) do + local digest = prepared_object_digest(prepared_object) + res = res .. '\n ' .. digest + end + elseif item.squash_marker ~= nil then + if item.fetch_id ~= nil then + res = res .. '\nsquash marker: ' .. tostring(item.fetch_id) + else + res = res .. '\nsquash marker' + end + else + res = res .. '\nunknown open_set item' + end + end + return res +end + +-- }}} + +-- The main execution loop {{{ + +--- Execute a GraphQL query. 
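+---
+--- A hedged usage sketch (not new code in this patch; it follows the call
+--- made from impl.lua, with placeholder variable names):
+---
+---     local result = bfs_executor.execute(state.schema, qstate.ast,
+---         variables, operation_name, {
+---             qcontext = qcontext,
+---             accessor = state.accessor,
+---             max_batch_size = 1000, -- DEF_MAX_BATCH_SIZE in impl.lua
+---         })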
+--- +--- @tparam table schema +--- +--- @tparam table query_ast +--- +--- @tparam table variables +--- +--- @tparam string operation_name +--- +--- @tparam table opts the following options: +--- +--- * qcontext +--- * accessor +--- * max_batch_size +--- +--- @treturn table result of the query +function bfs_executor.execute(schema, query_ast, variables, operation_name, opts) + local opts = opts or {} + local qcontext = opts.qcontext + local accessor = opts.accessor + local max_batch_size = opts.max_batch_size + + local operation = core_query_util.getOperation(query_ast, operation_name) + local root_object_type = schema[operation.operation] + assert(root_object_type ~= nil, + ('cannot find root type for operation "%s"'):format(operation_name)) + local root_selections = operation.selectionSet.selections + + local fragmentMap = core_query_util.getFragmentDefinitions(query_ast) + local context = { + schema = schema, + variables = variables, + fragmentMap = fragmentMap, + } + + local root_object = {} + + -- validate variables + local variableTypes = core_query_util.getVariableTypes(schema, operation) + core_validate_variables.validate_variables({ + variables = variables, + variableTypes = variableTypes, + }) + + local prepared_root = filter_object( + root_object, root_object_type, root_selections, context, + {qcontext = qcontext}) + local prepared_root_object = prepared_root.prepared_object + local cache_only_prepared_root_object = + prepared_root.cache_only_prepared_object + local filtered_root_object = prepared_root_object.filtered_object + + local cache_only_open_set = {} + if cache_only_prepared_root_object ~= nil then + table.insert(cache_only_open_set, { + prepared_object = cache_only_prepared_root_object + }) + end + + local open_set = {} + if prepared_root_object ~= nil then + table.insert(open_set, { + prepared_object = prepared_root_object + }) + end + + table.insert(cache_only_open_set, 1, {squash_marker = true}) + table.insert(open_set, 1, {squash_marker = true}) + + while true do + -- don't perform cache only requests if cache is not supported by the + -- accessor + if not accessor:cache_is_supported() then + cache_only_open_set = {} + end + + utils.debug(open_set_tostring, cache_only_open_set, + 'cache only open set') + utils.debug(open_set_tostring, open_set, 'open set') + + local item + local is_item_cache_only = next(cache_only_open_set) ~= nil + if is_item_cache_only then + item = table.remove(cache_only_open_set, 1) + else + item = table.remove(open_set, 1) + end + + utils.debug(open_set_tostring, {item}, 'item (before)') + + if item == nil then break end + if item.prepared_object ~= nil then + local child = invoke_resolve(item.prepared_object, context, + {qcontext = qcontext, + is_item_cache_only = is_item_cache_only}) + local child_cache_only_open_set = child.cache_only_open_set + local child_open_set = child.open_set + expand_open_set(cache_only_open_set, child_cache_only_open_set, + {accessor = accessor}) + expand_open_set(open_set, child_open_set, {accessor = accessor}) + elseif item.prepared_object_list ~= nil then + local child = invoke_resolve_list(item.prepared_object_list, + context, {qcontext = qcontext, accessor = accessor, + is_item_cache_only = is_item_cache_only, + max_batch_size = max_batch_size}) + local child_cache_only_open_set = child.cache_only_open_set + local child_open_set = child.open_set + expand_open_set(cache_only_open_set, child_cache_only_open_set, + {accessor = accessor}) + expand_open_set(open_set, child_open_set, {accessor = accessor}) + 
elseif item.squash_marker ~= nil then + local open_set_to_fetch = is_item_cache_only and + cache_only_open_set or open_set + local fetch_id, size = fetch_first_same(open_set_to_fetch, + {accessor = accessor, qcontext = qcontext, + max_batch_size = max_batch_size, + force_caching = is_item_cache_only}) + if #open_set_to_fetch > 0 then + table.insert(open_set_to_fetch, math.max(2, size + 1), { + squash_marker = true, + fetch_id = fetch_id, + }) + end + else + assert(false, 'unknown open_set item format') + end + + utils.debug(open_set_tostring, {item}, 'item (after)') + end + + accessor:cache_truncate() + + return filtered_root_object +end + +-- }}} + +return bfs_executor diff --git a/graphql/convert_schema/resolve.lua b/graphql/convert_schema/resolve.lua index 557d9bc..4e6dd0e 100644 --- a/graphql/convert_schema/resolve.lua +++ b/graphql/convert_schema/resolve.lua @@ -85,11 +85,33 @@ local function separate_args_instance(args_instance, arguments) } end +local function invoke_resolve(prepared_resolve) + if prepared_resolve.is_calculated then + return prepared_resolve.objs + end + + local prepared_select = prepared_resolve.prepared_select + -- local opts = prepared_resolve.opts + local accessor = prepared_resolve.accessor + local c = prepared_resolve.connection + + local objs = accessor:invoke_select(prepared_select) + assert(type(objs) == 'table', + 'objs list received from an accessor ' .. + 'must be a table, got ' .. type(objs)) + if c.type == '1:1' then + return objs[1] -- nil for empty list of matching objects + else -- c.type == '1:N' + return objs + end +end + function resolve.gen_resolve_function(collection_name, connection, destination_type, arguments, accessor, opts) local c = connection local opts = opts or {} local disable_dangling_check = opts.disable_dangling_check or false + local gen_prepare = opts.gen_prepare or false local bare_destination_type = core_types.bare(destination_type) -- capture `bare_destination_type` @@ -99,6 +121,8 @@ function resolve.gen_resolve_function(collection_name, connection, ('performing a subrequest by the non-existent ' .. 'field "%s" of the collection "%s"'):format(field_name, c.destination_collection)) + local opts = table.copy(opts or {}) + opts.is_hidden = true return bare_destination_type.fields[field_name].resolve( object, filter, info, opts) end @@ -108,9 +132,9 @@ function resolve.gen_resolve_function(collection_name, connection, -- genResolveField, arguments, accessor return function(parent, args_instance, info, opts) local opts = opts or {} - assert(type(opts) == 'table', - 'opts must be nil or a table, got ' .. type(opts)) - -- no opts for now + check(opts, 'opts', 'table') + local is_hidden = opts.is_hidden or false + check(is_hidden, 'is_hidden', 'boolean') local from = gen_from_parameter(collection_name, parent, c) @@ -120,7 +144,19 @@ function resolve.gen_resolve_function(collection_name, connection, -- * return {} for 1:N connection (except the case when source -- collection is the query or the mutation pseudo-collection). if collection_name ~= nil and are_all_parts_null(parent, c.parts) then - return c.type == '1:N' and {} or nil + local objs = c.type == '1:N' and {} or nil + if gen_prepare then + return { + is_calculated = true, + objs = objs, + invoke = function() + -- error('internal error: should not be called') -- XXX: remove it? 
+ return objs + end, + } + else + return objs + end end local exp_tuple_count @@ -134,6 +170,7 @@ function resolve.gen_resolve_function(collection_name, connection, resolveField = resolveField, -- for subrequests extra_args = {}, exp_tuple_count = exp_tuple_count, + is_hidden = opts.is_hidden, } -- object_args_instance will be passed to 'filter' @@ -143,16 +180,30 @@ function resolve.gen_resolve_function(collection_name, connection, arguments) extra.extra_args = arguments_instance.extra - local objs = accessor:select(parent, - c.destination_collection, from, - arguments_instance.object, arguments_instance.list, extra) - assert(type(objs) == 'table', - 'objs list received from an accessor ' .. - 'must be a table, got ' .. type(objs)) - if c.type == '1:1' then - return objs[1] -- nil for empty list of matching objects - else -- c.type == '1:N' - return objs + if gen_prepare then + local prepared_select = accessor:prepare_select(parent, + c.destination_collection, from, + arguments_instance.object, arguments_instance.list, extra) + return { + is_calculated = false, + prepared_select = prepared_select, + -- opts = opts, + accessor = accessor, + connection = c, + invoke = invoke_resolve, + } + else + local objs = accessor:select(parent, + c.destination_collection, from, + arguments_instance.object, arguments_instance.list, extra) + assert(type(objs) == 'table', + 'objs list received from an accessor ' .. + 'must be a table, got ' .. type(objs)) + if c.type == '1:1' then + return objs[1] -- nil for empty list of matching objects + else -- c.type == '1:N' + return objs + end end end end @@ -161,6 +212,7 @@ function resolve.gen_resolve_function_multihead(collection_name, connection, union_types, var_num_to_box_field_name, accessor, opts) local opts = opts or {} local disable_dangling_check = opts.disable_dangling_check or false + local gen_prepare = opts.gen_prepare or false local c = connection local determinant_keys = utils.get_keys(c.variants[1].determinant) @@ -191,7 +243,12 @@ function resolve.gen_resolve_function_multihead(collection_name, connection, return res_var, var_idx, box_field_name end - return function(parent, _, info) + return function(parent, _, info, opts) + local opts = opts or {} + check(opts, 'opts', 'table') + local is_hidden = opts.is_hidden or false + check(is_hidden, 'is_hidden', 'boolean') + -- If a parent object does not have all source fields (for any of -- variants) non-null then we do not resolve variant and just return -- box.NULL. @@ -205,7 +262,14 @@ function resolve.gen_resolve_function_multihead(collection_name, connection, end if not is_source_fields_found then - return box.NULL, nil + if gen_prepare then + return { + is_calculated = true, + objs = box.NULL, + } + else + return box.NULL, nil + end end local v, variant_num, box_field_name = resolve_variant(parent) @@ -217,18 +281,32 @@ function resolve.gen_resolve_function_multihead(collection_name, connection, name = c.name, destination_collection = v.destination_collection, } - local opts = { + local gen_opts = { disable_dangling_check = disable_dangling_check, + gen_prepare = gen_prepare, } - -- XXX: generate a function for each variant at schema generation time - local result = resolve.gen_resolve_function(collection_name, - quazi_connection, destination_type, {}, accessor, opts)( - parent, {}, info) - - -- This 'wrapping' is needed because we use 'select' on 'collection' - -- GraphQL type and the result of the resolve function must be in - -- {'collection_name': {result}} format to be avro-valid. 
- return {[box_field_name] = result}, destination_type + -- XXX: generate a function (using gen_resolve_function) for each + -- variant once at schema generation time + if gen_prepare then + local result = resolve.gen_resolve_function(collection_name, + quazi_connection, destination_type, {}, accessor, gen_opts)( + parent, {}, info, opts) + result.connection = quazi_connection + result.invoke = function(prepared_resolve) + local result = invoke_resolve(prepared_resolve) + -- see comment below + return {[box_field_name] = result}, destination_type + end + return result + else + local result = resolve.gen_resolve_function(collection_name, + quazi_connection, destination_type, {}, accessor, gen_opts)( + parent, {}, info, opts) + -- This 'wrapping' is needed because we use 'select' on 'collection' + -- GraphQL type and the result of the resolve function must be in + -- {'collection_name': {result}} format to be avro-valid. + return {[box_field_name] = result}, destination_type + end end end diff --git a/graphql/convert_schema/types.lua b/graphql/convert_schema/types.lua index e409d0f..45d7aeb 100644 --- a/graphql/convert_schema/types.lua +++ b/graphql/convert_schema/types.lua @@ -158,14 +158,18 @@ local function convert_simple_connection(state, connection, collection_name) local opts = { disable_dangling_check = state.disable_dangling_check, } - local resolve_function = resolve.gen_resolve_function(collection_name, c, - destination_type, arguments, state.accessor, opts) + local resolve_function = resolve.gen_resolve_function( + collection_name, c, destination_type, arguments, state.accessor, opts) + opts.gen_prepare = true + local prepare_resolve_function = resolve.gen_resolve_function( + collection_name, c, destination_type, arguments, state.accessor, opts) local field = { name = c.name, kind = destination_type, arguments = c_args, resolve = resolve_function, + prepare_resolve = prepare_resolve_function, } return field @@ -285,6 +289,10 @@ local function convert_multihead_connection(state, connection, collection_name, local resolve_function = resolve.gen_resolve_function_multihead( collection_name, c, union_types, var_num_to_box_field_name, state.accessor, opts) + opts.gen_prepare = true + local prepare_resolve_function = resolve.gen_resolve_function_multihead( + collection_name, c, union_types, var_num_to_box_field_name, + state.accessor, opts) local field = { name = c.name, @@ -295,6 +303,7 @@ local function convert_multihead_connection(state, connection, collection_name, arguments = nil, -- see Border cases/Unions at the top of -- tarantool_graphql module description resolve = resolve_function, + prepare_resolve = prepare_resolve_function, } return field end diff --git a/graphql/core/query_util.lua b/graphql/core/query_util.lua index 16f13db..7538cf4 100644 --- a/graphql/core/query_util.lua +++ b/graphql/core/query_util.lua @@ -105,31 +105,22 @@ function query_util.mergeSelectionSets(fields) return selections end -function query_util.buildContext(schema, tree, rootValue, variables, operationName) - local context = { - schema = schema, - rootValue = rootValue, - variables = variables, - operation = nil, - fragmentMap = {}, - variableTypes = {}, - } +function query_util.getOperation(tree, operationName) + local operation for _, definition in ipairs(tree.definitions) do if definition.kind == 'operation' then - if not operationName and context.operation then + if not operationName and operation then error('Operation name must be specified if more than one operation exists.') end if not 
operationName or definition.name.value == operationName then - context.operation = definition + operation = definition end - elseif definition.kind == 'fragmentDefinition' then - context.fragmentMap[definition.name.value] = definition end end - if not context.operation then + if not operation then if operationName then error('Unknown operation "' .. operationName .. '"') else @@ -137,13 +128,45 @@ function query_util.buildContext(schema, tree, rootValue, variables, operationNa end end - -- Save variableTypes for the operation. - for _, definition in ipairs(context.operation.variableDefinitions or {}) do - context.variableTypes[definition.variable.name.value] = - query_util.typeFromAST(definition.type, context.schema) + return operation +end + +function query_util.getFragmentDefinitions(tree) + local fragmentMap = {} + + for _, definition in ipairs(tree.definitions) do + if definition.kind == 'fragmentDefinition' then + fragmentMap[definition.name.value] = definition + end + end + + return fragmentMap +end + +-- Extract variableTypes from the operation. +function query_util.getVariableTypes(schema, operation) + local variableTypes = {} + + for _, definition in ipairs(operation.variableDefinitions or {}) do + variableTypes[definition.variable.name.value] = + query_util.typeFromAST(definition.type, schema) end - return context + return variableTypes +end + +function query_util.buildContext(schema, tree, rootValue, variables, operationName) + local operation = query_util.getOperation(tree, operationName) + local fragmentMap = query_util.getFragmentDefinitions(tree) + local variableTypes = query_util.getVariableTypes(schema, operation) + return { + schema = schema, + rootValue = rootValue, + variables = variables, + operation = operation, + fragmentMap = fragmentMap, + variableTypes = variableTypes, + } end return query_util diff --git a/graphql/core/types.lua b/graphql/core/types.lua index 683ac31..6ee8a17 100644 --- a/graphql/core/types.lua +++ b/graphql/core/types.lua @@ -132,7 +132,9 @@ function initFields(kind, fields) description = field.description, deprecationReason = field.deprecationReason, arguments = field.arguments or {}, - resolve = kind == 'Object' and field.resolve or nil + resolve = kind == 'Object' and field.resolve or nil, + prepare_resolve = kind == 'Object' and field.prepare_resolve or nil, + invoke_resolve = kind == 'Object' and field.invoke_resolve or nil, } end diff --git a/graphql/core/util.lua b/graphql/core/util.lua index 9f6e826..5ae5a2d 100644 --- a/graphql/core/util.lua +++ b/graphql/core/util.lua @@ -177,8 +177,8 @@ function util.coerceValue(node, schemaType, variables, opts) if schemaType.__type == 'Scalar' then if schemaType.parseLiteral(node) == nil then - error(e.wrong_value('Could not coerce "%s" to "%s"'):format( - tostring(node.value), schemaType.name)) + error(e.wrong_value(('Could not coerce "%s" to "%s"'):format( + tostring(node.value), schemaType.name))) end return schemaType.parseLiteral(node) diff --git a/graphql/impl.lua b/graphql/impl.lua index d839c30..7656703 100644 --- a/graphql/impl.lua +++ b/graphql/impl.lua @@ -6,7 +6,9 @@ local accessor_shard = require('graphql.accessor_shard') local accessor_general = require('graphql.accessor_general') local parse = require('graphql.core.parse') local validate = require('graphql.core.validate') +local query_util = require('graphql.core.query_util') local execute = require('graphql.core.execute') +local bfs_executor = require('graphql.bfs_executor') local query_to_avro = require('graphql.query_to_avro') 
local simple_config = require('graphql.simple_config') local config_complement = require('graphql.config_complement') @@ -18,6 +20,9 @@ local check = utils.check local impl = {} +-- constants +local DEF_MAX_BATCH_SIZE = 1000 + -- Instance of the library to provide graphql:compile() and graphql:execute() -- method (with creating zero configuration graphql instance under hood when -- calling compile() for the first time). @@ -34,23 +39,70 @@ local default_instance --- @treturn table result of the operation local function gql_execute(qstate, variables, operation_name) assert(qstate.state) + assert(qstate.query_settings) local state = qstate.state assert(state.schema) + local max_batch_size = qstate.query_settings.max_batch_size or + state.max_batch_size check(variables, 'variables', 'table') check(operation_name, 'operation_name', 'string', 'nil') + check(max_batch_size, 'max_batch_size', 'number') - assert(qstate.query_settings) - local root_value = {} local qcontext = { query_settings = qstate.query_settings, } local traceback - local ok, data = xpcall(function() - return execute(state.schema, qstate.ast, root_value, variables, - operation_name, {qcontext = qcontext}) + local ok, data, executor_metainfo = xpcall(function() + assert(qstate.ast) + local operation = query_util.getOperation(qstate.ast, operation_name) + local operation_type = operation.operation + check(operation_type, 'operation_type', 'string') + assert(operation_type == 'query' or operation_type == 'mutation', + 'only "query" and "mutation" operation types are supported') + local accessor = state.accessor + local cfg_use_bfs_executor = qstate.query_settings.use_bfs_executor or + state.use_bfs_executor + + local use_bfs_executor = operation_type == 'query' + if use_bfs_executor then + if cfg_use_bfs_executor == 'never' then + use_bfs_executor = false + elseif cfg_use_bfs_executor == 'shard' then + use_bfs_executor = accessor.name == 'shard' + elseif cfg_use_bfs_executor == 'always' then + use_bfs_executor = true + else + error('Unknown use_bfs_executor: ' .. + tostring(state.use_bfs_executor)) + end + end + if use_bfs_executor then + local result = bfs_executor.execute(state.schema, qstate.ast, + variables, operation_name, { + qcontext = qcontext, + accessor = accessor, + max_batch_size = max_batch_size, + }) + local executor_metainfo = { + name = 'bfs', + } + return result, executor_metainfo + else + local root_value = {} + local result = execute(state.schema, qstate.ast, root_value, + variables, operation_name, {qcontext = qcontext}) + local executor_metainfo = { + name = 'dfs', + } + return result, executor_metainfo + end end, function(err) + -- XXX: store cache into query-local storage to ensure in will be + -- cleaned anyway; it is matter if some data will be changed between + -- GraphQL queries + state.accessor:cache_truncate() traceback = debug.traceback() return err end) @@ -62,6 +114,7 @@ local function gql_execute(qstate, variables, operation_name) data = data, meta = { statistics = qcontext.statistics, + executor = executor_metainfo, } } end @@ -72,19 +125,40 @@ end --- --- @treturn table result of the operation local function compile_and_execute(state, query, variables, operation_name, - opts) + compile_opts) assert(type(state) == 'table', 'use :compile_and_execute(...) ' .. 
'instead of .compile_and_execute(...)') assert(state.schema ~= nil, 'have not compiled schema') check(query, 'query', 'string') check(variables, 'variables', 'table', 'nil') check(operation_name, 'operation_name', 'string', 'nil') - check(opts, 'opts', 'table', 'nil') + check(compile_opts, 'compile_opts', 'table', 'nil') - local compiled_query = state:compile(query, opts) + local compiled_query = state:compile(query, compile_opts) return compiled_query:execute(variables, operation_name) end +local function validate_query_settings(query_settings, opts) + local opts = opts or {} + local allow_nil = opts.allow_nil or false + + local use_bfs_executor = query_settings.use_bfs_executor + local max_batch_size = query_settings.max_batch_size + + if not allow_nil or type(use_bfs_executor) ~= 'nil' then + check(use_bfs_executor, 'use_bfs_executor', 'string') + assert(use_bfs_executor == 'never' or + use_bfs_executor == 'shard' or + use_bfs_executor == 'always', + "use_bfs_executor must be 'never', 'shard' (default) or " .. + "'always', 'got " .. tostring(use_bfs_executor)) + end + + if not allow_nil or type(max_batch_size) ~= 'nil' then + check(max_batch_size, 'max_batch_size', 'number') + end +end + --- Parse GraphQL query string, validate against the GraphQL schema and --- provide an object with the function to execute an operation from the --- request with specific variables values. @@ -94,11 +168,13 @@ end --- @tparam string query text of a GraphQL query --- --- @tparam[opt] table opts the following options (described in ---- @{accessor_general.new}): +--- @{accessor_general.new} and @{impl.new}): --- --- * resulting_object_cnt_max --- * fetched_object_cnt_max --- * timeout_ms +--- * use_bfs_executor +--- * max_batch_size --- --- @treturn table compiled query with `execute` and `avro_schema` functions local function gql_compile(state, query, opts) @@ -120,11 +196,14 @@ local function gql_compile(state, query, opts) resulting_object_cnt_max = opts.resulting_object_cnt_max, fetched_object_cnt_max = opts.fetched_object_cnt_max, timeout_ms = opts.timeout_ms, + use_bfs_executor = opts.use_bfs_executor, + max_batch_size = opts.max_batch_size, } } accessor_general.validate_query_settings(qstate.query_settings, {allow_nil = true}) + validate_query_settings(qstate.query_settings, {allow_nil = true}) local gql_query = setmetatable(qstate, { __index = { @@ -135,14 +214,14 @@ local function gql_compile(state, query, opts) return gql_query end -local function start_server(gql, host, port) +local function start_server(gql, host, port, compile_opts) assert(type(gql) == 'table', 'use :start_server(...) 
instead of .start_server(...)') check(host, 'host', 'nil', 'string') check(port, 'port', 'nil', 'number') - gql.server = server.init(gql, host, port) + gql.server = server.init(gql, host, port, compile_opts) gql.server:start() return ('The GraphQL server started at http://%s:%s'):format( @@ -196,26 +275,27 @@ local function create_default_accessor(cfg) end end -function impl.compile(query) +function impl.compile(query, opts) if default_instance == nil then default_instance = impl.new() end - return default_instance:compile(query) + return default_instance:compile(query, opts) end -function impl.execute(query, variables, operation_name) +function impl.execute(query, variables, operation_name, compile_opts) if default_instance == nil then default_instance = impl.new() end - return default_instance:execute(query, variables, operation_name) + return default_instance:execute(query, variables, operation_name, + compile_opts) end -function impl.start_server() +function impl.start_server(host, port, compile_opts) if default_instance == nil then default_instance = impl.new() end - return default_instance:start_server() + return default_instance:start_server(host, port, compile_opts) end function impl.stop_server() @@ -267,23 +347,32 @@ end --- service_fields =
<...>, --- accessor = <string> or <table>, --- accessor_funcs = <table>, +--- connections = <...>
, -- for auto configuration from space formats --- collection_use_tomap = , --- resulting_object_cnt_max = , --- fetched_object_cnt_max = , --- timeout_ms = , --- enable_mutations = , --- disable_dangling_check = , +--- use_bfs_executor = 'never' | 'shard' (default) | 'always', +--- max_batch_size = , --- }) function impl.new(cfg) local cfg = cfg or {} cfg = table.deepcopy(cfg) -- prevent change of user's data -- auto config case - if not next(cfg) or utils.has_only(cfg, 'connections') then + local perform_auto_configuration = + cfg['schemas'] == nil and + cfg['indexes'] == nil and + cfg['service_fields'] == nil and + cfg['accessor'] == nil and + cfg['accessor_funcs'] == nil + if perform_auto_configuration then local generated_cfg = simple_config.graphql_cfg_from_tarantool() generated_cfg.accessor = 'space' generated_cfg.connections = cfg.connections or {} - cfg = generated_cfg + cfg = utils.merge_tables(cfg, generated_cfg) cfg = config_complement.complement_cfg(cfg) end @@ -302,9 +391,14 @@ function impl.new(cfg) cfg.indexes = cfg.accessor.indexes end + check(cfg.disable_dangling_check, 'disable_dangling_check', 'boolean', + 'nil') local state = { disable_dangling_check = cfg.disable_dangling_check, + use_bfs_executor = cfg.use_bfs_executor or 'shard', + max_batch_size = cfg.max_batch_size or DEF_MAX_BATCH_SIZE, } + validate_query_settings(state) convert_schema.convert(state, cfg) return setmetatable(state, { __index = { diff --git a/graphql/init.lua b/graphql/init.lua index 39f0e39..052e696 100644 --- a/graphql/init.lua +++ b/graphql/init.lua @@ -35,6 +35,7 @@ local accessor_space = require('graphql.accessor_space') local accessor_shard = require('graphql.accessor_shard') local impl = require('graphql.impl') local error_codes = require('graphql.error_codes') +local storage = require('graphql.storage') local graphql = {} @@ -49,6 +50,9 @@ for k, v in pairs(error_codes) do end end +-- submodules +graphql.storage = storage + -- for backward compatibility graphql.accessor_general = accessor_general graphql.accessor_space = accessor_space diff --git a/graphql/request_batch.lua b/graphql/request_batch.lua new file mode 100644 index 0000000..0c08ed3 --- /dev/null +++ b/graphql/request_batch.lua @@ -0,0 +1,113 @@ +local json = require('json') +local utils = require('graphql.utils') + +local request_batch = {} + +local function iterator_opts_tostring(iterator_opts) + return ('%s,%s'):format( + iterator_opts.iterator or iterator_opts[1] or 'EQ', + iterator_opts.limit or '') +end + +--- List of strings, each uniquely identifies select request in the batch. +local function batch_select_ids(self, skip_function) + local ids = {} + + local collection_name = self.collection_name + local index_name = self.index_name or '' + local iterator_opts_str = iterator_opts_tostring(self.iterator_opts) + + for _, key in ipairs(self.keys) do + local key_str + if type(key) ~= 'table' then + key_str = tostring(key) + else + assert(utils.is_array(key), 'compound key must be an array') + key_str = table.concat(key, ',') + end + local id = ('%s.%s.%s.%s'):format(collection_name, index_name, key_str, + iterator_opts_str) + if skip_function == nil or not skip_function(id) then + table.insert(ids, id) + end + end + + return ids +end + +--- String uniquely identifies the batch information except keys. +local function batch_bin(self) + return ('%s.%s.%s'):format( + self.collection_name, + self.index_name or '', + iterator_opts_tostring(self.iterator_opts)) +end + +--- Compare batches by bin. 
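+---
+--- A hedged illustration (collection and index names are made up): two
+--- batches with the same collection, index and iterator options fall into
+--- the same "bin" even when their keys differ, so their keys can be merged
+--- into one batched request:
+---
+---     local a = request_batch.new('user_collection', 'user_id_index',
+---         {'user_id_1'}, {})
+---     local b = request_batch.new('user_collection', 'user_id_index',
+---         {'user_id_2'}, {})
+---     assert(a:compare_bins(b)) -- same bin, only the keys differ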
+local function batch_compare_bins(self, other) + return self.collection_name == other.collection_name and + self.index_name == other.index_name and + utils.are_tables_same(self.iterator_opts, other.iterator_opts) +end + +--- Compare batches by bin with detailed error in case they don't match. +local function batch_compare_bins_extra(self, other) + if self.collection_name ~= other.collection_name then + local err = ('prepared object list has different collection names: ' .. + '"%s" and "%s"'):format(self.collection_name, other.collection_name) + return false, err + end + + if self.index_name ~= other.index_name then + local err = ('prepared object list has different index names: ' .. + '"%s" and "%s"'):format(tostring(self.index_name), + tostring(other.index_name)) + return false, err + end + + if not utils.are_tables_same(self.iterator_opts, other.iterator_opts) then + local err = ('prepared object list has different iterator options: ' .. + '"%s" and "%s"'):format(json.encode(self.iterator_opts), + json.encode(other.iterator_opts)) + return false, err + end + + return true +end + +local request_batch_mt = { + __index = { + bin = batch_bin, + select_ids = batch_select_ids, + compare_bins = batch_compare_bins, + compare_bins_extra = batch_compare_bins_extra, + } +} + +function request_batch.from_prepared_resolve(prepared_resolve) + assert(not prepared_resolve.is_calculated) + local prepared_select = prepared_resolve.prepared_select + local request_opts = prepared_select.request_opts + local collection_name = prepared_select.collection_name + local index_name = request_opts.index_name + local key = request_opts.index_value or box.NULL + local iterator_opts = request_opts.iterator_opts + + return setmetatable({ + collection_name = collection_name, + index_name = index_name, + keys = {key}, + iterator_opts = iterator_opts or {}, + }, request_batch_mt) +end + +function request_batch.new(collection_name, index_name, keys, iterator_opts) + return setmetatable({ + collection_name = collection_name, + index_name = index_name, + keys = keys, + iterator_opts = iterator_opts or {}, + }, request_batch_mt) +end + +return request_batch diff --git a/graphql/server/server.lua b/graphql/server/server.lua index 999ef93..4f1a2ec 100644 --- a/graphql/server/server.lua +++ b/graphql/server/server.lua @@ -43,7 +43,7 @@ local function static_handler(req) } end -function server.init(graphql, host, port) +function server.init(graphql, host, port, compile_opts) local host = host or '127.0.0.1' local port = port or 8080 local httpd = require('http.server').new(host, port) @@ -103,7 +103,7 @@ function server.init(graphql, host, port) local traceback local ok, compiled_query = xpcall(function() - return graphql:compile(query) + return graphql:compile(query, compile_opts) end, function(err) traceback = debug.traceback() return err diff --git a/graphql/statistics.lua b/graphql/statistics.lua new file mode 100644 index 0000000..ab2d0ec --- /dev/null +++ b/graphql/statistics.lua @@ -0,0 +1,101 @@ +local utils = require('graphql.utils') +local error_codes = require('graphql.error_codes') + +local check = utils.check +local e = error_codes + +local statistics = {} + +--- Count fetch event. 
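+---
+--- A hedged example of the expected call (the numbers are illustrative and
+--- the `stat` object is assumed to come from statistics.new()): report one
+--- index lookup that fetched 10 objects; the method raises
+--- e.fetched_objects_limit_exceeded() once the accumulated count exceeds
+--- limits.fetched_object_cnt_max:
+---
+---     stat:objects_fetched({
+---         fetches_cnt = 1,
+---         fetched_objects_cnt = 10,
+---         full_scan_cnt = 0,
+---         index_lookup_cnt = 1,
+---     })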
+local function objects_fetched(self, info) + check(self, 'self', 'table') + check(info, 'info', 'table') + local fetches_cnt = info.fetches_cnt + local fetched_objects_cnt = info.fetched_objects_cnt + local full_scan_cnt = info.full_scan_cnt + local index_lookup_cnt = info.index_lookup_cnt + check(fetches_cnt, 'fetches_cnt', 'number') + check(fetched_objects_cnt, 'fetched_objects_cnt', 'number') + check(full_scan_cnt, 'full_scan_cnt', 'number') + check(index_lookup_cnt, 'index_lookup_cnt', 'number') + + -- count fetches and fetched objects + self.fetches_cnt = self.fetches_cnt + fetches_cnt + self.fetched_object_cnt = self.fetched_object_cnt + fetched_objects_cnt + + -- count full scan and index select request + self.full_scan_cnt = self.full_scan_cnt + full_scan_cnt + self.index_lookup_cnt = self.index_lookup_cnt + index_lookup_cnt + + if self.limits.fetched_object_cnt_max == nil then + return + end + if self.fetched_object_cnt > self.limits.fetched_object_cnt_max then + error(e.fetched_objects_limit_exceeded( + ('fetched objects count (%d) exceeds fetched_object_cnt_max ' .. + 'limit (%d)'):format(self.fetched_object_cnt, + self.limits.fetched_object_cnt_max))) + end +end + +--- Count retire event. +local function objects_retired(self, info) + check(self, 'self', 'table') + check(info, 'info', 'table') + local retired_objects_cnt = info.retired_objects_cnt + check(retired_objects_cnt, 'retired_objects_cnt', 'number') + + self.resulting_object_cnt = self.resulting_object_cnt + retired_objects_cnt + + if self.limits.resulting_object_cnt_max == nil then + return + end + if self.resulting_object_cnt > self.limits.resulting_object_cnt_max then + error(e.resulting_objects_limit_exceeded( + ('resulting objects count (%d) exceeds resulting_object_cnt_max ' .. + 'limit (%d)'):format(self.resulting_object_cnt, + self.limits.resulting_object_cnt_max))) + end +end + +--- Count cache hit / miss event. 
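+---
+--- A hedged example (the `stat` object is assumed to come from
+--- statistics.new()): count one cache hit that served three objects:
+---
+---     stat:cache_lookup({
+---         cache_hits_cnt = 1,
+---         cache_hit_objects_cnt = 3,
+---     })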
+local function cache_lookup(self, info) + check(self, 'self', 'table') + check(info, 'info', 'table') + local cache_hits_cnt = info.cache_hits_cnt + local cache_hit_objects_cnt = info.cache_hit_objects_cnt + check(cache_hits_cnt, 'cache_hits_cnt', 'number') + check(cache_hit_objects_cnt, 'cache_hit_objects_cnt', 'number') + + self.cache_hits_cnt = self.cache_hits_cnt + cache_hits_cnt + self.cache_hit_objects_cnt = self.cache_hit_objects_cnt + + cache_hit_objects_cnt +end + +function statistics.new(opts) + local opts = opts or {} + local resulting_object_cnt_max = opts.resulting_object_cnt_max + local fetched_object_cnt_max = opts.fetched_object_cnt_max + + return setmetatable({ + resulting_object_cnt = 0, -- retire + fetches_cnt = 0, -- fetch + fetched_object_cnt = 0, -- fetch + full_scan_cnt = 0, -- fetch + index_lookup_cnt = 0, -- fetch + cache_hits_cnt = 0, -- cache lookup + cache_hit_objects_cnt = 0, -- cache lookup + limits = { + resulting_object_cnt_max = resulting_object_cnt_max, -- retire limit + fetched_object_cnt_max = fetched_object_cnt_max, -- fetch limit + } + }, { + __index = { + objects_fetched = objects_fetched, + objects_retired = objects_retired, + cache_lookup = cache_lookup, + } + }) +end + +return statistics diff --git a/graphql/storage.lua b/graphql/storage.lua new file mode 100644 index 0000000..ba4aca0 --- /dev/null +++ b/graphql/storage.lua @@ -0,0 +1,45 @@ +local log = require('log') +local json = require('json') +-- local yaml = require('yaml') + +local storage = {} + +local function batch_select(space_name, index_name, keys, opts) + log.info(('batch_select(%s, %s, %s, %s)'):format( + space_name, index_name or 'nil', json.encode(keys), + json.encode(opts))) + + local index = index_name == nil and + box.space[space_name].index[0] or + box.space[space_name].index[index_name] + local results = {} + + for _, key in ipairs(keys) do + -- log.info('batch_select key:\n' .. json.encode(key)) + local tuples = index:select(key, opts) + -- log.info('batch_select tuples:\n' .. yaml.encode(tuples)) + table.insert(results, tuples) + end + + -- log.info('batch_select result:\n' .. yaml.encode(results)) + return results +end + +function storage.functions() + return { + batch_select = batch_select, + } +end + +function storage.init() + for k, v in pairs(storage.functions()) do + _G[k] = v + end +end + +-- declare globals for require('strict').on() +for k, _ in pairs(storage.functions()) do + _G[k] = nil +end + +return storage diff --git a/graphql/utils.lua b/graphql/utils.lua index 2e946b2..50170a0 100644 --- a/graphql/utils.lua +++ b/graphql/utils.lua @@ -29,6 +29,10 @@ function utils.is_subtable(t, sub) return true end +function utils.are_tables_same(t1, t2) + return utils.is_subtable(t1, t2) and utils.is_subtable(t2, t1) +end + --- Check whether table is an array. --- --- Based on [that][1] implementation. @@ -181,23 +185,16 @@ function utils.check(obj, obj_name, type_1, type_2, type_3) end if type_3 ~= nil then - error(('%s must be a %s or a % or a %, got %s'):format(obj_name, type_1, - type_2, type_3, type(obj))) + error(('%s must be a %s or a % or a %s, got %s'):format(obj_name, + type_1, type_2, type_3, type(obj))) elseif type_2 ~= nil then - error(('%s must be a %s or a %, got %s'):format(obj_name, type_1, - type_2, type(obj))) + error(('%s must be a %s or a %s, got %s'):format(obj_name, type_1, + type_2, type(obj))) else error(('%s must be a %s, got %s'):format(obj_name, type_1, type(obj))) end end ---- Check if given table has only one specific key. 
-function utils.has_only(t, key) - local fst_key = next(t) - local snd_key = next(t, fst_key) - return fst_key == key and snd_key == nil -end - function utils.table_size(t) local count = 0 for _, _ in pairs(t) do @@ -276,4 +273,37 @@ function utils.serialize_error(err, traceback) return res end +--- Append all elements of the list `tail` to the end of list `list`. +--- +--- @tparam table list list to add elements to +--- @tparam table tail list to add elements from +function utils.expand_list(list, tail) + for _, item in ipairs(tail) do + table.insert(list, item) + end +end + +--- Add a debug print to the log enabled by an environment variable. +--- +--- @param data_or_func (string or function) data to print or a function that +--- will be return the data to print; the function called only if the +--- environment variable toogle is enabled +--- +--- @param ... parameters of the function `data_or_func` +--- +--- @return nothing +function utils.debug(data_or_func, ...) + if (os.getenv('TARANTOOL_GRAPHQL_DEBUG') or ''):len() > 0 then + local data + if type(data_or_func) == 'function' then + data = data_or_func(...) + else + data = data_or_func + assert(select('#', ...) == 0) + end + assert(type(data) == 'string') + log.info('DEBUG: %s', data) + end +end + return utils diff --git a/test/bench/bench.lua b/test/bench/bench.lua index d277087..e62959c 100644 --- a/test/bench/bench.lua +++ b/test/bench/bench.lua @@ -49,6 +49,7 @@ local function workload(shard, bench_prepare, bench_iter, opts) -- first iteration; print result and update checksum local result = bench_iter(state) + local statistics = result.meta.statistics local result_str = yaml.encode(result.data) checksum:update(result_str .. '1') @@ -88,6 +89,7 @@ local function workload(shard, bench_prepare, bench_iter, opts) duration_successive = duration, latency_successive_avg = latency_avg, rps_successive_avg = rps_avg, + statistics = statistics, } end @@ -98,13 +100,33 @@ local function write_result(test_name, conf_name, bench_result, to_file) result_name = ('%s.%s'):format(result_name, result_suffix) end + local metrics = { + 'duration_successive', + 'latency_successive_avg', + 'rps_successive_avg', + 'statistics.resulting_object_cnt', + 'statistics.fetches_cnt', + 'statistics.fetched_object_cnt', + 'statistics.full_scan_cnt', + 'statistics.index_lookup_cnt', + 'statistics.cache_hits_cnt', + 'statistics.cache_hit_objects_cnt', + } + local result = '' - result = result .. ('%s.duration_successive: %f\n'):format( - result_name, bench_result.duration_successive) - result = result .. ('%s.latency_successive_avg: %f\n'):format( - result_name, bench_result.latency_successive_avg) - result = result .. ('%s.rps_successive_avg: %f\n'):format( - result_name, bench_result.rps_successive_avg) + for _, metric in ipairs(metrics) do + local value + local value_type + if metric:startswith('statistics.') then + value = bench_result.statistics[metric:gsub('^.-%.', '')] + value_type = '%d' + else + value = bench_result[metric] + value_type = '%f' + end + result = result .. ('%s.%s: ' .. value_type .. 
'\n'):format( + result_name, metric, value) + end if not to_file then print(result) @@ -160,9 +182,7 @@ end function bench.bench_prepare_helper(testdata, shard, meta) testdata.fill_test_data(shard or box.space, meta) return test_utils.graphql_from_testdata(testdata, shard, { - graphql_opts = { - timeout_ms = graphql.TIMEOUT_INFINITY, - } + timeout_ms = graphql.TIMEOUT_INFINITY, }) end diff --git a/test/bench/forking-2-1-1.test.lua b/test/bench/forking-2-1-1.test.lua new file mode 100755 index 0000000..cd57eca --- /dev/null +++ b/test/bench/forking-2-1-1.test.lua @@ -0,0 +1,77 @@ +#!/usr/bin/env tarantool + +-- requires +-- -------- + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + +local bench = require('test.bench.bench') +local testdata = require('test.testdata.bench_testdata') + +-- functions +-- --------- + +local function bench_prepare(state) + local meta = testdata.meta or testdata.get_test_metadata() + state.gql_wrapper = bench.bench_prepare_helper(testdata, state.shard, meta) + local query = [[ + query match_by_user_and_passport_id_and_equipment_id( + $user_id: String + $passport_id: String + $equipment_id: String + ) { + user( + user_id: $user_id + user_to_passport_c: {passport_id: $passport_id} + user_to_equipment_c: {equipment_id: $equipment_id} + ) { + user_id + first_name + middle_name + last_name + user_to_passport_c { + passport_id + } + user_to_equipment_c { + equipment_id + } + } + } + ]] + state.variables = { + user_id = 'user_id_42', + passport_id = 'passport_id_42', + equipment_id = 'equipment_id_42', + } + state.gql_query = state.gql_wrapper:compile(query) +end + +local function bench_iter(state) + return state.gql_query:execute(state.variables) +end + +-- run +-- --- + +box.cfg({}) + +bench.run('forking-2-1-1', { + init_function = testdata.init_spaces, + cleanup_function = testdata.drop_spaces, + bench_prepare = bench_prepare, + bench_iter = bench_iter, + iterations = { + space = 100000, + shard = 10000, + }, + checksums = { + space = 2631928398, + shard = 2570197652, + }, +}) + +os.exit() diff --git a/test/bench/forking-2-100-1.test.lua b/test/bench/forking-2-100-1.test.lua new file mode 100755 index 0000000..b1f54ad --- /dev/null +++ b/test/bench/forking-2-100-1.test.lua @@ -0,0 +1,74 @@ +#!/usr/bin/env tarantool + +-- requires +-- -------- + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +local bench = require('test.bench.bench') +local testdata = require('test.testdata.bench_testdata') + +-- functions +-- --------- + +local function bench_prepare(state) + local meta = testdata.meta or testdata.get_test_metadata() + state.gql_wrapper = bench.bench_prepare_helper(testdata, state.shard, meta) + local query = [[ + query match_by_passport_id_and_equipment_id( + $passport_id: String + $equipment_id: String + ) { + user( + user_to_passport_c: {passport_id: $passport_id} + user_to_equipment_c: {equipment_id: $equipment_id} + ) { + user_id + first_name + middle_name + last_name + user_to_passport_c { + passport_id + } + user_to_equipment_c { + equipment_id + } + } + } + ]] + state.variables = { + passport_id = 'passport_id_42', + equipment_id = 'equipment_id_42', + } + state.gql_query = state.gql_wrapper:compile(query) +end + +local function bench_iter(state) + return state.gql_query:execute(state.variables) +end + +-- run +-- --- + +box.cfg({}) + +bench.run('forking-2-100-1', { + init_function = testdata.init_spaces, + cleanup_function = testdata.drop_spaces, + bench_prepare = bench_prepare, + bench_iter = bench_iter, + iterations = { + space = 10000, + shard = 1000, + }, + checksums = { + space = 2570197652, + shard = 3470604763, + }, +}) + +os.exit() diff --git a/test/bench/forking-3-1-1-1.test.lua b/test/bench/forking-3-1-1-1.test.lua new file mode 100755 index 0000000..bdb2046 --- /dev/null +++ b/test/bench/forking-3-1-1-1.test.lua @@ -0,0 +1,80 @@ +#!/usr/bin/env tarantool + +-- requires +-- -------- + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +local bench = require('test.bench.bench') +local testdata = require('test.testdata.bench_testdata') + +-- functions +-- --------- + +local function bench_prepare(state) + local meta = testdata.meta or testdata.get_test_metadata() + state.gql_wrapper = bench.bench_prepare_helper(testdata, state.shard, meta) + local query = [[ + query match_by_user_and_passport_and_equipment( + $user_id: String, $number: String + ) { + user( + user_id: $user_id, + user_to_passport_c: {passport_c: {number: $number}} + user_to_equipment_c: {equipment_c: {number: $number}} + ) { + user_id + first_name + middle_name + last_name + user_to_passport_c { + passport_c { + passport_id + number + } + } + user_to_equipment_c { + equipment_c { + equipment_id + number + } + } + } + } + ]] + state.variables = { + user_id = 'user_id_42', + number = 'number_42', + } + state.gql_query = state.gql_wrapper:compile(query) +end + +local function bench_iter(state) + return state.gql_query:execute(state.variables) +end + +-- run +-- --- + +box.cfg({}) + +bench.run('forking-3-1-1-1', { + init_function = testdata.init_spaces, + cleanup_function = testdata.drop_spaces, + bench_prepare = bench_prepare, + bench_iter = bench_iter, + iterations = { + space = 100000, + shard = 10000, + }, + checksums = { + space = 3712855152, + shard = 2948309431, + }, +}) + +os.exit() diff --git a/test/bench/forking-3-100-100-1.test.lua b/test/bench/forking-3-100-100-1.test.lua new file mode 100755 index 0000000..06de619 --- /dev/null +++ b/test/bench/forking-3-100-100-1.test.lua @@ -0,0 +1,76 @@ +#!/usr/bin/env tarantool + +-- requires +-- -------- + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +local bench = require('test.bench.bench') +local testdata = require('test.testdata.bench_testdata') + +-- functions +-- --------- + +local function bench_prepare(state) + local meta = testdata.meta or testdata.get_test_metadata() + state.gql_wrapper = bench.bench_prepare_helper(testdata, state.shard, meta) + local query = [[ + query match_by_passport_and_equipment($number: String) { + user( + user_to_passport_c: {passport_c: {number: $number}} + user_to_equipment_c: {equipment_c: {number: $number}} + ) { + user_id + first_name + middle_name + last_name + user_to_passport_c { + passport_c { + passport_id + number + } + } + user_to_equipment_c { + equipment_c { + equipment_id + number + } + } + } + } + ]] + state.variables = { + number = 'number_42', + } + state.gql_query = state.gql_wrapper:compile(query) +end + +local function bench_iter(state) + return state.gql_query:execute(state.variables) +end + +-- run +-- --- + +box.cfg({}) + +bench.run('forking-3-100-100-1', { + init_function = testdata.init_spaces, + cleanup_function = testdata.drop_spaces, + bench_prepare = bench_prepare, + bench_iter = bench_iter, + iterations = { + space = 10000, + shard = 1000, + }, + checksums = { + space = 2948309431, + shard = 3663249523, + }, +}) + +os.exit() diff --git a/test/bench/suite.ini b/test/bench/suite.ini index 59f70ba..23f743d 100644 --- a/test/bench/suite.ini +++ b/test/bench/suite.ini @@ -4,6 +4,10 @@ description = microbenchmarking config = suite.cfg is_parallel = False long_run = + forking-2-100-1.test.lua + forking-2-1-1.test.lua + forking-3-100-100-1.test.lua + forking-3-1-1-1.test.lua nesting-1-100.test.lua nesting-1-1.test.lua nesting-2-100-1.test.lua diff --git a/test/common/limit_result.test.lua b/test/common/limit_result.test.lua index e017714..ed32cfe 100755 --- a/test/common/limit_result.test.lua +++ b/test/common/limit_result.test.lua @@ -10,19 +10,9 @@ local tap = require('tap') local test_utils = require('test.test_utils') local testdata = require('test.testdata.user_order_item_testdata') local graphql = require('graphql') -local graphql_utils = require('graphql.utils') -local test_run = graphql_utils.optional_require('test_run') -test_run = test_run and test_run.new() local e = graphql.error_codes -local apply_settings_to = test_run and test_run:get_cfg('apply_settings_to') or - 'graphql' -local settings = { - resulting_object_cnt_max = 3, - fetched_object_cnt_max = 5, -} - local function run_queries(gql_wrapper) local test = tap.test('result cnt') test:plan(2) @@ -42,8 +32,9 @@ local function run_queries(gql_wrapper) } ]] - local query_opts = apply_settings_to == 'query' and settings or nil - local gql_query = gql_wrapper:compile(query, query_opts) + local gql_query = gql_wrapper:compile(query, { + resulting_object_cnt_max = 3, + }) local variables = { user_id = 5, } @@ -57,7 +48,10 @@ local function run_queries(gql_wrapper) test:is_deeply({err, code}, {exp_err, e.RESULTING_OBJECTS_LIMIT_EXCEEDED}, 'resulting_object_cnt_max test') - variables = { + local gql_query = gql_wrapper:compile(query, { + fetched_object_cnt_max = 5, + }) + local variables = { user_id = 5, description = "no such description" } @@ -76,10 +70,8 @@ end box.cfg({}) -local graphql_opts = apply_settings_to == 'graphql' and settings or nil test_utils.run_testdata(testdata, { run_queries = run_queries, - graphql_opts = graphql_opts, }) os.exit() diff --git a/test/common/query_timeout.test.lua b/test/common/query_timeout.test.lua index 0d24e8b..dbc7baa 100755 --- 
a/test/common/query_timeout.test.lua +++ b/test/common/query_timeout.test.lua @@ -16,9 +16,9 @@ test_run = test_run and test_run.new() local e = graphql.error_codes -local apply_settings_to = test_run and test_run:get_cfg('apply_settings_to') or +local apply_timeout_to = test_run and test_run:get_cfg('apply_timeout_to') or 'graphql' -local settings = { +local test_graphql_opts = { timeout_ms = 0.001, } @@ -41,7 +41,7 @@ local function run_queries(gql_wrapper) } ]] - local query_opts = apply_settings_to == 'query' and settings or nil + local query_opts = apply_timeout_to == 'query' and test_graphql_opts or nil local gql_query = gql_wrapper:compile(query, query_opts) local variables = {} local result = gql_query:execute(variables) @@ -57,7 +57,7 @@ end box.cfg({}) -local graphql_opts = apply_settings_to == 'graphql' and settings or nil +local graphql_opts = apply_timeout_to == 'graphql' and test_graphql_opts or nil test_utils.run_testdata(testdata, { run_queries = run_queries, graphql_opts = graphql_opts, diff --git a/test/common/suite.cfg b/test/common/suite.cfg index e9b97c2..c2a37ee 100644 --- a/test/common/suite.cfg +++ b/test/common/suite.cfg @@ -1,23 +1,126 @@ { "query_timeout.test.lua": { - "space (g)": {"conf": "space", "apply_settings_to": "graphql"}, - "shard_2x2 (g)": {"conf": "shard_2x2", "apply_settings_to": "graphql"}, - "shard_4x1 (g)": {"conf": "shard_4x1", "apply_settings_to": "graphql"}, - "space (q)": {"conf": "space", "apply_settings_to": "query"}, - "shard_2x2 (q)": {"conf": "shard_2x2", "apply_settings_to": "query"}, - "shard_4x1 (q)": {"conf": "shard_4x1", "apply_settings_to": "query"} - }, - "limit_result.test.lua": { - "space (g)": {"conf": "space", "apply_settings_to": "graphql"}, - "shard_2x2 (g)": {"conf": "shard_2x2", "apply_settings_to": "graphql"}, - "shard_4x1 (g)": {"conf": "shard_4x1", "apply_settings_to": "graphql"}, - "space (q)": {"conf": "space", "apply_settings_to": "query"}, - "shard_2x2 (q)": {"conf": "shard_2x2", "apply_settings_to": "query"}, - "shard_4x1 (q)": {"conf": "shard_4x1", "apply_settings_to": "query"} + "space (dfs, g)": { + "conf": "space", + "apply_timeout_to": "graphql", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "space (bfs, g)": { + "conf": "space", + "apply_timeout_to": "graphql", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "shard_2x2 (dfs, g)": { + "conf": "shard_2x2", + "apply_timeout_to": "graphql", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "shard_2x2 (bfs, g)": { + "conf": "shard_2x2", + "apply_timeout_to": "graphql", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "shard_4x1 (dfs, g)": { + "conf": "shard_4x1", + "apply_timeout_to": "graphql", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "shard_4x1 (bfs, g)": { + "conf": "shard_4x1", + "apply_timeout_to": "graphql", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "space (dfs, q)": { + "conf": "space", + "apply_timeout_to": "query", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "space (bfs, q)": { + "conf": "space", + "apply_timeout_to": "query", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "shard_2x2 (dfs, q)": { + "conf": "shard_2x2", + "apply_timeout_to": "query", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "shard_2x2 (bfs, q)": { + "conf": "shard_2x2", + "apply_timeout_to": "query", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "shard_4x1 (dfs, q)": { + "conf": "shard_4x1", + "apply_timeout_to": "query", + 
"graphql_opts": { + "use_bfs_executor": "never" + } + }, + "shard_4x1 (bfs, q)": { + "conf": "shard_4x1", + "apply_timeout_to": "query", + "graphql_opts": { + "use_bfs_executor": "always" + } + } }, "*": { - "space": {"conf": "space"}, - "shard_2x2": {"conf": "shard_2x2"}, - "shard_4x1": {"conf": "shard_4x1"} + "space (dfs)": { + "conf": "space", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "space (bfs)": { + "conf": "space", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "shard_2x2 (dfs)": { + "conf": "shard_2x2", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "shard_2x2 (bfs)": { + "conf": "shard_2x2", + "graphql_opts": { + "use_bfs_executor": "always" + } + }, + "shard_4x1 (dfs)": { + "conf": "shard_4x1", + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "shard_4x1 (bfs)": { + "conf": "shard_4x1", + "graphql_opts": { + "use_bfs_executor": "always" + } + } } } diff --git a/test/extra/connections_1_1_name_clash.test.lua b/test/extra/connections_1_1_name_clash.test.lua index e51aca7..936c5c3 100755 --- a/test/extra/connections_1_1_name_clash.test.lua +++ b/test/extra/connections_1_1_name_clash.test.lua @@ -1,7 +1,8 @@ #!/usr/bin/env tarantool + local fio = require('fio') local json = require('json') -local test = require('tap').test('connections 1:1 name clash') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") @@ -9,8 +10,12 @@ package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") package.path local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') + +local test = tap.test('connections 1:1 name clash') -box.cfg{ wal_mode="none" } +box.cfg{wal_mode="none"} test:plan(3) local schemas = json.decode([[{ @@ -181,13 +186,13 @@ local indexes = { } } -local gql_wrapper_1 = graphql.new({ +local gql_wrapper_1 = graphql.new(utils.merge_tables({ schemas = schemas, collections = collections_1, service_fields = service_fields, indexes = indexes, - accessor = 'space' -}) + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) test:isnt(gql_wrapper_1, nil) @@ -252,13 +257,13 @@ local collections_2 = json.decode([[{ } }]]) -local gql_wrapper_2 = graphql.new({ +local gql_wrapper_2 = graphql.new(utils.merge_tables({ schemas = schemas, collections = collections_2, service_fields = service_fields, indexes = indexes, - accessor = 'space' -}) + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) test:isnt(gql_wrapper_2, nil) @@ -336,13 +341,13 @@ local collections_3 = json.decode([[{ } }]]) -local gql_wrapper_3 = graphql.new({ +local gql_wrapper_3 = graphql.new(utils.merge_tables({ schemas = schemas, collections = collections_3, service_fields = service_fields, indexes = indexes, - accessor = 'space' -}) + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) test:isnt(gql_wrapper_3, nil) test:check() diff --git a/test/extra/suite.cfg b/test/extra/suite.cfg new file mode 100644 index 0000000..375087d --- /dev/null +++ b/test/extra/suite.cfg @@ -0,0 +1,14 @@ +{ + "*": { + "dfs": { + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "bfs": { + "graphql_opts": { + "use_bfs_executor": "always" + } + } + } +} diff --git a/test/extra/suite.ini b/test/extra/suite.ini index e6b0976..6cfb601 100644 --- a/test/extra/suite.ini +++ b/test/extra/suite.ini @@ -2,3 +2,4 @@ core = app description = tests on features which are not 
related to specific executor is_parallel = True +config = suite.cfg diff --git a/test/extra/to_avro_arrays.test.lua b/test/extra/to_avro_arrays.test.lua index 32cc7f0..0fd1847 100755 --- a/test/extra/to_avro_arrays.test.lua +++ b/test/extra/to_avro_arrays.test.lua @@ -1,17 +1,21 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +local utils = require('graphql.utils') +local graphql = require('graphql') +local test_utils = require('test.test_utils') local testdata = require('test.testdata.array_and_map_testdata') -local graphql = require('graphql') +local test = tap.test('to avro schema') box.cfg{wal_mode="none"} test:plan(4) @@ -20,18 +24,13 @@ testdata.init_spaces() testdata.fill_test_data() local meta = testdata.get_test_metadata() -local accessor = graphql.accessor_space.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = meta.schemas, collections = meta.collections, - service_fields = meta.service_fields, indexes = meta.indexes, -}) - -local gql_wrapper = graphql.new({ - schemas = meta.schemas, - collections = meta.collections, - accessor = accessor, -}) + service_fields = meta.service_fields, + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) -- We do not select `customer_balances` and `favorite_holidays` because thay are -- is of `Map` type, which is not supported. diff --git a/test/extra/to_avro_directives.test.lua b/test/extra/to_avro_directives.test.lua index 22d651e..59e49a9 100755 --- a/test/extra/to_avro_directives.test.lua +++ b/test/extra/to_avro_directives.test.lua @@ -1,17 +1,23 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") - :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
+ package.path +local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') local common_testdata = require('test.testdata.common_testdata') local union_testdata = require('test.testdata.union_testdata') local multihead_testdata = require('test.testdata.multihead_conn_testdata') -local graphql = require('graphql') + +local test = tap.test('to avro schema') test:plan(15) @@ -23,13 +29,13 @@ common_testdata.init_spaces() local common_meta = common_testdata.get_test_metadata() common_testdata.fill_test_data(box.space, common_meta) -local gql_wrapper = graphql.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = common_meta.schemas, collections = common_meta.collections, service_fields = common_meta.service_fields, indexes = common_meta.indexes, accessor = 'space' -}) +}, test_utils.test_conf_graphql_opts())) local common_query = [[ query order_by_id($order_id: String, $include_description: Boolean, @@ -187,13 +193,13 @@ union_testdata.init_spaces() local union_meta = union_testdata.get_test_metadata() union_testdata.fill_test_data(box.space, union_meta) -local gql_wrapper = graphql.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = union_meta.schemas, collections = union_meta.collections, service_fields = union_meta.service_fields, indexes = union_meta.indexes, accessor = 'space' -}) +}, test_utils.test_conf_graphql_opts())) local union_query = [[ query user_collection ($include_stuff: Boolean) { @@ -352,13 +358,13 @@ multihead_testdata.init_spaces() local multihead_meta = multihead_testdata.get_test_metadata() multihead_testdata.fill_test_data(box.space, multihead_meta) -local gql_wrapper = graphql.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = multihead_meta.schemas, collections = multihead_meta.collections, service_fields = multihead_meta.service_fields, indexes = multihead_meta.indexes, accessor = 'space' -}) +}, test_utils.test_conf_graphql_opts())) local multihead_query = [[ query obtainHeroes($hero_id: String, $include_connections: Boolean) { diff --git a/test/extra/to_avro_huge.test.lua b/test/extra/to_avro_huge.test.lua index 4f824c1..264f3a8 100755 --- a/test/extra/to_avro_huge.test.lua +++ b/test/extra/to_avro_huge.test.lua @@ -1,17 +1,21 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path +local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') local data = require('test.testdata.user_order_item_testdata') -local graphql = require('graphql') +local test = tap.test('to avro schema') box.cfg{wal_mode="none"} test:plan(4) @@ -19,18 +23,13 @@ test:plan(4) data.init_spaces() data.fill_test_data(box.space) -local accessor = graphql.accessor_space.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = data.meta.schemas, collections = data.meta.collections, - service_fields = data.meta.service_fields, indexes = data.meta.indexes, -}) - -local gql_wrapper = graphql.new({ - schemas = data.meta.schemas, - collections = data.meta.collections, - accessor = accessor, -}) + service_fields = data.meta.service_fields, + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) local query = [[ query object_result_max($user_id: Int, $order_id: Int) { diff --git a/test/extra/to_avro_multihead.test.lua b/test/extra/to_avro_multihead.test.lua index 5b2dec3..800a54e 100755 --- a/test/extra/to_avro_multihead.test.lua +++ b/test/extra/to_avro_multihead.test.lua @@ -1,14 +1,21 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') -local testdata = require('test.testdata.multihead_conn_with_nulls_testdata') -local graphql = require('graphql') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") - :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. + package.path + +local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') +local testdata = require('test.testdata.multihead_conn_with_nulls_testdata') + +local test = tap.test('to avro schema') test:plan(7) @@ -18,13 +25,13 @@ testdata.init_spaces() local meta = testdata.get_test_metadata() testdata.fill_test_data(box.space, meta) -local gql_wrapper = graphql.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = meta.schemas, collections = meta.collections, service_fields = meta.service_fields, indexes = meta.indexes, accessor = 'space' -}) +}, test_utils.test_conf_graphql_opts())) local query = [[ query obtainHeroes($hero_id: String) { diff --git a/test/extra/to_avro_nested.test.lua b/test/extra/to_avro_nested.test.lua index 99937af..c1e6704 100755 --- a/test/extra/to_avro_nested.test.lua +++ b/test/extra/to_avro_nested.test.lua @@ -1,17 +1,21 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path +local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') local data = require('test.testdata.nested_record_testdata') -local graphql = require('graphql') +local test = tap.test('to avro schema') box.cfg{wal_mode="none"} test:plan(4) @@ -19,18 +23,13 @@ test:plan(4) data.init_spaces() data.fill_test_data(box.space, data.meta) -local accessor = graphql.accessor_space.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = data.meta.schemas, collections = data.meta.collections, - service_fields = data.meta.service_fields, indexes = data.meta.indexes, -}) - -local gql_wrapper = graphql.new({ - schemas = data.meta.schemas, - collections = data.meta.collections, - accessor = accessor, -}) + service_fields = data.meta.service_fields, + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) local query = [[ query getUserByUid($uid: Long) { diff --git a/test/extra/to_avro_nullable.test.lua b/test/extra/to_avro_nullable.test.lua index bc24333..99486b5 100755 --- a/test/extra/to_avro_nullable.test.lua +++ b/test/extra/to_avro_nullable.test.lua @@ -1,16 +1,21 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') +local tap = require('tap') + -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') local testdata = require('test.testdata.nullable_index_testdata') -local graphql = require('graphql') +local test = tap.test('to avro schema') box.cfg{wal_mode="none"} test:plan(4) @@ -19,18 +24,13 @@ testdata.init_spaces() local meta = testdata.get_test_metadata() testdata.fill_test_data(box.space, meta) -local accessor = graphql.accessor_space.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = meta.schemas, collections = meta.collections, - service_fields = meta.service_fields, indexes = meta.indexes, -}) - -local gql_wrapper = graphql.new({ - schemas = meta.schemas, - collections = meta.collections, - accessor = accessor, -}) + service_fields = meta.service_fields, + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) -- We do not select `customer_balances` and `favorite_holidays` because thay are -- is of `Map` type, which is not supported. diff --git a/test/extra/to_avro_unions_and_maps.test.lua b/test/extra/to_avro_unions_and_maps.test.lua index 84ac2e5..04f27a2 100755 --- a/test/extra/to_avro_unions_and_maps.test.lua +++ b/test/extra/to_avro_unions_and_maps.test.lua @@ -1,16 +1,21 @@ #!/usr/bin/env tarantool + local fio = require('fio') local yaml = require('yaml') local avro = require('avro_schema') -local test = require('tap').test('to avro schema') +local tap = require('tap') -- require in-repo version of graphql/ sources despite current working directory package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") - :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
+ package.path +local graphql = require('graphql') +local utils = require('graphql.utils') +local test_utils = require('test.test_utils') local testdata = require('test.testdata.union_testdata') -local graphql = require('graphql') +local test = tap.test('to avro schema') box.cfg{wal_mode="none"} test:plan(3) @@ -19,13 +24,13 @@ testdata.init_spaces() testdata.fill_test_data() local meta = testdata.get_test_metadata() -local gql_wrapper = graphql.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = meta.schemas, collections = meta.collections, service_fields = meta.service_fields, indexes = meta.indexes, accessor = 'space' -}) +}, test_utils.test_conf_graphql_opts())) local query = [[ query user_collection { diff --git a/test/shard_servers/shard.lua b/test/shard_servers/shard.lua index 821a831..07774fb 100644 --- a/test/shard_servers/shard.lua +++ b/test/shard_servers/shard.lua @@ -1,5 +1,13 @@ #!/usr/bin/env tarantool +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + +local graphql_storage = require('graphql.storage') + -- get instance name from filename (shard1.lua => 1) local INSTANCE_ID = string.match(arg[0], "%d") local SOCKET_DIR = require('fio').cwd() @@ -8,6 +16,8 @@ local function instance_uri(instance_id) return ('%s/shard%s.sock'):format(SOCKET_DIR, instance_id) end +graphql_storage.init() + -- start console first require('console').listen(os.getenv('ADMIN')) diff --git a/test/shard_servers/shard_tcp.lua b/test/shard_servers/shard_tcp.lua index b7f2a42..de9339c 100644 --- a/test/shard_servers/shard_tcp.lua +++ b/test/shard_servers/shard_tcp.lua @@ -1,5 +1,15 @@ #!/usr/bin/env tarantool +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +local graphql_storage = require('graphql.storage') + +graphql_storage.init() + -- start console first require('console').listen(os.getenv('ADMIN')) diff --git a/test/space/complemented_config.test.lua b/test/space/complemented_config.test.lua index d5e3761..166c9fb 100755 --- a/test/space/complemented_config.test.lua +++ b/test/space/complemented_config.test.lua @@ -3,6 +3,7 @@ local tap = require('tap') local yaml = require('yaml') local graphql = require('graphql') +local utils = require('graphql.utils') local test_utils = require('test.test_utils') local connections = { @@ -172,6 +173,8 @@ local function run_queries(gql_wrapper) user_collection: [] order_collection: [] ]]):strip()) + exp_result_1_2 = utils.merge_tables(exp_result_1_2, + test_utils.test_conf_graphql_opts()) test:is_deeply(result_1_2, exp_result_1_2, '1_2') assert(test:check(), 'check plan') @@ -181,7 +184,8 @@ test_utils.show_trace(function() box.cfg { background = false } init_spaces() fill_test_data() - local gql_wrapper = graphql.new({connections = connections}) + local gql_wrapper = graphql.new(utils.merge_tables( + {connections = connections}, test_utils.test_conf_graphql_opts())) run_queries(gql_wrapper) drop_spaces() end) diff --git a/test/space/default_instance.test.lua b/test/space/default_instance.test.lua index 95daaab..dffc1d2 100755 --- a/test/space/default_instance.test.lua +++ b/test/space/default_instance.test.lua @@ -44,7 +44,8 @@ test:plan(5) -- test require('graphql').compile(query) test_utils.show_trace(function() local variables_1 = {user_id = 'user_id_1'} - local compiled_query = gql_lib.compile(query) + local compiled_query = gql_lib.compile(query, + test_utils.test_conf_graphql_opts()) local result = compiled_query:execute(variables_1) local exp_result = yaml.decode(([[ --- @@ -58,7 +59,8 @@ end) -- test require('graphql').execute(query) test_utils.show_trace(function() local variables_2 = {user_id = 'user_id_2'} - local result = gql_lib.execute(query, variables_2) + local result = gql_lib.execute(query, variables_2, nil, + test_utils.test_conf_graphql_opts()) local exp_result = yaml.decode(([[ --- user_collection: @@ -70,7 +72,8 @@ end) -- test server test_utils.show_trace(function() - local res = gql_lib.start_server() + local res = gql_lib.start_server(nil, nil, + test_utils.test_conf_graphql_opts()) local exp_res_start = 'The GraphQL server started at http://127.0.0.1:8080' test:is(res, exp_res_start, 'start_server') diff --git a/test/space/init_fail.test.lua b/test/space/init_fail.test.lua index 7ee1c73..aae6ada 100755 --- a/test/space/init_fail.test.lua +++ b/test/space/init_fail.test.lua @@ -10,6 +10,7 @@ package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") local tap = require('tap') local graphql = require('graphql') local utils = require('graphql.utils') +local test_utils = require('test.test_utils') local testdata = require('test.testdata.compound_index_testdata') -- init box, upload test data and acquire metadata @@ -36,18 +37,13 @@ metadata.collections.order_collection.connections[1].parts[2] = nil -- ---------------------------------- local function create_gql_wrapper(metadata) - local accessor = graphql.accessor_space.new({ + return graphql.new(utils.merge_tables({ schemas = metadata.schemas, collections = metadata.collections, service_fields = metadata.service_fields, indexes = metadata.indexes, - }) - - return graphql.new({ - schemas = metadata.schemas, - collections = metadata.collections, - accessor = accessor, - }) + accessor = 'space', + }, 
test_utils.test_conf_graphql_opts())) end local test = tap.test('init_fail') diff --git a/test/space/nested_args.test.lua b/test/space/nested_args.test.lua index b6a3689..70860f3 100755 --- a/test/space/nested_args.test.lua +++ b/test/space/nested_args.test.lua @@ -10,6 +10,7 @@ package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") local tap = require('tap') local yaml = require('yaml') local graphql = require('graphql') +local utils = require('graphql.utils') local test_utils = require('test.test_utils') local common_testdata = require('test.testdata.common_testdata') local emails_testdata = require('test.testdata.nullable_1_1_conn_testdata') @@ -49,7 +50,7 @@ local emails_metadata = emails_testdata.get_test_metadata() -- build accessor and graphql schemas -- ---------------------------------- -local common_gql_wrapper = graphql.new({ +local common_gql_wrapper = graphql.new(utils.merge_tables({ schemas = common_metadata.schemas, collections = common_metadata.collections, service_fields = common_metadata.service_fields, @@ -57,9 +58,9 @@ local common_gql_wrapper = graphql.new({ accessor = 'space', -- gh-137: timeout exceeded timeout_ms = 10000, -- 10 seconds -}) +}, test_utils.test_conf_graphql_opts())) -local emails_gql_wrapper = graphql.new({ +local emails_gql_wrapper = graphql.new(utils.merge_tables({ schemas = emails_metadata.schemas, collections = emails_metadata.collections, service_fields = emails_metadata.service_fields, @@ -67,7 +68,7 @@ local emails_gql_wrapper = graphql.new({ accessor = 'space', -- gh-137: timeout exceeded timeout_ms = 10000, -- 10 seconds -}) +}, test_utils.test_conf_graphql_opts())) -- run queries -- ----------- diff --git a/test/space/server.test.lua b/test/space/server.test.lua index 9f0ba67..4b9e8b9 100755 --- a/test/space/server.test.lua +++ b/test/space/server.test.lua @@ -10,6 +10,7 @@ local yaml = require('yaml') local json = require('json') local http = require('http.client').new() local graphql = require('graphql') +local utils = require('graphql.utils') local test_utils = require('test.test_utils') local testdata = require('test.testdata.common_testdata') @@ -31,18 +32,13 @@ local indexes = metadata.indexes -- build accessor and graphql schemas -- ---------------------------------- -local accessor = graphql.accessor_space.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = schemas, collections = collections, service_fields = service_fields, indexes = indexes, -}) - -local gql_wrapper = graphql.new({ - schemas = schemas, - collections = collections, - accessor = accessor, -}) + accessor = 'space', +}, test_utils.test_conf_graphql_opts())) local test = tap.test('server') test:plan(6) @@ -80,7 +76,8 @@ test_utils.show_trace(function() box.space.order_collection:format({{name='order_id', type='string'}, {name='user_id', type='string'}, {name='description', type='string'}}) - local res = graphql.start_server() + local res = graphql.start_server(nil, nil, + test_utils.test_conf_graphql_opts()) test:is(res, exp_res_start, 'start_server') _, response = pcall(function() diff --git a/test/space/suite.cfg b/test/space/suite.cfg new file mode 100644 index 0000000..375087d --- /dev/null +++ b/test/space/suite.cfg @@ -0,0 +1,14 @@ +{ + "*": { + "dfs": { + "graphql_opts": { + "use_bfs_executor": "never" + } + }, + "bfs": { + "graphql_opts": { + "use_bfs_executor": "always" + } + } + } +} diff --git a/test/space/suite.ini b/test/space/suite.ini index 56e11d1..9ee69b8 100644 --- a/test/space/suite.ini +++ b/test/space/suite.ini @@ -1,4 
+1,5 @@ [default] core = app description = tests with space accessor -is_parallel = True +config = suite.cfg +is_parallel = False diff --git a/test/space/unflatten_tuple.test.lua b/test/space/unflatten_tuple.test.lua index 67cf7d0..8cd7d34 100755 --- a/test/space/unflatten_tuple.test.lua +++ b/test/space/unflatten_tuple.test.lua @@ -11,10 +11,10 @@ local tap = require('tap') local yaml = require('yaml') local avro = require('avro_schema') local graphql = require('graphql') +local utils = require('graphql.utils') local test_utils = require('test.test_utils') local testdata = require('test.testdata.common_testdata') -local utils = require('graphql.utils') local check = utils.check -- init box, upload test data and acquire metadata @@ -66,20 +66,16 @@ local function unflatten_tuple(self, collection_name, tuple, default) error('unexpected collection_name: ' .. tostring(collection_name)) end -local accessor = graphql.accessor_space.new({ +local gql_wrapper = graphql.new(utils.merge_tables({ schemas = schemas, collections = collections, - service_fields = service_fields, indexes = indexes, -}, { - unflatten_tuple = unflatten_tuple, -}) - -local gql_wrapper = graphql.new({ - schemas = schemas, - collections = collections, - accessor = accessor, -}) + service_fields = service_fields, + accessor = 'space', + accessor_funcs = { + unflatten_tuple = unflatten_tuple, + }, +}, test_utils.test_conf_graphql_opts())) -- run queries -- ----------- diff --git a/test/space/zero_config.test.lua b/test/space/zero_config.test.lua index 8b37507..c327772 100755 --- a/test/space/zero_config.test.lua +++ b/test/space/zero_config.test.lua @@ -3,6 +3,7 @@ local tap = require('tap') local yaml = require('yaml') local graphql = require('graphql') +local utils = require('graphql.utils') local test_utils = require('test.test_utils') local function init_spaces() @@ -109,6 +110,8 @@ local function run_queries(gql_wrapper) service_fields: user_collection: [] ]]):strip()) + exp_result_1_2 = utils.merge_tables(exp_result_1_2, + test_utils.test_conf_graphql_opts()) test:is_deeply(result_1_2, exp_result_1_2, '1_2') assert(test:check(), 'check plan') @@ -118,7 +121,7 @@ test_utils.show_trace(function() box.cfg { background = false } init_spaces() fill_test_data() - local gql_wrapper = graphql.new() + local gql_wrapper = graphql.new(test_utils.test_conf_graphql_opts()) run_queries(gql_wrapper) drop_spaces() end) diff --git a/test/test_utils.lua b/test/test_utils.lua index 46a60cb..fdfd4a5 100644 --- a/test/test_utils.lua +++ b/test/test_utils.lua @@ -7,6 +7,7 @@ package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") :gsub('/./', '/'):gsub('/+$', '')) .. '/../?.lua' .. ';' .. package.path local log = require('log') +local yaml = require('yaml') local avro_schema = require('avro_schema') local digest = require('digest') local shard = require('shard') @@ -113,8 +114,12 @@ function test_utils.graphql_from_testdata(testdata, shard, graphql_opts) accessor = shard and 'shard' or 'space', } + -- allow to run under tarantool w/o additional opts w/o test-run + local test_conf_graphql_opts = test_run and test_run:get_cfg('graphql_opts') + or {} + local gql_wrapper = graphql.new(utils.merge_tables( - default_graphql_opts, graphql_opts)) + default_graphql_opts, test_conf_graphql_opts, graphql_opts)) return gql_wrapper end @@ -159,7 +164,11 @@ function test_utils.show_trace(func, ...) return select(2, xpcall( function() return func(unpack(args)) end, function(err) - log.info('ERROR: ' .. 
tostring(err)) + if type(err) == 'string' then + log.info('ERROR: ' .. err) + else + log.info('ERROR:\n' .. yaml.encode(err)) + end log.info(debug.traceback()) end )) @@ -187,4 +196,9 @@ function test_utils.get_shard_key_hash(key) return 1 + digest.guava(num, shards_n) end +function test_utils.test_conf_graphql_opts() + -- allow to run under tarantool w/o additional opts w/o test-run + return test_run and test_run:get_cfg('graphql_opts') or {} +end + return test_utils diff --git a/test/testdata/bench_testdata.lua b/test/testdata/bench_testdata.lua index ea12bf8..e6c915a 100644 --- a/test/testdata/bench_testdata.lua +++ b/test/testdata/bench_testdata.lua @@ -30,6 +30,22 @@ function bench_testdata.get_test_metadata() {"name": "passport_id", "type": "string"}, {"name": "number", "type": "string"} ] + }, + "user_to_equipment": { + "type": "record", + "name": "user_to_equipment", + "fields": [ + {"name": "user_id", "type": "string"}, + {"name": "equipment_id", "type": "string"} + ] + }, + "equipment": { + "type": "record", + "name": "equipment", + "fields": [ + {"name": "equipment_id", "type": "string"}, + {"name": "number", "type": "string"} + ] } }]]) @@ -42,7 +58,22 @@ function bench_testdata.get_test_metadata() "name": "user_to_passport_c", "destination_collection": "user_to_passport", "parts": [ - { "source_field": "user_id", "destination_field": "user_id" } + { + "source_field": "user_id", + "destination_field": "user_id" + } + ], + "index_name": "user_id" + }, + { + "type": "1:1", + "name": "user_to_equipment_c", + "destination_collection": "user_to_equipment", + "parts": [ + { + "source_field": "user_id", + "destination_field": "user_id" + } ], "index_name": "user_id" } @@ -56,15 +87,39 @@ function bench_testdata.get_test_metadata() "name": "passport_c", "destination_collection": "passport", "parts": [ - { "source_field": "passport_id", "destination_field": "passport_id" } + { + "source_field": "passport_id", + "destination_field": "passport_id" + } ], "index_name": "passport_id" } ] }, + "user_to_equipment": { + "schema_name": "user_to_equipment", + "connections": [ + { + "type": "1:1", + "name": "equipment_c", + "destination_collection": "equipment", + "parts": [ + { + "source_field": "equipment_id", + "destination_field": "equipment_id" + } + ], + "index_name": "equipment_id" + } + ] + }, "passport": { "schema_name": "passport", "connections": [] + }, + "equipment": { + "schema_name": "equipment", + "connections": [] } }]]) @@ -72,6 +127,8 @@ function bench_testdata.get_test_metadata() user = {}, user_to_passport = {}, passport = {}, + user_to_equipment = {}, + equipment = {}, } local indexes = { @@ -116,6 +173,38 @@ function bench_testdata.get_test_metadata() primary = true, }, }, + user_to_equipment = { + primary = { + service_fields = {}, + fields = {'user_id', 'equipment_id'}, + index_type = 'tree', + unique = true, + primary = true, + }, + user_id = { + service_fields = {}, + fields = {'user_id'}, + index_type = 'tree', + unique = true, + primary = false, + }, + equipment_id = { + service_fields = {}, + fields = {'equipment_id'}, + index_type = 'tree', + unique = true, + primary = false, + }, + }, + equipment = { + equipment_id = { + service_fields = {}, + fields = {'equipment_id'}, + index_type = 'tree', + unique = true, + primary = true, + }, + }, } return { @@ -131,12 +220,19 @@ function bench_testdata.init_spaces() local U_USER_ID_FN = 1 -- user_to_passport fields - local T_USER_ID_FN = 1 - local T_PASSPORT_ID_FN = 2 + local UTP_USER_ID_FN = 1 + local UTP_PASSPORT_ID_FN = 2 
-- passport fields local P_PASSPORT_ID_FN = 1 + -- user_to_equipment fields + local UTE_USER_ID_FN = 1 + local UTE_EQUIPMENT_ID_FN = 2 + + -- equipment fields + local E_EQUIPMENT_ID_FN = 1 + box.once('init_spaces_bench', function() -- user space box.schema.create_space('user') @@ -150,18 +246,18 @@ function bench_testdata.init_spaces() box.schema.create_space('user_to_passport') box.space.user_to_passport:create_index('primary', {type = 'tree', parts = { - T_USER_ID_FN, 'string', - T_PASSPORT_ID_FN, 'string', + UTP_USER_ID_FN, 'string', + UTP_PASSPORT_ID_FN, 'string', }} ) box.space.user_to_passport:create_index('user_id', {type = 'tree', parts = { - T_USER_ID_FN, 'string', + UTP_USER_ID_FN, 'string', }} ) box.space.user_to_passport:create_index('passport_id', {type = 'tree', parts = { - T_PASSPORT_ID_FN, 'string', + UTP_PASSPORT_ID_FN, 'string', }} ) @@ -172,6 +268,33 @@ function bench_testdata.init_spaces() P_PASSPORT_ID_FN, 'string', }} ) + + -- user_to_equipment space + box.schema.create_space('user_to_equipment') + box.space.user_to_equipment:create_index('primary', + {type = 'tree', parts = { + UTE_USER_ID_FN, 'string', + UTE_EQUIPMENT_ID_FN, 'string', + }} + ) + box.space.user_to_equipment:create_index('user_id', + {type = 'tree', parts = { + UTE_USER_ID_FN, 'string', + }} + ) + box.space.user_to_equipment:create_index('equipment_id', + {type = 'tree', parts = { + UTE_EQUIPMENT_ID_FN, 'string', + }} + ) + + -- equipment space + box.schema.create_space('equipment') + box.space.equipment:create_index('equipment_id', + {type = 'tree', parts = { + E_EQUIPMENT_ID_FN, 'string', + }} + ) end) end @@ -194,6 +317,14 @@ function bench_testdata.fill_test_data(shard, meta) passport_id = 'passport_id_' .. s, number = 'number_' .. s, }) + test_utils.replace_object(virtbox, meta, 'user_to_equipment', { + user_id = 'user_id_' .. s, + equipment_id = 'equipment_id_' .. s, + }) + test_utils.replace_object(virtbox, meta, 'equipment', { + equipment_id = 'equipment_id_' .. s, + number = 'number_' .. 
s, + }) end end @@ -202,6 +333,8 @@ function bench_testdata.drop_spaces() box.space.user:drop() box.space.user_to_passport:drop() box.space.passport:drop() + box.space.user_to_equipment:drop() + box.space.equipment:drop() end return bench_testdata diff --git a/test/testdata/nested_record_testdata.lua b/test/testdata/nested_record_testdata.lua index 4866f23..c04477b 100644 --- a/test/testdata/nested_record_testdata.lua +++ b/test/testdata/nested_record_testdata.lua @@ -89,29 +89,29 @@ end function testdata.run_queries(gql_wrapper) local test = tap.test('nested_record') - test:plan(2) + test:plan(3) local query_1 = [[ - query getUserByUid($uid: Long) { + query getUserByUid($uid: Long, $include_y: Boolean) { user(uid: $uid) { uid p1 p2 nested { x - y + y @include(if: $include_y) } } } ]] - local variables_1 = {uid = 5} - local result_1 = test_utils.show_trace(function() - local gql_query_1 = gql_wrapper:compile(query_1) - return gql_query_1:execute(variables_1) + local gql_query_1 = test_utils.show_trace(function() + return gql_wrapper:compile(query_1) end) - local exp_result_1 = yaml.decode(([[ + local variables_1_1 = {uid = 5, include_y = true} + local result_1_1 = gql_query_1:execute(variables_1_1) + local exp_result_1_1 = yaml.decode(([[ --- user: - uid: 5 @@ -121,8 +121,20 @@ function testdata.run_queries(gql_wrapper) x: 1005 y: 2005 ]]):strip()) + test:is_deeply(result_1_1.data, exp_result_1_1, 'show all nested fields') - test:is_deeply(result_1.data, exp_result_1, '1') + local variables_1_2 = {uid = 5, include_y = false} + local result_1_2 = gql_query_1:execute(variables_1_2) + local exp_result_1_2 = yaml.decode(([[ + --- + user: + - uid: 5 + p1: p1 5 + p2: p2 5 + nested: + x: 1005 + ]]):strip()) + test:is_deeply(result_1_2.data, exp_result_1_2, 'show some nested fields') local query_2 = [[ query getUserByX($x: Long) { @@ -155,7 +167,7 @@ function testdata.run_queries(gql_wrapper) y: 2005 ]]):strip()) - test:is_deeply(result_2.data, exp_result_2, '2') + test:is_deeply(result_2.data, exp_result_2, 'filter by nested field') assert(test:check(), 'check plan') end
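For illustration, a minimal sketch (not part of the patch) of how the per-query statistics exposed through `result.meta.statistics` and the `utils.debug()` helper added above could be used together. The `report_statistics` helper is hypothetical; the statistics field names are taken from the `metrics` list in `test/bench/bench.lua` and may not be exhaustive.

```lua
-- sketch only: assumes `gql_wrapper` was built with graphql.new(), as in
-- the test files above
local graphql_utils = require('graphql.utils')

local function report_statistics(gql_wrapper, query, variables)
    local gql_query = gql_wrapper:compile(query)
    local result = gql_query:execute(variables)

    -- per-query statistics are reported alongside the data
    local stat = result.meta.statistics
    local fields = {
        'resulting_object_cnt',
        'fetches_cnt',
        'fetched_object_cnt',
        'full_scan_cnt',
        'index_lookup_cnt',
        'cache_hits_cnt',
        'cache_hit_objects_cnt',
    }
    for _, name in ipairs(fields) do
        -- utils.debug() writes to the log only when TARANTOOL_GRAPHQL_DEBUG
        -- is set; passing a function defers formatting until it is needed
        graphql_utils.debug(function()
            return ('%s: %s'):format(name, tostring(stat[name]))
        end)
    end

    return result.data
end

return report_statistics
```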
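The test changes above all follow one construction pattern: build the wrapper with `graphql.new()` and merge the per-configuration options from `suite.cfg` (notably `use_bfs_executor = 'never'` for dfs and `'always'` for bfs) via `utils.merge_tables()` and `test_utils.test_conf_graphql_opts()`. Below is a condensed sketch of that wiring, reusing `test.testdata.common_testdata`; it is illustrative only and is not an additional test from the patch.

```lua
#!/usr/bin/env tarantool

-- sketch only: assumes the repository root is on package.path (the real
-- tests adjust it via fio, see above)
local graphql = require('graphql')
local utils = require('graphql.utils')
local test_utils = require('test.test_utils')
local testdata = require('test.testdata.common_testdata')

box.cfg({})

testdata.init_spaces()
local meta = testdata.get_test_metadata()
testdata.fill_test_data(box.space, meta)

-- under test-run, test_conf_graphql_opts() returns graphql_opts from
-- suite.cfg (e.g. {use_bfs_executor = 'never'}); outside of test-run it
-- returns {} and the defaults apply
local gql_wrapper = graphql.new(utils.merge_tables({
    schemas = meta.schemas,
    collections = meta.collections,
    service_fields = meta.service_fields,
    indexes = meta.indexes,
    accessor = 'space',
}, test_utils.test_conf_graphql_opts()))

-- gql_wrapper:compile() / :execute() are then used as in the tests above
```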