Skip to content
This repository was archived by the owner on Apr 14, 2022. It is now read-only.

Commit ea84bf6

Browse files
committed
Count tuples from cache fetch requests in statistics
Don't count tuples from the cache as fetched ones. This change makes the statistics fairer. Return an iterator from a cache lookup call to make the API closer to the regular data fetch request (accessor:get_index(...):pairs(...)). Perform the cache lookup from accessor_general, not from accessor_shard (this makes it possible to distinguish such requests and simplifies the implementation of the accessor functions).
1 parent d30ef91 commit ea84bf6

File tree

6 files changed

+149
-57
lines changed

6 files changed

+149
-57
lines changed

graphql/accessor_general.lua

Lines changed: 42 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -853,9 +853,8 @@ local function process_tuple(self, state, tuple, opts)
853853
local do_filter = opts.do_filter
854854
local pivot_filter = opts.pivot_filter
855855
local qcontext = state.qcontext
856-
local qstats = qcontext.statistics
857856

858-
qstats:objects_fetched({
857+
qcontext.statistics:objects_fetched({
859858
is_full_scan = opts.is_full_scan,
860859
objects_count = opts.fetched_tuples_cnt,
861860
})
@@ -881,11 +880,9 @@ local function process_tuple(self, state, tuple, opts)
881880
return true -- skip pivot item too
882881
end
883882

884-
-- Don't count subrequest resulting objects (needed for filtering) into
885-
-- count of object we show to an user as a result.
886-
-- XXX: It is better to have an option to control whether selected objects
887-
-- will be counted as resulting ones.
888-
local saved_resulting_object_cnt = qstats.resulting_object_cnt
883+
-- XXX: implement retired flag for invoke resolve and don't save-restore
884+
-- resulting_object_cnt
885+
local saved_resulting_object_cnt = qcontext.statistics.resulting_object_cnt
889886

890887
-- make subrequests if needed
891888
for k, v in pairs(filter) do
@@ -900,7 +897,7 @@ local function process_tuple(self, state, tuple, opts)
900897
end
901898
end
902899

903-
qstats.resulting_object_cnt = saved_resulting_object_cnt
900+
qcontext.statistics.resulting_object_cnt = saved_resulting_object_cnt
904901

905902
-- filter out non-matching objects
906903
local match = utils.is_subtable(obj, filter) and
@@ -916,7 +913,7 @@ local function process_tuple(self, state, tuple, opts)
916913
state.objs[#state.objs + 1] = obj
917914
state.count = state.count + 1
918915

919-
qstats:objects_retired({
916+
qcontext.statistics:objects_retired({
920917
objects_count = 1,
921918
})
922919

@@ -1138,18 +1135,30 @@ local function invoke_select_internal(self, prepared_select)
11381135
local request_opts = prepared_select.request_opts
11391136
local select_state = prepared_select.select_state
11401137
local select_opts = prepared_select.select_opts
1138+
local collection_name = prepared_select.collection_name
11411139
local args = prepared_select.args
11421140
local extra = prepared_select.extra
11431141

11441142
local index = request_opts.index
1143+
local index_name = request_opts.index_name
11451144
local index_value = request_opts.index_value
11461145
local iterator_opts = request_opts.iterator_opts
11471146
local is_full_scan = request_opts.is_full_scan
11481147

11491148
local tuple_count = 0
11501149
local out = {}
11511150

1152-
for _, tuple in index:pairs(index_value, iterator_opts, out) do
1151+
-- lookup for needed data in the cache if it is supported
1152+
local iterable
1153+
if self:cache_is_supported() then
1154+
iterable = self.funcs.cache_lookup(self, collection_name, index_name,
1155+
index_value, iterator_opts)
1156+
end
1157+
if iterable == nil then
1158+
iterable = index
1159+
end
1160+
1161+
for _, tuple in iterable:pairs(index_value, iterator_opts, out) do
11531162
local fetched_tuples_cnt = out.fetched_tuples_cnt
11541163
check(out.fetched_tuples_cnt, 'out.fetched_tuples_cnt', 'number')
11551164
out.fetched_tuples_cnt = 0
@@ -1644,11 +1653,32 @@ function accessor_general.new(opts, funcs)
16441653
cache_is_supported = function(self)
16451654
return self.data_cache ~= nil
16461655
end,
1647-
cache_fetch = function(self, batches)
1656+
cache_fetch = function(self, batches, qcontext)
16481657
if not self:cache_is_supported() then
16491658
return nil
16501659
end
1651-
local fetch_id = self.funcs.cache_fetch(self, batches)
1660+
1661+
local res = self.funcs.cache_fetch(self, batches)
1662+
if res == nil then
1663+
return nil
1664+
end
1665+
1666+
local fetch_id = res.fetch_id
1667+
local stats = res.stats
1668+
check(fetch_id, 'fetch_id', 'number')
1669+
check(stats, 'stats', 'table')
1670+
1671+
-- update statistics
1672+
init_qcontext(self, qcontext)
1673+
for _, stat in ipairs(stats) do
1674+
qcontext.statistics:objects_fetched({
1675+
is_full_scan = stat.is_full_scan,
1676+
objects_count = stat.fetched_tuples_cnt,
1677+
})
1678+
end
1679+
1680+
-- XXX: check timeout
1681+
16521682
return fetch_id
16531683
end,
16541684
-- cache_delete = function(self, fetch_id)

graphql/accessor_shard.lua

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -179,29 +179,23 @@ local function get_index(self, collection_name, index_name)
179179
if not is_index_exists(collection_name, index_name) then
180180
return nil
181181
end
182-
local accessor = self
183-
local cache_lookup = self.funcs.cache_lookup
184-
check(cache_lookup, 'cache_lookup', 'function')
185182

183+
-- XXX: wrap all data into the table, don't create the capture
186184
local index = setmetatable({}, {
187185
__index = {
188-
pairs = function(self, value, opts, out)
186+
pairs = function(_, value, opts, out)
189187
out.fetched_tuples_cnt = 0
190188
local func_name = 'accessor_shard.get_index.<index>.pairs'
191-
-- check cache
192-
local tuples = cache_lookup(accessor, collection_name,
193-
index_name, value, opts)
189+
194190
-- perform select
195-
if tuples == nil then
196-
local opts = opts or {}
197-
opts.limit = opts.limit or LIMIT
198-
local err
199-
tuples, err = shard:secondary_select(collection_name,
200-
index_name, opts, value, 0)
201-
accessor_shard_helpers.shard_check_error(func_name,
202-
tuples, err)
203-
end
191+
local opts = opts or {}
192+
opts.limit = opts.limit or LIMIT
193+
local tuples, err = shard:secondary_select(collection_name,
194+
index_name, opts, value, 0)
195+
accessor_shard_helpers.shard_check_error(func_name,
196+
tuples, err)
204197
out.fetched_tuples_cnt = out.fetched_tuples_cnt + #tuples
198+
205199
-- create iterator
206200
local cur = 1
207201
local function gen()
@@ -210,6 +204,7 @@ local function get_index(self, collection_name, index_name)
210204
cur = cur + 1
211205
return cur, res
212206
end
207+
213208
return gen, nil, nil
214209
end
215210
}
@@ -488,7 +483,18 @@ end
488483
---
489484
--- @tparam table batches see @{accessor_shard_cache.cache_fetch}
490485
---
491-
--- @treturn number `fetch_id` identifier of the fetched data
486+
--- @treturn table the following structure or nil:
487+
---
488+
--- {
489+
--- fetch_id = <number>, -- identifier of the fetched data
490+
--- stats = { -- data to update statistics
491+
--- {
492+
--- is_full_scan = <boolean>,
493+
--- fetched_tuples_cnt = <number>,
494+
--- },
495+
--- ...
496+
--- }
497+
--- }
492498
local function cache_fetch(self, batches)
493499
return self.data_cache:fetch(batches)
494500
end
@@ -525,7 +531,7 @@ end
525531
---
526532
--- @tparam table iterator_opts e.g. {} or {iterator = 'GT'}
527533
---
528-
--- @treturn table `result` fetched data (can be nil)
534+
--- @return luafun iterator (one value) to fetched data or nil
529535
local function cache_lookup(self, collection_name, index_name,
530536
key, iterator_opts)
531537
return self.data_cache:lookup(collection_name, index_name, key,

graphql/accessor_shard_cache.lua

Lines changed: 45 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@ local shard = require('shard')
33
local utils = require('graphql.utils')
44
local accessor_shard_index_info = require('graphql.accessor_shard_index_info')
55

6+
local check = utils.check
7+
68
local accessor_shard_cache = {}
79

810
-- XXX: maybe use a space instead of a Lua table
@@ -48,12 +50,13 @@ local function cache_fetch(self, batches)
4850
local fetch_id = self.fetch_id_next
4951
self.fetch_id_next = self.fetch_id_next + 1
5052

53+
local stats = {}
54+
5155
for _, batch in pairs(batches) do
5256
local ids = get_select_ids(self, batch, {skip_cached = true})
5357
if next(ids) == nil then
5458
goto continue
5559
end
56-
-- print('fetch ids: ' .. require('yaml').encode(ids))
5760
-- local buffers = {}
5861
local results = {}
5962

@@ -79,6 +82,10 @@ local function cache_fetch(self, batches)
7982
for _, tuple in ipairs(node_result) do
8083
table.insert(results[j], tuple)
8184
end
85+
stats[j] = stats[j] or {
86+
is_full_scan = batch.keys[j] == nil,
87+
fetched_tuples_cnt = 0,
88+
}
8289
end
8390
end
8491

@@ -87,8 +94,9 @@ local function cache_fetch(self, batches)
8794
-- sort by a primary key
8895
local primary_index_info = accessor_shard_index_info.get_index_info(
8996
batch.collection_name, 0)
90-
for _, result in ipairs(results) do
91-
table.sort(result, function(a ,b)
97+
for j, result in ipairs(results) do
98+
stats[j].fetched_tuples_cnt = stats[j].fetched_tuples_cnt + #result
99+
table.sort(result, function(a, b)
92100
for i, part in pairs(primary_index_info.parts) do
93101
if a[part.fieldno] ~= b[part.fieldno] then
94102
return a[part.fieldno] < b[part.fieldno]
@@ -99,13 +107,10 @@ local function cache_fetch(self, batches)
99107
end
100108

101109
-- write to cache
102-
--print('#results: ' .. tostring(#results))
103-
--print('#batch.keys: ' .. tostring(#batch.keys))
104110
assert(#results == #batch.keys, 'XXX') -- XXX
105111
assert(#results == #ids, 'XXX') -- XXX
106112
for i = 1, #ids do
107113
if self.cache[ids[i]] == nil then
108-
-- print('fetch: ' .. require('json').encode(ids[i]))
109114
self.cache[ids[i]] = {
110115
result = results[i],
111116
fetch_ids = {fetch_id}
@@ -122,7 +127,10 @@ local function cache_fetch(self, batches)
122127
::continue::
123128
end
124129

125-
return fetch_id
130+
return {
131+
fetch_id = fetch_id,
132+
stats = stats,
133+
}
126134
end
127135

128136
-- Unused for now.
@@ -169,12 +177,38 @@ local function cache_lookup(self, collection_name, index_name, key,
169177
iterator_opts = iterator_opts,
170178
}
171179
local id = get_select_ids(self, batch)[1]
172-
local result
173-
if self.cache[id] ~= nil then
174-
result = self.cache[id].result
180+
if self.cache[id] == nil then
181+
return nil
175182
end
176183

177-
return result
184+
local tuples = self.cache[id].result
185+
check(tuples, 'tuples', 'table')
186+
-- XXX: wrap all data into the table, don't create the capture
187+
return setmetatable({tuples = tuples}, {
188+
__index = {
189+
pairs = function(self, value, opts, out)
190+
assert(value == key, 'expected the same key in ' ..
191+
'cache_lookup call and :pairs() on returned iterable')
192+
assert(opts == iterator_opts, 'expected the same ' ..
193+
'iterator_opts in cache_lookup call and :pairs() on ' ..
194+
'returned iterable')
195+
196+
-- we actually do not fetch any tuples here
197+
out.fetched_tuples_cnt = 0
198+
199+
-- create iterator
200+
local cur = 1
201+
local function gen()
202+
if cur > #self.tuples then return nil end
203+
local res = tuples[cur]
204+
cur = cur + 1
205+
return cur, res
206+
end
207+
208+
return gen, nil, nil
209+
end
210+
}
211+
})
178212
end
179213

180214
function accessor_shard_cache.new()

0 commit comments

Comments
 (0)