diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua index 8b2bc33..d3bdce3 100644 --- a/graphql/accessor_general.lua +++ b/graphql/accessor_general.lua @@ -883,8 +883,7 @@ local function process_tuple(self, state, tuple, opts) if obj[k] == nil then local field_name = k local sub_filter = v - local sub_opts = {dont_force_nullability = true} - local field = resolveField(field_name, obj, sub_filter, sub_opts) + local field = resolveField(field_name, obj, sub_filter) if field == nil then return true end obj[k] = field -- XXX: Remove the value from a filter? But then we need to copy @@ -991,6 +990,7 @@ local function select_internal(self, collection_name, from, filter, args, extra) -- XXX: save type of args.offset at parsing and check here -- check(args.offset, 'args.offset', ...) check(args.pcre, 'args.pcre', 'table', 'nil') + check(extra.exp_tuple_count, 'extra.exp_tuple_count', 'number', 'nil') local collection = self.collections[collection_name] assert(collection ~= nil, @@ -1044,6 +1044,18 @@ local function select_internal(self, collection_name, from, filter, args, extra) resolveField = extra.resolveField, } + -- assert that connection constraint applied only to objects got from the + -- index that underlies the connection + if extra.exp_tuple_count ~= nil then + local err = 'internal error: connection constraint (expected tuple ' .. + 'count) cannot be applied to an index that is not under a ' .. 
+ 'connection' + assert(from.collection_name ~= nil, err) + assert(index ~= nil, err) + assert(pivot == nil or (pivot.value_list == nil and + pivot.filter ~= nil), err) + end + if index == nil then -- fullscan local primary_index = self.funcs.get_primary_index(self, @@ -1088,21 +1100,38 @@ local function select_internal(self, collection_name, from, filter, args, extra) iterator_opts.limit = args.limit end + local tuple_count = 0 + for _, tuple in index:pairs(index_value, iterator_opts) do + tuple_count = tuple_count + 1 + -- check full match constraint + if extra.exp_tuple_count ~= nil and + tuple_count > extra.exp_tuple_count then + error(('FULL MATCH constraint failed: we got more than ' .. '%d tuples'):format(extra.exp_tuple_count)) + end local continue = process_tuple(self, select_state, tuple, select_opts) if not continue then break end end + + -- check full match constraint + if extra.exp_tuple_count ~= nil and + tuple_count ~= extra.exp_tuple_count then + error(('FULL MATCH constraint failed: we expected %d tuples, ' .. 'got %d'):format(extra.exp_tuple_count, tuple_count)) + end end local count = select_state.count local objs = select_state.objs assert(args.limit == nil or count <= args.limit, - ('count[%d] exceeds limit[%s] (before return)'):format( - count, args.limit)) + ('internal error: selected objects count (%d) exceeds limit (%s)') :format(count, args.limit)) assert(#objs == count, - ('count[%d] is not equal to objs count[%d]'):format(count, #objs)) + ('internal error: selected objects count (%d) is not equal to the ' .. 'size of selected object list (%d)'):format(count, #objs)) return objs end @@ -1383,6 +1412,25 @@ end --- @treturn table data accessor instance, a table with the two methods --- (`select` and `arguments`) as described in the @{impl.new} function --- description. 
+--- +--- Brief explanation of some select function parameters: +--- +--- * `from` (table or nil) is nil for a top-level collection or a table with +--- the following fields: +--- +--- - collection_name +--- - connection_name +--- - destination_args_names +--- - destination_args_values +--- +--- * `extra` (table) is a table which contains additional data for the query: +--- +--- - `qcontext` (table) can be used by an accessor to store any +--- query-related data; +--- - `resolveField(field_name, object, filter, opts)` (function) for +--- performing a subrequest on fields connected using a 1:1 connection. +--- - extra_args +--- - exp_tuple_count function accessor_general.new(opts, funcs) assert(type(opts) == 'table', 'opts must be a table, got ' .. type(opts)) diff --git a/graphql/config_complement.lua b/graphql/config_complement.lua index 025decc..3acddf5 100644 --- a/graphql/config_complement.lua +++ b/graphql/config_complement.lua @@ -9,64 +9,25 @@ local json = require('json') local yaml = require('yaml') local log = require('log') + local utils = require('graphql.utils') local check = utils.check -local get_spaces_formats = require('graphql.simple_config').get_spaces_formats local config_complement = {} ---- The function determines connection type by connection.parts ---- and source collection space format. ---- ---- XXX Currently there are two possible situations when connection_parts form ---- unique index - all source_fields are nullable (1:1*) or all source_fields ---- are non nullable (1:1). In case of partially nullable connection_parts (which ---- form unique index) the error is raised. There is an alternative: relax ---- this requirement and deduce non-null connection type in the case. 
-local function determine_connection_type(connection_parts, index, source_space_format) - local type - - if #connection_parts < #(index.fields) then - type = '1:N' - end - - if #connection_parts == #(index.fields) then - if index.unique then - type = '1:1' - else - type = '1:N' - end - end - - local is_all_nullable = true - local is_all_not_nullable = true - - for _, connection_part in pairs(connection_parts) do - for _,field_format in ipairs(source_space_format) do - if connection_part.source_field == field_format.name then - if field_format.is_nullable == true then - is_all_not_nullable = false - else - is_all_nullable = false - end - end - end - end - - if is_all_nullable == is_all_not_nullable and type == '1:1' then - error('source_fields in connection_parts must be all nullable or ' .. - 'not nullable at the same time') +--- Determine connection type by connection.parts and index uniqueness. +local function determine_connection_type(connection_parts, index) + if #connection_parts < #index.fields then + return '1:N' + elseif #connection_parts == #index.fields then + return index.unique and '1:1' or '1:N' end - if is_all_nullable and type == '1:1' then - type = '1:1*' - end - - return type + error(('Connection parts count is more than index parts count: %d > %d') :format(#connection_parts, #index.fields)) end --- The function returns connection_parts sorted by destination_fields as --- index_fields prefix. +-- Return connection_parts sorted by destination_fields as index_fields prefix. 
local function sort_parts(connection_parts, index_fields) local sorted_parts = {} @@ -211,8 +172,6 @@ local function complement_connections(collections, connections, indexes) check(collections, 'collections', 'table') check(connections, 'connections', 'table') - local spaces_formats = get_spaces_formats() - for _, c in pairs(connections) do check(c.name, 'connection.name', 'string') check(c.source_collection, 'connection.source_collection', 'string') @@ -230,10 +189,7 @@ local function complement_connections(collections, connections, indexes) result_c.destination_collection = c.destination_collection result_c.parts = determine_connection_parts(c.parts, index) - local source_space_format = spaces_formats[result_c.source_collection] - - result_c.type = determine_connection_type(result_c.parts, index, - source_space_format) + result_c.type = determine_connection_type(result_c.parts, index) result_c.index_name = c.index_name result_c.name = c.name diff --git a/graphql/convert_schema/resolve.lua b/graphql/convert_schema/resolve.lua index ba4c4fc..cc33c7b 100644 --- a/graphql/convert_schema/resolve.lua +++ b/graphql/convert_schema/resolve.lua @@ -29,10 +29,10 @@ local function gen_from_parameter(collection_name, parent, connection) } end --- Check FULL match constraint before request of --- destination object(s). Note that connection key parts --- can be prefix of index key parts. Zero parts count --- considered as ok by this check. +--- Check FULL match constraint before request of destination object(s). +--- +--- Note that connection key parts can be prefix of index key parts. Zero parts +--- count considered as ok by this check. 
local function are_all_parts_null(parent, connection_parts) local are_all_parts_null = true local are_all_parts_non_null = true @@ -84,8 +84,10 @@ local function separate_args_instance(args_instance, arguments) end function resolve.gen_resolve_function(collection_name, connection, - destination_type, arguments, accessor) + destination_type, arguments, accessor, opts) local c = connection + local opts = opts or {} + local disable_dangling_check = opts.disable_dangling_check or false local bare_destination_type = core_types.bare(destination_type) -- capture `bare_destination_type` @@ -106,37 +108,30 @@ function resolve.gen_resolve_function(collection_name, connection, local opts = opts or {} assert(type(opts) == 'table', 'opts must be nil or a table, got ' .. type(opts)) - local dont_force_nullability = - opts.dont_force_nullability or false - assert(type(dont_force_nullability) == 'boolean', - 'opts.dont_force_nullability ' .. - 'must be nil or a boolean, got ' .. - type(dont_force_nullability)) + -- no opts for now local from = gen_from_parameter(collection_name, parent, c) -- Avoid non-needed index lookup on a destination collection when -- all connection parts are null: - -- * return null for 1:1* connection; + -- * return null for 1:1 connection; -- * return {} for 1:N connection (except the case when source -- collection is the query or the mutation pseudo-collection). if collection_name ~= nil and are_all_parts_null(parent, c.parts) then - if c.type ~= '1:1*' and c.type ~= '1:N' then - -- `if` is to avoid extra json.encode - assert(c.type == '1:1*' or c.type == '1:N', - ('only 1:1* or 1:N connections can have ' .. - 'all key parts null; parent is %s from ' .. 
- 'collection "%s"'):format(json.encode(parent), - tostring(collection_name))) - end return c.type == '1:N' and {} or nil end + local exp_tuple_count + if not disable_dangling_check and c.type == '1:1' then + exp_tuple_count = 1 + end + local resolveField = genResolveField(info) local extra = { qcontext = info.qcontext, resolveField = resolveField, -- for subrequests extra_args = {}, + exp_tuple_count = exp_tuple_count, } -- object_args_instance will be passed to 'filter' @@ -152,14 +147,8 @@ function resolve.gen_resolve_function(collection_name, connection, assert(type(objs) == 'table', 'objs list received from an accessor ' .. 'must be a table, got ' .. type(objs)) - if c.type == '1:1' or c.type == '1:1*' then - -- we expect here exactly one object even for 1:1* - -- connections because we processed all-parts-are-null - -- situation above - assert(#objs == 1 or dont_force_nullability, - 'expect one matching object, got ' .. - tostring(#objs)) - return objs[1] + if c.type == '1:1' then + return objs[1] -- nil for empty list of matching objects else -- c.type == '1:N' return objs end @@ -167,7 +156,9 @@ function resolve.gen_resolve_function(collection_name, connection, end function resolve.gen_resolve_function_multihead(collection_name, connection, - union_types, var_num_to_box_field_name, accessor) + union_types, var_num_to_box_field_name, accessor, opts) + local opts = opts or {} + local disable_dangling_check = opts.disable_dangling_check or false local c = connection local determinant_keys = utils.get_keys(c.variants[1].determinant) @@ -208,9 +199,13 @@ function resolve.gen_resolve_function_multihead(collection_name, connection, name = c.name, destination_collection = v.destination_collection, } + local opts = { + disable_dangling_check = disable_dangling_check, + } -- XXX: generate a function for each variant at schema generation time local result = resolve.gen_resolve_function(collection_name, - quazi_connection, destination_type, {}, accessor)(parent, {}, 
info) + quazi_connection, destination_type, {}, accessor, opts)( + parent, {}, info) -- This 'wrapping' is needed because we use 'select' on 'collection' -- GraphQL type and the result of the resolve function must be in diff --git a/graphql/convert_schema/schema.lua b/graphql/convert_schema/schema.lua index eaaaaa1..947f226 100644 --- a/graphql/convert_schema/schema.lua +++ b/graphql/convert_schema/schema.lua @@ -25,7 +25,7 @@ local schema = {} --- * DONE: Move avro-schema -> GraphQL arguments translating into its own --- module. --- * DONE: Support a sub-record arguments and others (union, array, ...). ---- * TBD: Generate arguments for cartesian product of {1:1, 1:1*, 1:N, all} x +--- * TBD: Generate arguments for cartesian product of {1:1, 1:N, all} x --- {query, mutation, all} x {top-level, nested, all} x {collections}. --- * TBD: Use generated arguments in GraphQL types (schema) generation. --- @@ -138,7 +138,8 @@ local function create_root_collection(state) }) end ---- Execute a function for each 1:1 or 1:1* connection of each collection. +--- Execute a function for each connection of one of specified types in each +--- collection. --- --- @tparam table state tarantool_graphql instance --- @@ -160,7 +161,7 @@ local function for_each_connection(state, connection_types, func) end end ---- Add arguments corresponding to 1:1 and 1:1* connections (nested filters). +--- Add arguments corresponding to 1:1 connections (nested filters). 
--- --- @tparam table state graphql_tarantool instance local function add_connection_arguments(state) @@ -169,8 +170,8 @@ local function add_connection_arguments(state) -- map source collection and connection name to an input object local lookup_input_objects = {} - -- create InputObjects for each 1:1 or 1:1* connection of each collection - for_each_connection(state, {'1:1', '1:1*'}, function(collection_name, c) + -- create InputObjects for each 1:1 connection of each collection + for_each_connection(state, {'1:1'}, function(collection_name, c) -- XXX: support multihead connections if c.variants ~= nil then return end @@ -195,7 +196,7 @@ local function add_connection_arguments(state) -- update fields of collection arguments and input objects with other input -- objects - for_each_connection(state, {'1:1', '1:1*'}, function(collection_name, c) + for_each_connection(state, {'1:1'}, function(collection_name, c) -- XXX: support multihead connections if c.variants ~= nil then return end diff --git a/graphql/convert_schema/types.lua b/graphql/convert_schema/types.lua index dd4484f..dac999a 100644 --- a/graphql/convert_schema/types.lua +++ b/graphql/convert_schema/types.lua @@ -49,8 +49,6 @@ local function args_from_destination_collection(state, collection, connection_type) if connection_type == '1:1' then return state.object_arguments[collection] - elseif connection_type == '1:1*' then - return state.object_arguments[collection] elseif connection_type == '1:N' then return state.all_arguments[collection] else @@ -60,8 +58,6 @@ end local function specify_destination_type(destination_type, connection_type) if connection_type == '1:1' then - return core_types.nonNull(destination_type) - elseif connection_type == '1:1*' then return destination_type elseif connection_type == '1:N' then return core_types.nonNull(core_types.list(core_types.nonNull( @@ -77,7 +73,7 @@ end --- described in comments to @{convert_multihead_connection}. 
--- --- @tparam table type_to_box GraphQL Object type (which represents a collection) ---- @tparam string connection_type of given collection (1:1, 1:1* or 1:N) +--- @tparam string connection_type of given collection (1:1, 1:N) --- @tparam string type_to_box_name name of given 'type_to_box' (It can not --- be taken from 'type_to_box' because at the time of function execution --- 'type_to_box' refers to an empty table, which later will be filled with @@ -96,9 +92,6 @@ local function box_collection_type(type_to_box, connection_type, if connection_type == '1:1' then box_type_name = 'box_' .. type_to_box_name box_type_description = 'Box around 1:1 multi-head variant' - elseif connection_type == '1:1*' then - box_type_name = 'box_' .. type_to_box_name - box_type_description = 'Box around 1:1* multi-head variant' elseif connection_type == '1:N' then box_type_name = 'box_array_' .. type_to_box_name box_type_description = 'Box around 1:N multi-head variant' @@ -162,8 +155,11 @@ local function convert_simple_connection(state, connection, collection_name) extra = e_args, } + local opts = { + disable_dangling_check = state.disable_dangling_check, + } local resolve_function = resolve.gen_resolve_function(collection_name, c, - destination_type, arguments, state.accessor) + destination_type, arguments, state.accessor, opts) local field = { name = c.name, @@ -283,9 +279,12 @@ local function convert_multihead_connection(state, connection, collection_name, union_types[#union_types + 1] = variant_type end + local opts = { + disable_dangling_check = state.disable_dangling_check, + } local resolve_function = resolve.gen_resolve_function_multihead( collection_name, c, union_types, var_num_to_box_field_name, - state.accessor) + state.accessor, opts) local field = { name = c.name, @@ -317,9 +316,8 @@ end local convert_connection_to_field = function(state, connection, collection_name, context) check(connection.type, 'connection.type', 'string') - assert(connection.type == '1:1' or 
connection.type == '1:1*' or - connection.type == '1:N', 'connection.type must be 1:1, 1:1* or 1:N, '.. - 'got ' .. connection.type) + assert(connection.type == '1:1' or connection.type == '1:N', + 'connection.type must be 1:1 or 1:N, got ' .. connection.type) check(connection.name, 'connection.name', 'string') assert(connection.destination_collection or connection.variants, 'connection must either destination_collection or variants field') diff --git a/graphql/impl.lua b/graphql/impl.lua index f97c110..83ed138 100644 --- a/graphql/impl.lua +++ b/graphql/impl.lua @@ -222,34 +222,16 @@ end --- }, --- ... --- }, ---- accessor = setmetatable({}, { ---- __index = { ---- select = function(self, parent, collection_name, from, ---- object_args_instance, list_args_instance, extra) ---- -- * from has the following structure: ---- -- ---- -- { ---- -- collection_name = <...>, ---- -- connection_name = <...>, ---- -- destination_args_names = <...>, ---- -- destination_args_values = <...>, ---- -- } ---- -- ---- -- from.collection_name is nil for a top-level collection. ---- -- ---- -- `extra` is a table which contains additional data for ---- -- the query: ---- -- ---- -- * `qcontext` (table) can be used by an accessor to store ---- -- any query-related data; ---- -- * `resolveField(field_name, object, filter, opts)` ---- -- (function) for performing a subrequest on a fields ---- -- connected using a 1:1 or 1:1* connection. ---- -- ---- return ... ---- end, ---- } ---- }), +--- indexes =