From 100a2ec40f9a89d339e912d414a70693f5f49714 Mon Sep 17 00:00:00 2001 From: Alexander Turenko Date: Tue, 19 Jun 2018 13:41:38 +0300 Subject: [PATCH 1/2] Remove 1:1* connection, change 1:1 behaviour Changes in 1:1 connection behaviour: * The case when all connection parts are null is allowed (gives null child object instead of an error). * The case when no child objects were matched is allowed. - FULL MATCH constraint is not verified entirely (further work is needed). - The following bug was fixed: usage of filtering arguments now cannot trigger 'expect one matching object' error if data are consistent. Other changes: * GraphQL auto configuration behaviour was changed in the case when a connection parts count is the same as the underlying index parts count, the index is unique and some index parts are nullable and some are not. Now it generates a 1:1 connection (and leans on runtime checks) instead of giving an error. Part of #135. --- graphql/accessor_general.lua | 3 +- graphql/config_complement.lua | 66 ++++---------------- graphql/convert_schema/resolve.lua | 27 ++------ graphql/convert_schema/schema.lua | 13 ++-- graphql/convert_schema/types.lua | 14 +---- graphql/impl.lua | 2 +- test/extra/to_avro_huge.test.lua | 6 +- test/testdata/nullable_1_1_conn_testdata.lua | 2 +- 8 files changed, 31 insertions(+), 102 deletions(-) diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua index 8b2bc33..54b1c82 100644 --- a/graphql/accessor_general.lua +++ b/graphql/accessor_general.lua @@ -883,8 +883,7 @@ local function process_tuple(self, state, tuple, opts) if obj[k] == nil then local field_name = k local sub_filter = v - local sub_opts = {dont_force_nullability = true} - local field = resolveField(field_name, obj, sub_filter, sub_opts) + local field = resolveField(field_name, obj, sub_filter) if field == nil then return true end obj[k] = field -- XXX: Remove the value from a filter? 
But then we need to copy diff --git a/graphql/config_complement.lua b/graphql/config_complement.lua index 025decc..3acddf5 100644 --- a/graphql/config_complement.lua +++ b/graphql/config_complement.lua @@ -9,64 +9,25 @@ local json = require('json') local yaml = require('yaml') local log = require('log') + local utils = require('graphql.utils') local check = utils.check -local get_spaces_formats = require('graphql.simple_config').get_spaces_formats local config_complement = {} ---- The function determines connection type by connection.parts ---- and source collection space format. ---- ---- XXX Currently there are two possible situations when connection_parts form ---- unique index - all source_fields are nullable (1:1*) or all source_fields ---- are non nullable (1:1). In case of partially nullable connection_parts (which ---- form unique index) the error is raised. There is an alternative: relax ---- this requirement and deduce non-null connection type in the case. -local function determine_connection_type(connection_parts, index, source_space_format) - local type - - if #connection_parts < #(index.fields) then - type = '1:N' - end - - if #connection_parts == #(index.fields) then - if index.unique then - type = '1:1' - else - type = '1:N' - end - end - - local is_all_nullable = true - local is_all_not_nullable = true - - for _, connection_part in pairs(connection_parts) do - for _,field_format in ipairs(source_space_format) do - if connection_part.source_field == field_format.name then - if field_format.is_nullable == true then - is_all_not_nullable = false - else - is_all_nullable = false - end - end - end - end - - if is_all_nullable == is_all_not_nullable and type == '1:1' then - error('source_fields in connection_parts must be all nullable or ' .. - 'not nullable at the same time') +--- Determine connection type by connection.parts and index uniqueness. 
+local function determine_connection_type(connection_parts, index) + if #connection_parts < #index.fields then + return '1:N' + elseif #connection_parts == #index.fields then + return index.unique and '1:1' or '1:N' end - if is_all_nullable and type == '1:1' then - type = '1:1*' - end - - return type + error(('Connection parts count is more then index parts count: %d > %d') + :format(#connection_parts, #index.fields)) end --- The function returns connection_parts sorted by destination_fields as --- index_fields prefix. +-- Return connection_parts sorted by destination_fields as index_fields prefix. local function sort_parts(connection_parts, index_fields) local sorted_parts = {} @@ -211,8 +172,6 @@ local function complement_connections(collections, connections, indexes) check(collections, 'collections', 'table') check(connections, 'connections', 'table') - local spaces_formats = get_spaces_formats() - for _, c in pairs(connections) do check(c.name, 'connection.name', 'string') check(c.source_collection, 'connection.source_collection', 'string') @@ -230,10 +189,7 @@ local function complement_connections(collections, connections, indexes) result_c.destination_collection = c.destination_collection result_c.parts = determine_connection_parts(c.parts, index) - local source_space_format = spaces_formats[result_c.source_collection] - - result_c.type = determine_connection_type(result_c.parts, index, - source_space_format) + result_c.type = determine_connection_type(result_c.parts, index) result_c.index_name = c.index_name result_c.name = c.name diff --git a/graphql/convert_schema/resolve.lua b/graphql/convert_schema/resolve.lua index ba4c4fc..bb3c036 100644 --- a/graphql/convert_schema/resolve.lua +++ b/graphql/convert_schema/resolve.lua @@ -106,29 +106,16 @@ function resolve.gen_resolve_function(collection_name, connection, local opts = opts or {} assert(type(opts) == 'table', 'opts must be nil or a table, got ' .. 
type(opts)) - local dont_force_nullability = - opts.dont_force_nullability or false - assert(type(dont_force_nullability) == 'boolean', - 'opts.dont_force_nullability ' .. - 'must be nil or a boolean, got ' .. - type(dont_force_nullability)) + -- no opts for now local from = gen_from_parameter(collection_name, parent, c) -- Avoid non-needed index lookup on a destination collection when -- all connection parts are null: - -- * return null for 1:1* connection; + -- * return null for 1:1 connection; -- * return {} for 1:N connection (except the case when source -- collection is the query or the mutation pseudo-collection). if collection_name ~= nil and are_all_parts_null(parent, c.parts) then - if c.type ~= '1:1*' and c.type ~= '1:N' then - -- `if` is to avoid extra json.encode - assert(c.type == '1:1*' or c.type == '1:N', - ('only 1:1* or 1:N connections can have ' .. - 'all key parts null; parent is %s from ' .. - 'collection "%s"'):format(json.encode(parent), - tostring(collection_name))) - end return c.type == '1:N' and {} or nil end @@ -152,14 +139,8 @@ function resolve.gen_resolve_function(collection_name, connection, assert(type(objs) == 'table', 'objs list received from an accessor ' .. 'must be a table, got ' .. type(objs)) - if c.type == '1:1' or c.type == '1:1*' then - -- we expect here exactly one object even for 1:1* - -- connections because we processed all-parts-are-null - -- situation above - assert(#objs == 1 or dont_force_nullability, - 'expect one matching object, got ' .. - tostring(#objs)) - return objs[1] + if c.type == '1:1' then + return objs[1] -- nil for empty list of matching objects else -- c.type == '1:N' return objs end diff --git a/graphql/convert_schema/schema.lua b/graphql/convert_schema/schema.lua index eaaaaa1..947f226 100644 --- a/graphql/convert_schema/schema.lua +++ b/graphql/convert_schema/schema.lua @@ -25,7 +25,7 @@ local schema = {} --- * DONE: Move avro-schema -> GraphQL arguments translating into its own --- module. 
--- * DONE: Support a sub-record arguments and others (union, array, ...). ---- * TBD: Generate arguments for cartesian product of {1:1, 1:1*, 1:N, all} x +--- * TBD: Generate arguments for cartesian product of {1:1, 1:N, all} x --- {query, mutation, all} x {top-level, nested, all} x {collections}. --- * TBD: Use generated arguments in GraphQL types (schema) generation. --- @@ -138,7 +138,8 @@ local function create_root_collection(state) }) end ---- Execute a function for each 1:1 or 1:1* connection of each collection. +--- Execute a function for each connection of one of specified types in each +--- collection. --- --- @tparam table state tarantool_graphql instance --- @@ -160,7 +161,7 @@ local function for_each_connection(state, connection_types, func) end end ---- Add arguments corresponding to 1:1 and 1:1* connections (nested filters). +--- Add arguments corresponding to 1:1 connections (nested filters). --- --- @tparam table state graphql_tarantool instance local function add_connection_arguments(state) @@ -169,8 +170,8 @@ local function add_connection_arguments(state) -- map source collection and connection name to an input object local lookup_input_objects = {} - -- create InputObjects for each 1:1 or 1:1* connection of each collection - for_each_connection(state, {'1:1', '1:1*'}, function(collection_name, c) + -- create InputObjects for each 1:1 connection of each collection + for_each_connection(state, {'1:1'}, function(collection_name, c) -- XXX: support multihead connections if c.variants ~= nil then return end @@ -195,7 +196,7 @@ local function add_connection_arguments(state) -- update fields of collection arguments and input objects with other input -- objects - for_each_connection(state, {'1:1', '1:1*'}, function(collection_name, c) + for_each_connection(state, {'1:1'}, function(collection_name, c) -- XXX: support multihead connections if c.variants ~= nil then return end diff --git a/graphql/convert_schema/types.lua b/graphql/convert_schema/types.lua 
index dd4484f..5a9ad0a 100644 --- a/graphql/convert_schema/types.lua +++ b/graphql/convert_schema/types.lua @@ -49,8 +49,6 @@ local function args_from_destination_collection(state, collection, connection_type) if connection_type == '1:1' then return state.object_arguments[collection] - elseif connection_type == '1:1*' then - return state.object_arguments[collection] elseif connection_type == '1:N' then return state.all_arguments[collection] else @@ -60,8 +58,6 @@ end local function specify_destination_type(destination_type, connection_type) if connection_type == '1:1' then - return core_types.nonNull(destination_type) - elseif connection_type == '1:1*' then return destination_type elseif connection_type == '1:N' then return core_types.nonNull(core_types.list(core_types.nonNull( @@ -77,7 +73,7 @@ end --- described in comments to @{convert_multihead_connection}. --- --- @tparam table type_to_box GraphQL Object type (which represents a collection) ---- @tparam string connection_type of given collection (1:1, 1:1* or 1:N) +--- @tparam string connection_type of given collection (1:1, 1:N) --- @tparam string type_to_box_name name of given 'type_to_box' (It can not --- be taken from 'type_to_box' because at the time of function execution --- 'type_to_box' refers to an empty table, which later will be filled with @@ -96,9 +92,6 @@ local function box_collection_type(type_to_box, connection_type, if connection_type == '1:1' then box_type_name = 'box_' .. type_to_box_name box_type_description = 'Box around 1:1 multi-head variant' - elseif connection_type == '1:1*' then - box_type_name = 'box_' .. type_to_box_name - box_type_description = 'Box around 1:1* multi-head variant' elseif connection_type == '1:N' then box_type_name = 'box_array_' .. 
type_to_box_name box_type_description = 'Box around 1:N multi-head variant' @@ -317,9 +310,8 @@ end local convert_connection_to_field = function(state, connection, collection_name, context) check(connection.type, 'connection.type', 'string') - assert(connection.type == '1:1' or connection.type == '1:1*' or - connection.type == '1:N', 'connection.type must be 1:1, 1:1* or 1:N, '.. - 'got ' .. connection.type) + assert(connection.type == '1:1' or connection.type == '1:N', + 'connection.type must be 1:1 or 1:N, got ' .. connection.type) check(connection.name, 'connection.name', 'string') assert(connection.destination_collection or connection.variants, 'connection must either destination_collection or variants field') diff --git a/graphql/impl.lua b/graphql/impl.lua index f97c110..d5d3306 100644 --- a/graphql/impl.lua +++ b/graphql/impl.lua @@ -244,7 +244,7 @@ end --- -- any query-related data; --- -- * `resolveField(field_name, object, filter, opts)` --- -- (function) for performing a subrequest on a fields ---- -- connected using a 1:1 or 1:1* connection. +--- -- connected using a 1:1 connection. --- -- --- return ... 
--- end, diff --git a/test/extra/to_avro_huge.test.lua b/test/extra/to_avro_huge.test.lua index b60512d..15ea2a7 100755 --- a/test/extra/to_avro_huge.test.lua +++ b/test/extra/to_avro_huge.test.lua @@ -114,7 +114,7 @@ fields: type: int - name: order_item__item type: - type: record + type: record* fields: - name: id type: int @@ -144,7 +144,7 @@ fields: type: string - name: user_connection type: - type: record + type: record* fields: - name: id type: int @@ -168,7 +168,7 @@ fields: fields: - name: order_item__item type: - type: record + type: record* fields: - name: name type: string diff --git a/test/testdata/nullable_1_1_conn_testdata.lua b/test/testdata/nullable_1_1_conn_testdata.lua index b3e4f02..fc7f1a0 100644 --- a/test/testdata/nullable_1_1_conn_testdata.lua +++ b/test/testdata/nullable_1_1_conn_testdata.lua @@ -51,7 +51,7 @@ function nullable_1_1_conn_testdata.get_test_metadata() "index_name": "in_reply_to" }, { - "type": "1:1*", + "type": "1:1", "name": "in_reply_to", "destination_collection": "email", "parts": [ From 11d5e4eb41f464035de48817686e52bc2623e1ff Mon Sep 17 00:00:00 2001 From: Alexander Turenko Date: Wed, 20 Jun 2018 16:00:56 +0300 Subject: [PATCH 2/2] Implement runtime dangling 1:1 connection check This check was implemented improperly: it can be triggered by a user using filters that do not match anything. The check was removed in the previous commit. Now it is implemented correctly. The option 'disable_dangling_check' to disable this check was introduced. Fixes #135. 
--- graphql/accessor_general.lua | 55 +++++++++++++++- graphql/convert_schema/resolve.lua | 28 ++++++--- graphql/convert_schema/types.lua | 10 ++- graphql/impl.lua | 42 ++++--------- .../common/nullable_1_1_conn_nocheck.test.lua | 20 ++++++ test/space/nested_args.test.lua | 2 +- test/testdata/nullable_1_1_conn_testdata.lua | 63 ++++++++++++++++++- 7 files changed, 175 insertions(+), 45 deletions(-) create mode 100755 test/common/nullable_1_1_conn_nocheck.test.lua diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua index 54b1c82..d3bdce3 100644 --- a/graphql/accessor_general.lua +++ b/graphql/accessor_general.lua @@ -990,6 +990,7 @@ local function select_internal(self, collection_name, from, filter, args, extra) -- XXX: save type of args.offset at parsing and check here -- check(args.offset, 'args.offset', ...) check(args.pcre, 'args.pcre', 'table', 'nil') + check(extra.exp_tuple_count, 'extra.exp_tuple_count', 'number', 'nil') local collection = self.collections[collection_name] assert(collection ~= nil, @@ -1043,6 +1044,18 @@ local function select_internal(self, collection_name, from, filter, args, extra) resolveField = extra.resolveField, } + -- assert that connection constraint applied only to objects got from the + -- index that underlies the connection + if extra.exp_tuple_count ~= nil then + local err = 'internal error: connection constraint (expected tuple ' .. + 'count) cannot be applied to an index that is not under a ' .. 
+ 'connection' + assert(from.collection_name ~= nil, err) + assert(index ~= nil, err) + assert(pivot == nil or (pivot.value_list == nil and + pivot.filter ~= nil), err) + end + if index == nil then -- fullscan local primary_index = self.funcs.get_primary_index(self, @@ -1087,21 +1100,38 @@ local function select_internal(self, collection_name, from, filter, args, extra) iterator_opts.limit = args.limit end + local tuple_count = 0 + for _, tuple in index:pairs(index_value, iterator_opts) do + tuple_count = tuple_count + 1 + -- check full match constraint + if extra.exp_tuple_count ~= nil and + tuple_count > extra.exp_tuple_count then + error(('FULL MATCH constraint was failed: we got more then ' .. + '%d tuples'):format(extra.exp_tuple_count)) + end local continue = process_tuple(self, select_state, tuple, select_opts) if not continue then break end end + + -- check full match constraint + if extra.exp_tuple_count ~= nil and + tuple_count ~= extra.exp_tuple_count then + error(('FULL MATCH constraint was failed: we expect %d tuples, ' .. + 'got %d'):format(extra.exp_tuple_count, tuple_count)) + end end local count = select_state.count local objs = select_state.objs assert(args.limit == nil or count <= args.limit, - ('count[%d] exceeds limit[%s] (before return)'):format( - count, args.limit)) + ('internal error: selected objects count (%d) exceeds limit (%s)') + :format(count, args.limit)) assert(#objs == count, - ('count[%d] is not equal to objs count[%d]'):format(count, #objs)) + ('internal error: selected objects count (%d) is not equal size of ' .. + 'selected object list (%d)'):format(count, #objs)) return objs end @@ -1382,6 +1412,25 @@ end --- @treturn table data accessor instance, a table with the two methods --- (`select` and `arguments`) as described in the @{impl.new} function --- description. 
+--- +--- Brief explanation of some select function parameters: +--- +--- * `from` (table or nil) is nil for a top-level collection or a table with +--- the following fields: +--- +--- - collection_name +--- - connection_name +--- - destination_args_names +--- - destination_args_values +--- +--- * `extra` (table) is a table which contains additional data for the query: +--- +--- - `qcontext` (table) can be used by an accessor to store any +--- query-related data; +--- - `resolveField(field_name, object, filter, opts)` (function) for +--- performing a subrequest on a fields connected using a 1:1 connection. +--- - extra_args +--- - exp_tuple_count function accessor_general.new(opts, funcs) assert(type(opts) == 'table', 'opts must be a table, got ' .. type(opts)) diff --git a/graphql/convert_schema/resolve.lua b/graphql/convert_schema/resolve.lua index bb3c036..cc33c7b 100644 --- a/graphql/convert_schema/resolve.lua +++ b/graphql/convert_schema/resolve.lua @@ -29,10 +29,10 @@ local function gen_from_parameter(collection_name, parent, connection) } end --- Check FULL match constraint before request of --- destination object(s). Note that connection key parts --- can be prefix of index key parts. Zero parts count --- considered as ok by this check. +--- Check FULL match constraint before request of destination object(s). +--- +--- Note that connection key parts can be prefix of index key parts. Zero parts +--- count considered as ok by this check. 
local function are_all_parts_null(parent, connection_parts) local are_all_parts_null = true local are_all_parts_non_null = true @@ -84,8 +84,10 @@ local function separate_args_instance(args_instance, arguments) end function resolve.gen_resolve_function(collection_name, connection, - destination_type, arguments, accessor) + destination_type, arguments, accessor, opts) local c = connection + local opts = opts or {} + local disable_dangling_check = opts.disable_dangling_check or false local bare_destination_type = core_types.bare(destination_type) -- capture `bare_destination_type` @@ -119,11 +121,17 @@ function resolve.gen_resolve_function(collection_name, connection, return c.type == '1:N' and {} or nil end + local exp_tuple_count + if not disable_dangling_check and c.type == '1:1' then + exp_tuple_count = 1 + end + local resolveField = genResolveField(info) local extra = { qcontext = info.qcontext, resolveField = resolveField, -- for subrequests extra_args = {}, + exp_tuple_count = exp_tuple_count, } -- object_args_instance will be passed to 'filter' @@ -148,7 +156,9 @@ function resolve.gen_resolve_function(collection_name, connection, end function resolve.gen_resolve_function_multihead(collection_name, connection, - union_types, var_num_to_box_field_name, accessor) + union_types, var_num_to_box_field_name, accessor, opts) + local opts = opts or {} + local disable_dangling_check = opts.disable_dangling_check or false local c = connection local determinant_keys = utils.get_keys(c.variants[1].determinant) @@ -189,9 +199,13 @@ function resolve.gen_resolve_function_multihead(collection_name, connection, name = c.name, destination_collection = v.destination_collection, } + local opts = { + disable_dangling_check = disable_dangling_check, + } -- XXX: generate a function for each variant at schema generation time local result = resolve.gen_resolve_function(collection_name, - quazi_connection, destination_type, {}, accessor)(parent, {}, info) + quazi_connection, 
destination_type, {}, accessor, opts)( + parent, {}, info) -- This 'wrapping' is needed because we use 'select' on 'collection' -- GraphQL type and the result of the resolve function must be in diff --git a/graphql/convert_schema/types.lua b/graphql/convert_schema/types.lua index 5a9ad0a..dac999a 100644 --- a/graphql/convert_schema/types.lua +++ b/graphql/convert_schema/types.lua @@ -155,8 +155,11 @@ local function convert_simple_connection(state, connection, collection_name) extra = e_args, } + local opts = { + disable_dangling_check = state.disable_dangling_check, + } local resolve_function = resolve.gen_resolve_function(collection_name, c, - destination_type, arguments, state.accessor) + destination_type, arguments, state.accessor, opts) local field = { name = c.name, @@ -276,9 +279,12 @@ local function convert_multihead_connection(state, connection, collection_name, union_types[#union_types + 1] = variant_type end + local opts = { + disable_dangling_check = state.disable_dangling_check, + } local resolve_function = resolve.gen_resolve_function_multihead( collection_name, c, union_types, var_num_to_box_field_name, - state.accessor) + state.accessor, opts) local field = { name = c.name, diff --git a/graphql/impl.lua b/graphql/impl.lua index d5d3306..83ed138 100644 --- a/graphql/impl.lua +++ b/graphql/impl.lua @@ -222,34 +222,16 @@ end --- }, --- ... --- }, ---- accessor = setmetatable({}, { ---- __index = { ---- select = function(self, parent, collection_name, from, ---- object_args_instance, list_args_instance, extra) ---- -- * from has the following structure: ---- -- ---- -- { ---- -- collection_name = <...>, ---- -- connection_name = <...>, ---- -- destination_args_names = <...>, ---- -- destination_args_values = <...>, ---- -- } ---- -- ---- -- from.collection_name is nil for a top-level collection. 
---- -- ---- -- `extra` is a table which contains additional data for ---- -- the query: ---- -- ---- -- * `qcontext` (table) can be used by an accessor to store ---- -- any query-related data; ---- -- * `resolveField(field_name, object, filter, opts)` ---- -- (function) for performing a subrequest on a fields ---- -- connected using a 1:1 connection. ---- -- ---- return ... ---- end, ---- } ---- }), +--- indexes = , +--- service_fields =
, +--- accessor =
or , +--- accessor_funcs =
, +--- collection_use_tomap = , +--- resulting_object_cnt_max = , +--- fetched_object_cnt_max = , +--- timeout_ms = , +--- enable_mutations = , +--- disable_dangling_check = , --- }) function impl.new(cfg) local cfg = cfg or {} @@ -279,7 +261,9 @@ function impl.new(cfg) cfg.indexes = cfg.accessor.indexes end - local state = {} + local state = { + disable_dangling_check = cfg.disable_dangling_check, + } convert_schema.convert(state, cfg) return setmetatable(state, { __index = { diff --git a/test/common/nullable_1_1_conn_nocheck.test.lua b/test/common/nullable_1_1_conn_nocheck.test.lua new file mode 100755 index 0000000..1d2b69b --- /dev/null +++ b/test/common/nullable_1_1_conn_nocheck.test.lua @@ -0,0 +1,20 @@ +#!/usr/bin/env tarantool + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + +local test_utils = require('test.test_utils') +local testdata = require('test.testdata.nullable_1_1_conn_testdata') + +box.cfg({}) + +test_utils.run_testdata(testdata, { + graphql_opts = { + disable_dangling_check = true, + }, +}) + +os.exit() diff --git a/test/space/nested_args.test.lua b/test/space/nested_args.test.lua index 3f1329a..fadc8ab 100755 --- a/test/space/nested_args.test.lua +++ b/test/space/nested_args.test.lua @@ -36,7 +36,7 @@ local BODY_FN = avro_version == 3 and 5 or 7 for _, tuple in box.space.email:pairs() do local body = tuple[BODY_FN] - if body:match('^[xy]$') then + if body:match('^[xyz]$') then local key = {tuple[LOCALPART_FN], tuple[DOMAIN_FN]} box.space.email:delete(key) end diff --git a/test/testdata/nullable_1_1_conn_testdata.lua b/test/testdata/nullable_1_1_conn_testdata.lua index fc7f1a0..077d4b2 100644 --- a/test/testdata/nullable_1_1_conn_testdata.lua +++ b/test/testdata/nullable_1_1_conn_testdata.lua @@ -255,6 +255,17 @@ function 
nullable_1_1_conn_testdata.fill_test_data(virtbox, meta) in_reply_to_domain = box.NULL, body = 'y', }) + + -- Check dangling 1:1 connection. + local localpart = prng:next_string(16):hex() + local non_existent_localpart = prng:next_string(16):hex() + test_utils.replace_object(virtbox, meta, 'email', { + localpart = localpart, + domain = domain, + in_reply_to_localpart = non_existent_localpart, + in_reply_to_domain = DOMAIN, + body = 'z', + }) end function nullable_1_1_conn_testdata.drop_spaces() @@ -264,7 +275,7 @@ end function nullable_1_1_conn_testdata.run_queries(gql_wrapper) local test = tap.test('nullable_1_1_conn') - test:plan(5) + test:plan(7) -- {{{ downside traversal (1:N connections) @@ -339,10 +350,10 @@ function nullable_1_1_conn_testdata.run_queries(gql_wrapper) -- {{{ upside traversal (1:1 connections) local query_upside = [[ - query emails_trace_upside($body: String) { + query emails_trace_upside($body: String, $child_domain: String) { email(body: $body) { body - in_reply_to { + in_reply_to(domain: $child_domain) { body in_reply_to { body @@ -416,6 +427,52 @@ function nullable_1_1_conn_testdata.run_queries(gql_wrapper) exp_result.err = exp_result.err:gsub(', ', ',') test:is_deeply(result, exp_result, 'upside_y') + -- Check we get an error when trying to use dangling 1:1 connection. Check + -- we don't get this error when `disable_dangling_check` is set. 
+ if gql_wrapper.disable_dangling_check then + local variables_upside_z = {body = 'z'} + local result = test_utils.show_trace(function() + return gql_query_upside:execute(variables_upside_z) + end) + + local exp_result = yaml.decode(([[ + --- + email: + - body: z + ]]):strip()) + + test:is_deeply(result, exp_result, 'upside_z disabled constraint check') + else + local variables_upside_z = {body = 'z'} + local ok, err = pcall(function() + return gql_query_upside:execute(variables_upside_z) + end) + + local result = {ok = ok, err = test_utils.strip_error(err)} + local exp_result = yaml.decode(([[ + --- + ok: false + err: "FULL MATCH constraint was failed: we expect 1 tuples, got 0" + ]]):strip()) + test:is_deeply(result, exp_result, 'upside_z constraint violation') + end + + -- We can got zero objects by 1:1 connection when use filters, it is not + -- violation of FULL MATCH constraint, because we found corresponding + -- tuple, but filter it then. + local variables_upside_f = {body = 'f', child_domain = 'non-existent'} + local result = test_utils.show_trace(function() + return gql_query_upside:execute(variables_upside_f) + end) + + local exp_result = yaml.decode(([[ + --- + email: + - body: f + ]]):strip()) + + test:is_deeply(result, exp_result, 'upside_f filter child') + assert(test:check(), 'check plan') end