This repository was archived by the owner on Apr 14, 2022. It is now read-only.

Remove 1:1* connections, relax constraints of 1:1 ones #178

Merged
merged 2 commits on Jun 20, 2018
58 changes: 53 additions & 5 deletions graphql/accessor_general.lua
@@ -883,8 +883,7 @@ local function process_tuple(self, state, tuple, opts)
if obj[k] == nil then
local field_name = k
local sub_filter = v
local sub_opts = {dont_force_nullability = true}
local field = resolveField(field_name, obj, sub_filter, sub_opts)
local field = resolveField(field_name, obj, sub_filter)
if field == nil then return true end
obj[k] = field
-- XXX: Remove the value from a filter? But then we need to copy
@@ -991,6 +990,7 @@ local function select_internal(self, collection_name, from, filter, args, extra)
-- XXX: save type of args.offset at parsing and check here
-- check(args.offset, 'args.offset', ...)
check(args.pcre, 'args.pcre', 'table', 'nil')
check(extra.exp_tuple_count, 'extra.exp_tuple_count', 'number', 'nil')

local collection = self.collections[collection_name]
assert(collection ~= nil,
@@ -1044,6 +1044,18 @@ local function select_internal(self, collection_name, from, filter, args, extra)
resolveField = extra.resolveField,
}

-- assert that the connection constraint is applied only to objects
-- obtained from the index that underlies the connection
if extra.exp_tuple_count ~= nil then
local err = 'internal error: connection constraint (expected tuple ' ..
'count) cannot be applied to an index that is not under a ' ..
'connection'
assert(from.collection_name ~= nil, err)
assert(index ~= nil, err)
assert(pivot == nil or (pivot.value_list == nil and
pivot.filter ~= nil), err)
end

if index == nil then
-- fullscan
local primary_index = self.funcs.get_primary_index(self,
@@ -1088,21 +1100,38 @@ local function select_internal(self, collection_name, from, filter, args, extra)
iterator_opts.limit = args.limit
end

local tuple_count = 0

for _, tuple in index:pairs(index_value, iterator_opts) do
tuple_count = tuple_count + 1
-- check the FULL MATCH constraint: must not get more tuples than
-- expected
if extra.exp_tuple_count ~= nil and
tuple_count > extra.exp_tuple_count then
error(('FULL MATCH constraint failed: got more than ' ..
'%d tuples'):format(extra.exp_tuple_count))
end
local continue = process_tuple(self, select_state, tuple,
select_opts)
if not continue then break end
end

-- check the FULL MATCH constraint: exactly the expected count of
-- tuples must be fetched
if extra.exp_tuple_count ~= nil and
tuple_count ~= extra.exp_tuple_count then
error(('FULL MATCH constraint failed: expected %d tuples, ' ..
'got %d'):format(extra.exp_tuple_count, tuple_count))
end
end

local count = select_state.count
local objs = select_state.objs

assert(args.limit == nil or count <= args.limit,
('count[%d] exceeds limit[%s] (before return)'):format(
count, args.limit))
('internal error: selected objects count (%d) exceeds limit (%s)')
:format(count, args.limit))
assert(#objs == count,
('count[%d] is not equal to objs count[%d]'):format(count, #objs))
('internal error: selected objects count (%d) is not equal to the ' ..
'size of the selected object list (%d)'):format(count, #objs))

return objs
end
@@ -1383,6 +1412,25 @@ end
--- @treturn table data accessor instance, a table with the two methods
--- (`select` and `arguments`) as described in the @{impl.new} function
--- description.
---
--- Brief explanation of some select function parameters:
---
--- * `from` (table or nil) is nil for a top-level collection or a table with
--- the following fields:
---
--- - collection_name
--- - connection_name
--- - destination_args_names
--- - destination_args_values
---
--- * `extra` (table) contains additional data for the query:
---
--- - `qcontext` (table) can be used by an accessor to store any
--- query-related data;
--- - `resolveField(field_name, object, filter, opts)` (function) for
--- performing a subrequest on a field connected using a 1:1 connection;
--- - extra_args
--- - `exp_tuple_count` (number or nil) expected count of fetched
--- tuples; used to check the FULL MATCH constraint for 1:1
--- connections.
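---
--- A hypothetical `from` value for a nested request (the collection,
--- connection and argument names below are illustrative, not taken
--- from a real schema):
---
---     {
---         collection_name = 'user_collection',
---         connection_name = 'order_connection',
---         destination_args_names = {'user_id'},
---         destination_args_values = {42},
---     }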
function accessor_general.new(opts, funcs)
assert(type(opts) == 'table',
'opts must be a table, got ' .. type(opts))
66 changes: 11 additions & 55 deletions graphql/config_complement.lua
@@ -9,64 +9,25 @@
local json = require('json')
local yaml = require('yaml')
local log = require('log')

local utils = require('graphql.utils')
local check = utils.check
local get_spaces_formats = require('graphql.simple_config').get_spaces_formats

local config_complement = {}

--- The function determines connection type by connection.parts
--- and source collection space format.
---
--- XXX Currently there are two possible situations when connection_parts form
--- unique index - all source_fields are nullable (1:1*) or all source_fields
--- are non nullable (1:1). In case of partially nullable connection_parts (which
--- form unique index) the error is raised. There is an alternative: relax
--- this requirement and deduce non-null connection type in the case.
local function determine_connection_type(connection_parts, index, source_space_format)
local type

if #connection_parts < #(index.fields) then
type = '1:N'
end

if #connection_parts == #(index.fields) then
if index.unique then
type = '1:1'
else
type = '1:N'
end
end

local is_all_nullable = true
local is_all_not_nullable = true

for _, connection_part in pairs(connection_parts) do
for _,field_format in ipairs(source_space_format) do
if connection_part.source_field == field_format.name then
if field_format.is_nullable == true then
is_all_not_nullable = false
else
is_all_nullable = false
end
end
end
end

if is_all_nullable == is_all_not_nullable and type == '1:1' then
error('source_fields in connection_parts must be all nullable or ' ..
'not nullable at the same time')
--- Determine connection type by connection.parts and index uniqueness.
local function determine_connection_type(connection_parts, index)
if #connection_parts < #index.fields then
return '1:N'
elseif #connection_parts == #index.fields then
return index.unique and '1:1' or '1:N'
end

if is_all_nullable and type == '1:1' then
type = '1:1*'
end

return type
error(('Connection parts count is more than index parts count: %d > %d')
:format(#connection_parts, #index.fields))
end
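
-- A hypothetical illustration of the rules above; the index shape
-- (a `fields` list plus a `unique` flag) is assumed from the code:
--
--   determine_connection_type({{source_field = 'id'}},
--       {unique = true, fields = {'id'}})  --> '1:1'
--   determine_connection_type({{source_field = 'id'}},
--       {unique = false, fields = {'id'}}) --> '1:N'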

-- The function returns connection_parts sorted by destination_fields as
-- index_fields prefix.
-- Return connection_parts sorted by destination_fields as index_fields prefix.
local function sort_parts(connection_parts, index_fields)
local sorted_parts = {}

@@ -211,8 +172,6 @@ local function complement_connections(collections, connections, indexes)
check(collections, 'collections', 'table')
check(connections, 'connections', 'table')

local spaces_formats = get_spaces_formats()

for _, c in pairs(connections) do
check(c.name, 'connection.name', 'string')
check(c.source_collection, 'connection.source_collection', 'string')
@@ -230,10 +189,7 @@ local function complement_connections(collections, connections, indexes)
result_c.destination_collection = c.destination_collection
result_c.parts = determine_connection_parts(c.parts, index)

local source_space_format = spaces_formats[result_c.source_collection]

result_c.type = determine_connection_type(result_c.parts, index,
source_space_format)
result_c.type = determine_connection_type(result_c.parts, index)
result_c.index_name = c.index_name
result_c.name = c.name

55 changes: 25 additions & 30 deletions graphql/convert_schema/resolve.lua
@@ -29,10 +29,10 @@ local function gen_from_parameter(collection_name, parent, connection)
}
end

-- Check FULL match constraint before request of
-- destination object(s). Note that connection key parts
-- can be prefix of index key parts. Zero parts count
-- considered as ok by this check.
--- Check the FULL MATCH constraint before requesting destination
--- object(s).
---
--- Note that connection key parts can be a prefix of index key parts. A zero
--- parts count is considered OK by this check.
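---
--- For example (a hypothetical shape), a parent object whose single
--- connection part field is null makes this check return true, while a
--- parent with that field set makes it return false.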
local function are_all_parts_null(parent, connection_parts)
local are_all_parts_null = true
local are_all_parts_non_null = true
@@ -84,8 +84,10 @@ local function separate_args_instance(args_instance, arguments)
end

function resolve.gen_resolve_function(collection_name, connection,
destination_type, arguments, accessor)
destination_type, arguments, accessor, opts)
local c = connection
local opts = opts or {}
local disable_dangling_check = opts.disable_dangling_check or false
local bare_destination_type = core_types.bare(destination_type)

-- capture `bare_destination_type`
@@ -106,37 +108,30 @@ function resolve.gen_resolve_function(collection_name, connection,
local opts = opts or {}
assert(type(opts) == 'table',
'opts must be nil or a table, got ' .. type(opts))
local dont_force_nullability =
opts.dont_force_nullability or false
assert(type(dont_force_nullability) == 'boolean',
'opts.dont_force_nullability ' ..
'must be nil or a boolean, got ' ..
type(dont_force_nullability))
-- no opts for now

local from = gen_from_parameter(collection_name, parent, c)

-- Avoid an unneeded index lookup on a destination collection when
-- all connection parts are null:
-- * return null for 1:1* connection;
-- * return null for 1:1 connection;
-- * return {} for 1:N connection (except the case when source
-- collection is the query or the mutation pseudo-collection).
if collection_name ~= nil and are_all_parts_null(parent, c.parts) then
if c.type ~= '1:1*' and c.type ~= '1:N' then
-- `if` is to avoid extra json.encode
assert(c.type == '1:1*' or c.type == '1:N',
('only 1:1* or 1:N connections can have ' ..
'all key parts null; parent is %s from ' ..
'collection "%s"'):format(json.encode(parent),
tostring(collection_name)))
end
return c.type == '1:N' and {} or nil
end

local exp_tuple_count
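-- A 1:1 connection must match exactly one destination object, so
-- unless the dangling check is disabled we ask the accessor to verify
-- the expected tuple count (the FULL MATCH constraint)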
if not disable_dangling_check and c.type == '1:1' then
exp_tuple_count = 1
end

local resolveField = genResolveField(info)
local extra = {
qcontext = info.qcontext,
resolveField = resolveField, -- for subrequests
extra_args = {},
exp_tuple_count = exp_tuple_count,
}

-- object_args_instance will be passed to 'filter'
@@ -152,22 +147,18 @@ function resolve.gen_resolve_function(collection_name, connection,
assert(type(objs) == 'table',
'objs list received from an accessor ' ..
'must be a table, got ' .. type(objs))
if c.type == '1:1' or c.type == '1:1*' then
-- we expect here exactly one object even for 1:1*
-- connections because we processed all-parts-are-null
-- situation above
assert(#objs == 1 or dont_force_nullability,
'expect one matching object, got ' ..
tostring(#objs))
return objs[1]
if c.type == '1:1' then
return objs[1] -- nil for empty list of matching objects
else -- c.type == '1:N'
return objs
end
end
end

function resolve.gen_resolve_function_multihead(collection_name, connection,
union_types, var_num_to_box_field_name, accessor)
union_types, var_num_to_box_field_name, accessor, opts)
local opts = opts or {}
local disable_dangling_check = opts.disable_dangling_check or false
local c = connection

local determinant_keys = utils.get_keys(c.variants[1].determinant)
@@ -208,9 +199,13 @@ function resolve.gen_resolve_function_multihead(collection_name, connection,
name = c.name,
destination_collection = v.destination_collection,
}
local opts = {
disable_dangling_check = disable_dangling_check,
}
-- XXX: generate a function for each variant at schema generation time
local result = resolve.gen_resolve_function(collection_name,
quazi_connection, destination_type, {}, accessor)(parent, {}, info)
quazi_connection, destination_type, {}, accessor, opts)(
parent, {}, info)

-- This 'wrapping' is needed because we use 'select' on 'collection'
-- GraphQL type and the result of the resolve function must be in
13 changes: 7 additions & 6 deletions graphql/convert_schema/schema.lua
@@ -25,7 +25,7 @@ local schema = {}
--- * DONE: Move avro-schema -> GraphQL arguments translating into its own
--- module.
--- * DONE: Support a sub-record arguments and others (union, array, ...).
--- * TBD: Generate arguments for cartesian product of {1:1, 1:1*, 1:N, all} x
--- * TBD: Generate arguments for cartesian product of {1:1, 1:N, all} x
--- {query, mutation, all} x {top-level, nested, all} x {collections}.
--- * TBD: Use generated arguments in GraphQL types (schema) generation.
---
@@ -138,7 +138,8 @@ local function create_root_collection(state)
})
end

--- Execute a function for each 1:1 or 1:1* connection of each collection.
--- Execute a function for each connection of one of the specified types in
--- each collection.
---
--- @tparam table state tarantool_graphql instance
---
@@ -160,7 +161,7 @@ local function for_each_connection(state, connection_types, func)
end
end

--- Add arguments corresponding to 1:1 and 1:1* connections (nested filters).
--- Add arguments corresponding to 1:1 connections (nested filters).
---
--- @tparam table state graphql_tarantool instance
local function add_connection_arguments(state)
@@ -169,8 +170,8 @@ local function add_connection_arguments(state)
-- map source collection and connection name to an input object
local lookup_input_objects = {}

-- create InputObjects for each 1:1 or 1:1* connection of each collection
for_each_connection(state, {'1:1', '1:1*'}, function(collection_name, c)
-- create InputObjects for each 1:1 connection of each collection
for_each_connection(state, {'1:1'}, function(collection_name, c)
-- XXX: support multihead connections
if c.variants ~= nil then return end

@@ -195,7 +196,7 @@ local function add_connection_arguments(state)

-- update fields of collection arguments and input objects with other input
-- objects
for_each_connection(state, {'1:1', '1:1*'}, function(collection_name, c)
for_each_connection(state, {'1:1'}, function(collection_name, c)
-- XXX: support multihead connections
if c.variants ~= nil then return end
