diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua index 02c68a6..3d9703b 100644 --- a/graphql/accessor_general.lua +++ b/graphql/accessor_general.lua @@ -839,7 +839,7 @@ local function process_tuple(state, tuple, opts) -- convert tuple -> object local obj = opts.unflatten_tuple(collection_name, tuple, - opts.default_unflatten_tuple) + { use_tomap = opts.use_tomap }, opts.default_unflatten_tuple) -- skip all items before pivot (the item pointed by offset) if not state.pivot_found and pivot_filter then @@ -987,6 +987,7 @@ local function select_internal(self, collection_name, from, filter, args, extra) fetched_object_cnt_max = self.settings.fetched_object_cnt_max, collection_name = collection_name, unflatten_tuple = self.funcs.unflatten_tuple, + use_tomap = self.collection_use_tomap[collection_name] or false, default_unflatten_tuple = default_unflatten_tuple, pcre = args.pcre, resolveField = extra.resolveField, @@ -1196,7 +1197,10 @@ end --- Provided `funcs` argument determines certain functions for retrieving --- tuples. 
--- ---- @tparam table opts `schemas`, `collections`, `service_fields` and `indexes` +--- @tparam table opts `schemas`, `collections`, `service_fields`, `indexes` and +--- `collection_use_tomap` ({[collection_name] = whether objects in collection +--- collection_name intended to be unflattened using tuple:tomap({names_only = true}) +--- method instead of compiled_avro_schema.unflatten(tuple), ...}) --- to give the data accessor all needed meta-information re data; the format is --- shown below; additional attributes `resulting_object_cnt_max` and --- `fetched_object_cnt_max` are optional positive numbers which help to control @@ -1301,6 +1305,7 @@ function accessor_general.new(opts, funcs) indexes = indexes, models = models, default_unflatten_tuple = default_unflatten_tuple, + collection_use_tomap = opts.collection_use_tomap or {}, index_cache = index_cache, funcs = funcs, settings = { diff --git a/graphql/accessor_shard.lua b/graphql/accessor_shard.lua index 4481cb3..b7371ae 100644 --- a/graphql/accessor_shard.lua +++ b/graphql/accessor_shard.lua @@ -84,12 +84,20 @@ end --- --- @tparam string collection_name --- @tparam cdata/table tuple +--- @tparam table opts +--- * `use_tomap` (boolean, default: false; whether objects in collection +--- collection_name intended to be unflattened using tuple:tomap({names_only = true}) +--- method instead of compiled_avro_schema.unflatten(tuple) --- @tparam function default unflatten action, call it in the following way: --- ---- ``` ---- return default(collection_name, tuple) ---- ``` -local function unflatten_tuple(collection_name, tuple, default) +--- +--- return default(collection_name, tuple) +--- +local function unflatten_tuple(collection_name, tuple, opts, default) + if opts.use_tomap then + return tuple:tomap({ names_only = true }) + end + return default(collection_name, tuple) end diff --git a/graphql/accessor_space.lua b/graphql/accessor_space.lua index b520d1c..0e0913f 100644 --- a/graphql/accessor_space.lua +++ 
b/graphql/accessor_space.lua @@ -26,12 +26,19 @@ end --- --- @tparam string collection_name --- @tparam cdata/table tuple +--- @tparam table opts +--- * `use_tomap` (boolean, default: false; whether objects in collection +--- collection_name intended to be unflattened using tuple:tomap({names_only = true}) +--- method instead of compiled_avro_schema.unflatten(tuple) --- @tparam function default unflatten action, call it in the following way: --- ---- ``` ---- return default(collection_name, tuple) ---- ``` -local function unflatten_tuple(collection_name, tuple, default) +--- +--- return default(collection_name, tuple) +--- +local function unflatten_tuple(collection_name, tuple, opts, default) + if opts.use_tomap then + return tuple:tomap({ names_only = true }) + end return default(collection_name, tuple) end diff --git a/graphql/config_complement.lua b/graphql/config_complement.lua new file mode 100644 index 0000000..2c89e31 --- /dev/null +++ b/graphql/config_complement.lua @@ -0,0 +1,255 @@ +--- Config complement module provides an ability to complement user-defined config +--- (in a simplified format) to a fully specified format. +--- +--- Notes: +--- +--- * Currently the module complements only connections (cfg.connections), +--- see @{complement_connections}. + +local json = require('json') +local yaml = require('yaml') +local log = require('log') +local utils = require('graphql.utils') +local check = utils.check +local get_spaces_formats = require('graphql.simple_config').get_spaces_formats + +local config_complement = {} + +--- The function determines connection type by connection.parts +--- and source collection space format. +--- +--- XXX Currently there are two possible situations when connection_parts form +--- unique index - all source_fields are nullable (1:1*) or all source_fields +--- are non nullable (1:1). In case of partially nullable connection_parts (which +--- form unique index) the error is raised.
There is an alternative: relax +--- this requirement and deduce non-null connection type in the case. +local function determine_connection_type(connection_parts, index, source_space_format) + local type + + if #connection_parts < #(index.fields) then + type = '1:N' + end + + if #connection_parts == #(index.fields) then + if index.unique then + type = '1:1' + else + type = '1:N' + end + end + + local is_all_nullable = true + local is_all_not_nullable = true + + for _, connection_part in pairs(connection_parts) do + for _,field_format in ipairs(source_space_format) do + if connection_part.source_field == field_format.name then + if field_format.is_nullable == true then + is_all_not_nullable = false + else + is_all_nullable = false + end + end + end + end + + if is_all_nullable == is_all_not_nullable and type == '1:1' then + error('source_fields in connection_parts must be all nullable or ' .. + 'not nullable at the same time') + end + + if is_all_nullable and type == '1:1' then + type = '1:1*' + end + + return type +end + +-- The function returns connection_parts sorted by destination_fields as +-- index_fields prefix. +local function sort_parts(connection_parts, index_fields) + local sorted_parts = {} + + -- check if fields in connection_parts exist in index_fields + for _, part in ipairs(connection_parts) do + local is_found = false + for i, index_field in ipairs(index_fields) do + if part.destination_field == index_field then + is_found = true + end + end + assert(is_found, ('part.destination_field %s was not found in ' .. 
+ 'connection index %s'):format(part.destination_field, + json.encode(index_fields))) + end + + -- sort parts and check that sorted_parts form index prefix + -- (including index itself) + for i = 1, utils.table_size(connection_parts) do + local index_field = index_fields[i] + for _, part in ipairs(connection_parts) do + if part.destination_field == index_field then + sorted_parts[i] = {destination_field = part.destination_field, + source_field = part.source_field} + break + end + end + -- no part matched index_fields[i]: parts cannot form an index prefix + assert(sorted_parts[i] ~= nil, ('given parts %s does not form an ' .. + 'index or an index prefix %s'):format(json.encode(connection_parts), + json.encode(index_fields))) + end + return sorted_parts +end + +local function is_order_different(connection_parts, index_fields) + for i, _ in ipairs(connection_parts) do + if connection_parts[i].destination_field ~= index_fields[i] then + return true + end + end + return false +end + +--- The function complements partially defined (nil/number) connection parts +--- or check and sort fully defined (table) connection parts. +--- @tparam table parts partially defined connection's part given by user +--- @tparam table index connection index (cfg.indexes[collection][index_name]) +--- index.fields will be used as the source of information about the index parts order. +--- An error will be raised in cases when parts is a table and cannot form a +--- prefix of index.fields. When parts can be resorted to fit right order, they +--- will be resorted. +local function determine_connection_parts(parts, index) + check(parts, 'parts', 'nil', 'number', 'table') + local result_parts = {} + + -- User defined no parts of the connection. All connection's index fields + -- are taken as 'parts' + if type(parts) == 'nil' then + for i, v in ipairs(index.fields) do + result_parts[i] = {source_field = v, destination_field = v} + end + end + + -- User defined a number of fields of index which must form index prefix.
+ -- First 'number' index fields are taken as 'parts' + if type(parts) == 'number' then + for i = 1, parts do + local v = index.fields[i] + result_parts[i] = {source_field = v, destination_field = v} + end + end + + -- User defined parts as pairs of {source_field: foo_field, + -- destination_field: boo_field}. These 'parts' may correspond either to full + -- index or index prefix + if type(parts) == 'table' then + -- sorting parts is necessary to check if user defined part form an + -- index or an index prefix + if is_order_different(parts, index.fields) then + log.warn(('Parts \n %s \n were given in the wrong order and ' .. + 'sorted to match the right order of destination collection ' .. + 'index fields \n %s \n'):format(yaml.encode(parts), + yaml.encode(index.fields))) + result_parts = sort_parts(parts, index.fields) + else + result_parts = parts + end + end + + return result_parts +end + +--- The function complements collections' connections, described in simplified +--- format, to connections in a fully specified format. Type determined on index type. +--- Each connection will be added to a `source_collection' collection, +--- because the format of a collection assumes inclusion of all outcoming connections. +--- Notice an example: +--- +--- "connections" : [ +--- { +--- "name": "order_connection", +--- "source_collection": "user_collection", +--- "destination_collection": "order_collection" +--- "index_name": "user_id_index", +--- "parts" : nil | number | table (destination fields can be omitted) +--- in case of 'table' expected format is: +--- "parts": [ +--- {"source_field": "user_id", "destination_field": "user_id"}, +--- ... +--- ] +--- }, +--- ...
+--- ] +--- +--- will produce following complement in 'user_collection' : +--- +--- "user_collection": { +--- "schema_name": "user", +--- "connections": [ +--- { +--- "type": "1:N", +--- "name": "order_connection", +--- "destination_collection": "order_collection", +--- "parts": [ +--- { "source_field": "user_id", "destination_field": "user_id" } +--- ], +--- "index_name": "user_id_index" +--- }, +--- ] +--- } +--- +--- @tparam table collections cfg.collections (will be changed in place) +--- @tparam table connections cfg.connections - user-defined collections +--- @tparam table indexes cfg.indexes - {[collection_name] = collection_indexes, ...} +--- @treturn table `collections` is complemented collections +local function complement_connections(collections, connections, indexes, schemas) + if connections == nil then + return collections + end + + check(collections, 'collections', 'table') + check(connections, 'connections', 'table') + + local spaces_formats = get_spaces_formats() + + for _, c in pairs(connections) do + check(c.name, 'connection.name', 'string') + check(c.source_collection, 'connection.source_collection', 'string') + check(c.destination_collection, 'connection.destination_collection', + 'string') + check(c.index_name, 'connection.index_name', 'string') + check(c.parts, 'connection.parts', 'number', 'table', 'nil') + + local index = indexes[c.source_collection][c.index_name] + assert(index.unique ~= nil, 'index.unique must not be nil ' ..
+ 'during connections complementing') + + local result_c = {} + result_c.source_collection = c.source_collection + result_c.destination_collection = c.destination_collection + result_c.parts = determine_connection_parts(c.parts, index) + + local source_space_format = spaces_formats[result_c.source_collection] + + result_c.type = determine_connection_type(result_c.parts, index, + source_space_format) + result_c.index_name = c.index_name + result_c.name = c.name + + local collection_connections = collections[c.source_collection]. + connections or {} + collection_connections[#collection_connections + 1] = result_c + end + return collections +end + +--- The function complements cfg.collection.connections using given +--- cfg.connections. See @{complement_connections} for details. +function config_complement.complement_cfg(cfg) + cfg.collections = complement_connections(cfg.collections, cfg.connections, + cfg.indexes) + return cfg +end + +return config_complement diff --git a/graphql/simple_config.lua b/graphql/simple_config.lua new file mode 100644 index 0000000..103ebf3 --- /dev/null +++ b/graphql/simple_config.lua @@ -0,0 +1,196 @@ +--- The simple config module provides an ability to generate config (cfg) for +--- tarantool_graphql using tarantool meta-information. +--- +------ Explanation: +--- +--- * To make use of it you must specify tarantool tuples' format during space +--- creation passing or after it using space_object:format(). Spaces with no +--- formats (both 'name and 'type' fields must be filled) will be ignored. +--- Resulting schemas lack fields of the following types: 'record', 'array' +--- and 'map'. Resulting collections have no connections. Schemas and +--- collections may be complemented. + +local check = require('graphql.utils').check + +local simple_config = {} + +--- The functions tells if given space is a tarantool system space or not. +--- It relies on tarantool implementation's details. 
The source of the function is +--- space_is_system() in tarantool/src/box/schema.cc +local function is_system_space(space) + local BOX_SYSTEM_ID_MIN = 256 + local BOX_SYSTEM_ID_MAX = 511 + local space_id = space[1] + return (BOX_SYSTEM_ID_MIN < space_id and space_id < BOX_SYSTEM_ID_MAX) +end + +--- The functions converts given tarantool tuple's (received from space:format()) +--- field type into avro-schema type. Notes on tarantool tuple's field type can +--- be found at https://tarantool.org/en/doc/2.0/book/box/data_model.html#indexed-field-types +--- +--- XXX scalar type conversion is not implemented yet. Consider using +--- avro unions to implement it. +local function convert_index_type_to_avro(index_type, is_nullable) + -- unsigned | string | integer | number | boolean | array | scalar + check(index_type, 'index_type', 'string') + + if index_type == 'scalar' then + error('scalar type conversion (tarantool types -> avro) is not ' .. + 'implemented yet') + end + + local index_type_to_avro_type = + {unsigned = 'long', string = 'string', integer = 'long', + number = 'double', boolean = 'boolean', + array = {type = 'array', items = 'long'}} + + local result = index_type_to_avro_type[index_type] + + assert(result, 'index type to avro type conversion failed, as there ' .. + 'were no match for type ' .. index_type) + + if is_nullable then + return result .. '*' + else + return result + end +end + +--- The function generates avro schema using given space format. Note that +--- space format is a flat format so no nested schemas (e.g record inside record) +--- can be generated. 
+local function generate_avro_schema(space_format, schema_name) + check(space_format, 'space_format', 'table') + check(schema_name, 'schema_name', 'string') + + local avro_schema = {type = 'record', name = schema_name, fields = {}} + for i, f in ipairs(space_format) do + check(f.name, 'field format name', 'string') + check(f.type, 'field format type', 'string') + if not (i == 0 or f.type == 'any') then + avro_schema.fields[#avro_schema.fields + 1] = + {name = f.name, type = convert_index_type_to_avro(f.type, f.is_nullable)} + end + end + return avro_schema +end + +--- XXX currently only TREE and HASH tarantool index types are supported +local function convert_index_type(index_type) + assert(type(index_type) == 'string', 'index type must be string, got ' .. + type(index_type)) + local index_types = {TREE = 'tree', HASH = 'hash'} + local result = index_types[index_type] + assert(result, 'index type conversion (from tarantool to graphQL) ' .. + 'failed, as there were no match for type ' .. 
index_type) + return result +end + +local function extract_collection_indexes(space_name, space_format) + local collection_indexes = {} + local i = 0 + local index = box.space[space_name].index[i] + while index ~= nil do + local collection_index = {} + collection_index.index_type = convert_index_type(index.type) + collection_index.unique = index.unique + + collection_index.primary = (i == 0) + + collection_index.service_fields = {} + collection_index.fields = {} + + for i, part in ipairs(index.parts) do + collection_index.fields[i] = + space_format[part.fieldno].name + end + + collection_indexes[index.name] = collection_index + + i = i + 1 + index = box.space[space_name].index[i] + end + return collection_indexes +end + +local function generate_collection(space_name) + return { schema_name = space_name, connections = {} } +end + +--- Tarantool space's format may be defined in different ways: +--- {{'x', 'unsigned', true}, ...} +--- {{name = 'x', type = 'unsigned', is_nullable = true}, ...} +--- {{'x', type = 'unsigned'}, ...} +--- All these ways have the same meaning. The function converts all these +--- formats into the single one: +--- {{name = 'x', type = 'unsigned', is_nullable = true}, ...} +local function unify_format(space_format) + local resulting_format = {} + for i, field_format in ipairs(space_format) do + resulting_format[i] = {} + resulting_format[i].name = field_format[1] or field_format.name + resulting_format[i].type = field_format[2] or field_format.type + resulting_format[i].is_nullable = field_format[3] or field_format.is_nullable + resulting_format[i].is_nullable = resulting_format[i].is_nullable or false + end + return resulting_format +end + +local function is_fully_defined(space_format) + for _, f in ipairs(space_format) do + if f.name == nil or f.type == nil or f.is_nullable == nil then + return false + end + end + return true +end + +--- The function returns formats of all fully defined spaces. 
+--- Spaces are taken from the tarantool instance in which +--- tarantool graphql is launched. For definition of fully +--- defined spaces see @{is_fully_defined}. +--- +--- @treturn table spaces_formats {[space_name] = {space_format}, ...} +--- where space_format is {{first_field_format}, {second_field_format}, ...} +--- and field_format is {[name] = name_string, [type] = type_string, +--- [is_nullable] = boolean_flag}' +function simple_config.get_spaces_formats() + local spaces_formats = {} + local FORMAT = 7 + local NAME = 3 + for _, s in box.space._space:pairs() do + if not is_system_space(s) then + local space_format = unify_format(s[FORMAT]) + if is_fully_defined(space_format) then + spaces_formats[s[NAME]] = space_format + end + end + end + return spaces_formats +end + +--- The function creates a tarantool graphql config using tarantool metainfo +--- from space:format() and space.index:format(). Notice that this function +--- does not set accessor. +--- @treturn table cfg with `schemas`, `collections`, `has_space_format`, +--- `service_fields` (empty table), `indexes` +function simple_config.graphql_cfg_from_tarantool() + local cfg = {} + cfg.schemas = {} + cfg.service_fields = {} + cfg.indexes = {} + cfg.collections = {} + cfg.collection_use_tomap = {} + + for space_name, space_format in pairs(simple_config.get_spaces_formats()) do + cfg.schemas[space_name] = generate_avro_schema(space_format, space_name) + cfg.indexes[space_name] = + extract_collection_indexes(space_name, space_format) + cfg.service_fields[space_name] = {} + cfg.collections[space_name] = generate_collection(space_name) + cfg.collection_use_tomap[space_name] = true + end + return cfg +end + +return simple_config diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua index 8c220a4..394e871 100644 --- a/graphql/tarantool_graphql.lua +++ b/graphql/tarantool_graphql.lua @@ -14,17 +14,17 @@ --- behaves this way. So 'common fields' are not supported. 
This does NOT --- work: --- ---- ``` ---- hero { ---- hero_id -- common field; does NOT work ---- ... on human { ---- name ---- } ---- ... on droid { ---- model +--- hero { +--- hero_id -- common field; does NOT work +--- ... on human { +--- name +--- } +--- ... on droid { +--- model +--- } --- } ---- } ---- ``` +--- +--- --- --- (GraphQL spec: http://facebook.github.io/graphql/October2016/#sec-Unions) --- Also, no arguments are currently allowed for fragments. @@ -33,16 +33,25 @@ local json = require('json') local yaml = require('yaml') +local accessor_space = require('graphql.accessor_space') +local accessor_shard = require('graphql.accessor_shard') local parse = require('graphql.core.parse') local schema = require('graphql.core.schema') local types = require('graphql.core.types') local validate = require('graphql.core.validate') local execute = require('graphql.core.execute') local query_to_avro = require('graphql.query_to_avro') +local simple_config = require('graphql.simple_config') +local config_complement = require('graphql.config_complement') local utils = require('graphql.utils') +local check = utils.check local tarantool_graphql = {} +-- instance of tarantool graphql to provide graphql:compile() and +-- graphql:execute() method (with creating zero configuration graphql instance +-- under hood when calling compile() for the first time) +local default_instance -- forward declarations local gql_type @@ -1031,6 +1040,59 @@ local function gql_compile(state, query) return gql_query end +function tarantool_graphql.compile(query) + if default_instance == nil then + default_instance = tarantool_graphql.new() + end + return default_instance:compile(query) +end + +function tarantool_graphql.execute(query, variables) + local compiled_query = tarantool_graphql.compile(query) + return compiled_query:execute(variables) +end + +--- The function creates an accessor of desired type with default configuration.
+--- +--- @tparam table cfg general tarantool_graphql config (contains schemas, +--- collections, service_fields and indexes) +--- @tparam string accessor type of desired accessor (space or shard) +--- @tparam table accessor_funcs set of functions to overwrite accessor +--- inner functions (`is_collection_exists`, `get_index`, `get_primary_index`, +--- `unflatten_tuple`, For more detailed description see @{accessor_general.new}) +--- These function allow this abstract data accessor behaves in the certain way. +--- Note that accessor_space and accessor_shard have their own set of these functions +--- and accessorFuncs argument (if passed) will be used to overwrite them +local function create_default_accessor(cfg) + check(cfg.accessor, 'cfg.accessor', 'string') + assert(cfg.accessor == 'space' or cfg.accessor == 'shard', + 'accessor_type must be shard or space, got ' .. cfg.accessor) + check(cfg.service_fields, 'cfg.service_fields', 'table') + check(cfg.indexes, 'cfg.indexes', 'table') + check(cfg.collection_use_tomap, 'cfg.collection_use_tomap', 'table', 'nil') + check(cfg.accessor_funcs, 'cfg.accessor_funcs', 'table', 'nil') + + if cfg.accessor == 'space' then + return accessor_space.new({ + schemas = cfg.schemas, + collections = cfg.collections, + service_fields = cfg.service_fields, + indexes = cfg.indexes, + collection_use_tomap = cfg.collection_use_tomap + }, cfg.accessor_funcs) + end + + if cfg.accessor == 'shard' then + return accessor_shard.new({ + schemas = cfg.schemas, + collections = cfg.collections, + service_fields = cfg.service_fields, + indexes = cfg.indexes, + collection_use_tomap = cfg.collection_use_tomap + }, cfg.accessor_funcs); + end +end + --- Create a tarantool_graphql library instance. 
--- --- Usage: @@ -1104,10 +1166,29 @@ end --- }), --- }) function tarantool_graphql.new(cfg) + local cfg = cfg or {} + + -- auto config case + if not next(cfg) or utils.has_only(cfg, 'connections') then + local generated_cfg = simple_config.graphql_cfg_from_tarantool() + generated_cfg.accessor = 'space' + generated_cfg.connections = cfg.connections or {} + cfg = generated_cfg + cfg = config_complement.complement_cfg(cfg) + end + + check(cfg.accessor, 'cfg.accessor', 'string', 'table') + if type(cfg.accessor) == 'string' then + cfg.accessor = create_default_accessor(cfg) + end + local state = parse_cfg(cfg) return setmetatable(state, { __index = { compile = gql_compile, + internal = { -- for unit testing + cfg = cfg, + } } }) end diff --git a/graphql/utils.lua b/graphql/utils.lua index 2c37c2c..89def8c 100644 --- a/graphql/utils.lua +++ b/graphql/utils.lua @@ -170,7 +170,6 @@ function utils.do_have_keys(table, keys) return true end - --- Check if passed obj has one of passed types. --- @tparam table obj to check --- @tparam {type_1, type_2} ... possible types @@ -184,10 +183,25 @@ function utils.check(obj, obj_name, type_1, type_2, type_3) type_2, type_3, type(obj))) elseif type_2 ~= nil then error(('%s must be a %s or a %, got %s'):format(obj_name, type_1, - type_2, type(obj))) + type_2, type(obj))) else error(('%s must be a %s, got %s'):format(obj_name, type_1, type(obj))) end end +--- Check if given table has only one specific key. 
+function utils.has_only(t, key) + local fst_key = next(t) + local snd_key = next(t, fst_key) + return fst_key == key and snd_key == nil +end + +function utils.table_size(t) + local count = 0 + for _, _ in pairs(t) do + count = count + 1 + end + return count +end + return utils diff --git a/test/local/complemented_config.result b/test/local/complemented_config.result new file mode 100644 index 0000000..ca3105d --- /dev/null +++ b/test/local/complemented_config.result @@ -0,0 +1,87 @@ +RESULT +--- +user_collection: +- user_id: user_id_1 + age: 42 + name: Ivan + order_connection: + order_id: order_id_1 + description: Ivan order +... +RESULT +--- +schemas: + user_collection: + type: record + name: user_collection + fields: + - name: user_id + type: string + - name: name + type: string + - name: age + type: long* + order_collection: + type: record + name: order_collection + fields: + - name: order_id + type: string + - name: user_id + type: string + - name: description + type: string +connections: +- index_name: user_id_index + destination_collection: order_collection + name: order_connection + source_collection: user_collection +indexes: + user_collection: + user_id_index: + unique: true + primary: true + service_fields: [] + fields: + - user_id + index_type: tree + order_collection: + order_id_index: + unique: true + primary: true + service_fields: [] + fields: + - order_id + index_type: tree + user_id_index: + unique: true + primary: false + service_fields: [] + fields: + - user_id + index_type: tree +collections: + user_collection: + schema_name: user_collection + connections: + - destination_collection: order_collection + parts: + - source_field: user_id + destination_field: user_id + type: 1:1 + index_name: user_id_index + name: order_connection + source_collection: user_collection + name: user_collection + order_collection: + schema_name: order_collection + connections: [] + name: order_collection +collection_use_tomap: + user_collection: true + 
order_collection: true +service_fields: + user_collection: [] + order_collection: [] +... + diff --git a/test/local/complemented_config.test.lua b/test/local/complemented_config.test.lua new file mode 100755 index 0000000..a348af3 --- /dev/null +++ b/test/local/complemented_config.test.lua @@ -0,0 +1,97 @@ +#!/usr/bin/env tarantool + +--local json = require('json') +local yaml = require('yaml') +local utils = require('graphql.utils') +local graphql = require('graphql') + +local connections = { + { + name='order_connection', + source_collection = 'user_collection', + destination_collection = 'order_collection', + index_name = 'user_id_index' + } +} + +local function init_spaces() + box.once('test_space_init_spaces', function() + box.schema.create_space('user_collection') + box.space.user_collection:format({{name='user_id', type='string'}, + {name='name', type='string'}, + {name='age', type='integer', is_nullable=true}}) + box.space.user_collection:create_index('user_id_index', + {type = 'tree', unique = true, parts = { 1, 'string' }}) + + box.schema.create_space('order_collection') + box.space.order_collection:format({{name='order_id', type='string'}, + {name='user_id', type='string'}, + {name='description', type='string'}}) + box.space.order_collection:create_index('order_id_index', + {type = 'tree', parts = { 1, 'string' }}) + box.space.order_collection:create_index('user_id_index', + {type = 'tree', parts = { 2, 'string' }}) + end) +end + +local function fill_test_data(shard) + local shard = shard or box.space + + shard.user_collection:replace( + {'user_id_1', 'Ivan', 42}) + shard.user_collection:replace( + {'user_id_2', 'Vasiliy'}) + + shard.order_collection:replace( + {'order_id_1', 'user_id_1', 'Ivan order'}) + shard.order_collection:replace( + {'order_id_2', 'user_id_2', 'Vasiliy order'}) +end + +local function drop_spaces() + box.space._schema:delete('oncetest_space_init_spaces') + box.space.user_collection:drop() + box.space.order_collection:drop() +end + + 
+local function run_queries(gql_wrapper) + local results = '' + + local query_1 = [[ + query user_order($user_id: String) { + user_collection(user_id: $user_id) { + user_id + age + name + order_connection{ + order_id + description + } + } + } + ]] + + local variables_1 = {user_id = 'user_id_1'} + local gql_query_1 = gql_wrapper:compile(query_1) + local result = gql_query_1:execute(variables_1) + results = results .. ('RESULT\n%s'):format(yaml.encode(result)) + + local cfg = gql_wrapper.internal.cfg + cfg.accessor = nil + local result = cfg + results = results .. ('RESULT\n%s'):format(yaml.encode(result)) + + return results +end + +utils.show_trace(function() + box.cfg { background = false } + init_spaces() + fill_test_data() + local gql_wrapper = graphql.new({connections = connections}) + print(run_queries(gql_wrapper)) + drop_spaces() +end) + +os.exit() diff --git a/test/local/simple_config.result b/test/local/simple_config.result new file mode 100644 index 0000000..69cd2eb --- /dev/null +++ b/test/local/simple_config.result @@ -0,0 +1,232 @@ +RESULT +--- +order_collection: +- order_id: order_id_1 + description: first order of Ivan + user_connection: + user_id: user_id_1 + last_name: Ivanov + first_name: Ivan +... + +RESULT +--- +user_collection: +- user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + order_connection: + - order_id: order_id_1 + description: first order of Ivan + - order_id: order_id_2 + description: second order of Ivan +... 
+ +RESULT +--- +user_collection: +- user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1574 + description: order of user 42 + - order_id: order_id_1575 + description: order of user 42 + - order_id: order_id_1576 + description: order of user 42 + - order_id: order_id_1577 + description: order of user 42 + - order_id: order_id_1578 + description: order of user 42 + - order_id: order_id_1579 + description: order of user 42 + - order_id: order_id_1580 + description: order of user 42 + - order_id: order_id_1581 + description: order of user 42 + - order_id: order_id_1582 + description: order of user 42 + - order_id: order_id_1583 + description: order of user 42 +... + +RESULT +--- +user_collection: +- user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1602 + description: order of user 42 + - order_id: order_id_1603 + description: order of user 42 +... + +RESULT +--- +user_collection: +- user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1603 + description: order of user 42 +... 
+ +RESULT +--- +user_collection: +- user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1564 + description: order of user 42 + - order_id: order_id_1565 + description: order of user 42 + - order_id: order_id_1566 + description: order of user 42 + - order_id: order_id_1567 + description: order of user 42 + - order_id: order_id_1568 + description: order of user 42 + - order_id: order_id_1569 + description: order of user 42 + - order_id: order_id_1570 + description: order of user 42 + - order_id: order_id_1571 + description: order of user 42 + - order_id: order_id_1572 + description: order of user 42 + - order_id: order_id_1573 + description: order of user 42 + - order_id: order_id_1574 + description: order of user 42 + - order_id: order_id_1575 + description: order of user 42 + - order_id: order_id_1576 + description: order of user 42 + - order_id: order_id_1577 + description: order of user 42 + - order_id: order_id_1578 + description: order of user 42 + - order_id: order_id_1579 + description: order of user 42 + - order_id: order_id_1580 + description: order of user 42 + - order_id: order_id_1581 + description: order of user 42 + - order_id: order_id_1582 + description: order of user 42 + - order_id: order_id_1583 + description: order of user 42 + - order_id: order_id_1584 + description: order of user 42 + - order_id: order_id_1585 + description: order of user 42 + - order_id: order_id_1586 + description: order of user 42 + - order_id: order_id_1587 + description: order of user 42 + - order_id: order_id_1588 + description: order of user 42 + - order_id: order_id_1589 + description: order of user 42 + - order_id: order_id_1590 + description: order of user 42 + - order_id: order_id_1591 + description: order of user 42 + - order_id: order_id_1592 + description: order of user 42 + - order_id: order_id_1593 + description: order of user 42 + - order_id: order_id_1594 + description: order of user 42 + - 
order_id: order_id_1595 + description: order of user 42 + - order_id: order_id_1596 + description: order of user 42 + - order_id: order_id_1597 + description: order of user 42 + - order_id: order_id_1598 + description: order of user 42 + - order_id: order_id_1599 + description: order of user 42 + - order_id: order_id_1600 + description: order of user 42 + - order_id: order_id_1601 + description: order of user 42 + - order_id: order_id_1602 + description: order of user 42 + - order_id: order_id_1603 + description: order of user 42 +... + +RESULT +--- +user_collection: +- user_id: user_id_54 + last_name: last name 54 + first_name: first name 54 +- user_id: user_id_55 + last_name: last name 55 + first_name: first name 55 +- user_id: user_id_56 + last_name: last name 56 + first_name: first name 56 +- user_id: user_id_57 + last_name: last name 57 + first_name: first name 57 +- user_id: user_id_58 + last_name: last name 58 + first_name: first name 58 +- user_id: user_id_59 + last_name: last name 59 + first_name: first name 59 +- user_id: user_id_6 + last_name: last name 6 + first_name: first name 6 +- user_id: user_id_60 + last_name: last name 60 + first_name: first name 60 +- user_id: user_id_61 + last_name: last name 61 + first_name: first name 61 +- user_id: user_id_62 + last_name: last name 62 + first_name: first name 62 +... + +RESULT +--- +user_collection: +- user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + order_connection: + - order_id: order_id_1 + description: first order of Ivan +... + +RESULT +--- +user_collection: +- user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + order_connection: [] +... + +RESULT +--- +order_collection: +- order_id: order_id_1 + description: first order of Ivan + user_connection: + user_id: user_id_1 + last_name: Ivanov + first_name: Ivan +... 
+ diff --git a/test/local/simple_config.test.lua b/test/local/simple_config.test.lua new file mode 100755 index 0000000..81e901d --- /dev/null +++ b/test/local/simple_config.test.lua @@ -0,0 +1,56 @@ +#!/usr/bin/env tarantool + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. +package.path + +local graphql = require('graphql') +local testdata = require('test.testdata.common_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- test with accessor_space + +-- init box and data schema +box.cfg{background = false} +testdata.init_spaces() + +-- upload test data +testdata.fill_test_data() + +-- acquire metadata +local metadata = testdata.get_test_metadata() +local schemas = metadata.schemas +local collections = metadata.collections +local service_fields = metadata.service_fields +local indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + + + +local gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, + accessor = 'space' +}) + +-- run queries +-- ----------- + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +testdata.drop_spaces() + + +os.exit() diff --git a/test/local/zero_config.result b/test/local/zero_config.result new file mode 100644 index 0000000..a16998a --- /dev/null +++ b/test/local/zero_config.result @@ -0,0 +1,42 @@ +RESULT +--- +user_collection: +- user_id: user_id_1 + age: 42 + name: Ivan +... 
+ +RESULT +--- +schemas: + user_collection: + type: record + name: user_collection + fields: + - name: user_id + type: string + - name: name + type: string + - name: age + type: long* +connections: [] +indexes: + user_collection: + user_id_index: + unique: true + primary: true + service_fields: [] + fields: + - user_id + index_type: tree +collections: + user_collection: + schema_name: user_collection + connections: [] + name: user_collection +collection_use_tomap: + user_collection: true +service_fields: + user_collection: [] +... + diff --git a/test/local/zero_config.test.lua b/test/local/zero_config.test.lua new file mode 100755 index 0000000..c69c29c --- /dev/null +++ b/test/local/zero_config.test.lua @@ -0,0 +1,88 @@ +#!/usr/bin/env tarantool + +--local json = require('json') +local yaml = require('yaml') +local utils = require('graphql.utils') +local graphql = require('graphql') + + +local function print_and_return(...) + print(...) + return table.concat({...}, ' ') .. '\n' +end + +local function init_spaces() + local U_USER_ID_FN = 1 + + box.once('test_space_init_spaces', function() + box.schema.create_space('user_collection') + box.space.user_collection:create_index('user_id_index', + {type = 'tree', unique = true, parts = { + U_USER_ID_FN, 'string' + }} + ) + + box.space.user_collection:format( + {{name='user_id', type='string'}, {name='name', type='string'}, + {name='age', type='integer', is_nullable=true}} + ) + end) +end + +local function fill_test_data(shard) + local shard = shard or box.space + + shard.user_collection:replace( + {'user_id_1', 'Ivan', 42}) + shard.user_collection:replace( + {'user_id_2', 'Vasiliy'}) +end + +local function drop_spaces() + box.space._schema:delete('oncetest_space_init_spaces') + box.space.user_collection:drop() +end + + +local function run_queries(gql_wrapper) + local results = '' + + local query_1 = [[ + query user_order($user_id: String) { + user_collection(user_id: $user_id) { + user_id + age + name + } + } + ]] + + 
utils.show_trace(function() + local variables_1 = {user_id = 'user_id_1'} + local gql_query_1 = gql_wrapper:compile(query_1) + local result = gql_query_1:execute(variables_1) + results = results .. print_and_return( + ('RESULT\n%s'):format(yaml.encode(result))) + end) + + utils.show_trace(function() + local cfg = gql_wrapper.internal.cfg + cfg.accessor = nil + local result = cfg + results = results .. print_and_return( + ('RESULT\n%s'):format(yaml.encode(result))) + end) + + return results +end + +utils.show_trace(function() + box.cfg { background = false } + init_spaces() + fill_test_data() + local gql_wrapper = graphql.new() + run_queries(gql_wrapper) + drop_spaces() +end) + +os.exit() diff --git a/test/shard_no_redundancy/shard_common_simple_config.result b/test/shard_no_redundancy/shard_common_simple_config.result new file mode 100644 index 0000000..af30c97 --- /dev/null +++ b/test/shard_no_redundancy/shard_common_simple_config.result @@ -0,0 +1,366 @@ +env = require('test_run') +--- +... +test_run = env.new() +--- +... +shard = require('shard') +--- +... +test_run:cmd("setopt delimiter ';'") +--- +- true +... +SERVERS = {'shard1', 'shard2'}; +--- +... +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + }, + login = 'guest', + password = '', + redundancy = 1, +}, 'shard_no_redundancy'); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +fio = require('fio') +--- +... +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +--- +... +graphql = require('graphql') +--- +... +testdata = require('test.testdata.common_testdata') +--- +... 
+-- init box, upload test data and acquire metadata +-- ----------------------------------------------- +-- init box and data schema +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.common_testdata').init_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.common_testdata').init_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +shard.reload_schema() +--- +... +-- upload test data +testdata.fill_test_data(shard) +--- +... +-- acquire metadata +metadata = testdata.get_test_metadata() +--- +... +schemas = metadata.schemas +--- +... +collections = metadata.collections +--- +... +service_fields = metadata.service_fields +--- +... +indexes = metadata.indexes +--- +... +-- build accessor and graphql schemas +-- ---------------------------------- +test_run:cmd("setopt delimiter ';'") +--- +- true +... +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, + accessor = 'shard' +}); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +testdata.run_queries(gql_wrapper) +--- +- |+ + RESULT + --- + order_collection: + - order_id: order_id_1 + description: first order of Ivan + user_connection: + user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + ... + + RESULT + --- + user_collection: + - user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + order_connection: + - order_id: order_id_1 + description: first order of Ivan + - order_id: order_id_2 + description: second order of Ivan + ... 
+ + RESULT + --- + user_collection: + - user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1574 + description: order of user 42 + - order_id: order_id_1575 + description: order of user 42 + - order_id: order_id_1576 + description: order of user 42 + - order_id: order_id_1577 + description: order of user 42 + - order_id: order_id_1578 + description: order of user 42 + - order_id: order_id_1579 + description: order of user 42 + - order_id: order_id_1580 + description: order of user 42 + - order_id: order_id_1581 + description: order of user 42 + - order_id: order_id_1582 + description: order of user 42 + - order_id: order_id_1583 + description: order of user 42 + ... + + RESULT + --- + user_collection: + - user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1602 + description: order of user 42 + - order_id: order_id_1603 + description: order of user 42 + ... + + RESULT + --- + user_collection: + - user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1603 + description: order of user 42 + ... 
+ + RESULT + --- + user_collection: + - user_id: user_id_42 + last_name: last name 42 + first_name: first name 42 + order_connection: + - order_id: order_id_1564 + description: order of user 42 + - order_id: order_id_1565 + description: order of user 42 + - order_id: order_id_1566 + description: order of user 42 + - order_id: order_id_1567 + description: order of user 42 + - order_id: order_id_1568 + description: order of user 42 + - order_id: order_id_1569 + description: order of user 42 + - order_id: order_id_1570 + description: order of user 42 + - order_id: order_id_1571 + description: order of user 42 + - order_id: order_id_1572 + description: order of user 42 + - order_id: order_id_1573 + description: order of user 42 + - order_id: order_id_1574 + description: order of user 42 + - order_id: order_id_1575 + description: order of user 42 + - order_id: order_id_1576 + description: order of user 42 + - order_id: order_id_1577 + description: order of user 42 + - order_id: order_id_1578 + description: order of user 42 + - order_id: order_id_1579 + description: order of user 42 + - order_id: order_id_1580 + description: order of user 42 + - order_id: order_id_1581 + description: order of user 42 + - order_id: order_id_1582 + description: order of user 42 + - order_id: order_id_1583 + description: order of user 42 + - order_id: order_id_1584 + description: order of user 42 + - order_id: order_id_1585 + description: order of user 42 + - order_id: order_id_1586 + description: order of user 42 + - order_id: order_id_1587 + description: order of user 42 + - order_id: order_id_1588 + description: order of user 42 + - order_id: order_id_1589 + description: order of user 42 + - order_id: order_id_1590 + description: order of user 42 + - order_id: order_id_1591 + description: order of user 42 + - order_id: order_id_1592 + description: order of user 42 + - order_id: order_id_1593 + description: order of user 42 + - order_id: order_id_1594 + description: order of user 42 + - 
order_id: order_id_1595 + description: order of user 42 + - order_id: order_id_1596 + description: order of user 42 + - order_id: order_id_1597 + description: order of user 42 + - order_id: order_id_1598 + description: order of user 42 + - order_id: order_id_1599 + description: order of user 42 + - order_id: order_id_1600 + description: order of user 42 + - order_id: order_id_1601 + description: order of user 42 + - order_id: order_id_1602 + description: order of user 42 + - order_id: order_id_1603 + description: order of user 42 + ... + + RESULT + --- + user_collection: + - user_id: user_id_54 + last_name: last name 54 + first_name: first name 54 + - user_id: user_id_55 + last_name: last name 55 + first_name: first name 55 + - user_id: user_id_56 + last_name: last name 56 + first_name: first name 56 + - user_id: user_id_57 + last_name: last name 57 + first_name: first name 57 + - user_id: user_id_58 + last_name: last name 58 + first_name: first name 58 + - user_id: user_id_59 + last_name: last name 59 + first_name: first name 59 + - user_id: user_id_6 + last_name: last name 6 + first_name: first name 6 + - user_id: user_id_60 + last_name: last name 60 + first_name: first name 60 + - user_id: user_id_61 + last_name: last name 61 + first_name: first name 61 + - user_id: user_id_62 + last_name: last name 62 + first_name: first name 62 + ... + + RESULT + --- + user_collection: + - user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + order_connection: + - order_id: order_id_1 + description: first order of Ivan + ... + + RESULT + --- + user_collection: + - user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + order_connection: [] + ... + + RESULT + --- + order_collection: + - order_id: order_id_1 + description: first order of Ivan + user_connection: + user_id: user_id_1 + last_name: Ivanov + first_name: Ivan + ... + +... +-- clean up +-- -------- +test_run:cmd('switch shard1') +--- +- true +... 
+require('test.testdata.common_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.common_testdata').drop_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +test_run:drop_cluster(SERVERS) +--- +... diff --git a/test/shard_no_redundancy/shard_common_simple_config.test.lua b/test/shard_no_redundancy/shard_common_simple_config.test.lua new file mode 100755 index 0000000..4782620 --- /dev/null +++ b/test/shard_no_redundancy/shard_common_simple_config.test.lua @@ -0,0 +1,74 @@ +env = require('test_run') +test_run = env.new() + +shard = require('shard') + +test_run:cmd("setopt delimiter ';'") +SERVERS = {'shard1', 'shard2'}; +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + }, + login = 'guest', + password = '', + redundancy = 1, +}, 'shard_no_redundancy'); +test_run:cmd("setopt delimiter ''"); + +fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +graphql = require('graphql') +testdata = require('test.testdata.common_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +test_run:cmd('switch shard1') +require('test.testdata.common_testdata').init_spaces() +test_run:cmd('switch shard2') +require('test.testdata.common_testdata').init_spaces() +test_run:cmd('switch default') +shard.reload_schema() + +-- upload test data +testdata.fill_test_data(shard) + +-- acquire metadata +metadata = testdata.get_test_metadata() +schemas = metadata.schemas +collections = metadata.collections +service_fields = metadata.service_fields +indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +test_run:cmd("setopt delimiter ';'") + +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, + accessor = 'shard' +}); + +test_run:cmd("setopt delimiter ''"); + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +test_run:cmd('switch shard1') +require('test.testdata.common_testdata').drop_spaces() +test_run:cmd('switch shard2') +require('test.testdata.common_testdata').drop_spaces() +test_run:cmd('switch default') + +test_run:drop_cluster(SERVERS)