diff --git a/.gitmodules b/.gitmodules index cd300b8328..3955ade5ce 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,3 @@ -[submodule "modules/cartridge"] - path = modules/cartridge - url = https://github.com/tarantool/cartridge.git -[submodule "modules/cartridge-cli"] - path = modules/cartridge-cli - url = https://github.com/tarantool/cartridge-cli.git [submodule "modules/metrics"] path = modules/metrics url = https://github.com/tarantool/metrics.git diff --git a/build_submodules.sh b/build_submodules.sh index a083ebfc47..e19e7258a0 100755 --- a/build_submodules.sh +++ b/build_submodules.sh @@ -12,50 +12,6 @@ po_dest="${project_root}/locale/ru/LC_MESSAGES" cp README.rst doc/contributing/docs/_includes/README.rst -# Cartridge -cartridge_root="${project_root}/modules/cartridge" - -# Build Cartridge to extract docs -cd "${cartridge_root}" || exit -CMAKE_DUMMY_WEBUI=true tarantoolctl rocks make - -# Copy Cartridge docs, including diagrams and images -cartridge_rst_src="${cartridge_root}/build.luarocks/build.rst" -cartridge_rst_dest="${project_root}/doc/book/cartridge" -cd "${cartridge_rst_src}" || exit -mkdir -p "${cartridge_rst_dest}" -find . -iregex '.*\.\(rst\|png\|puml\|svg\)$' -exec cp -r --parents {} "${cartridge_rst_dest}" \; - -# Copy translation templates -cartridge_pot_src="${cartridge_root}/build.luarocks/build.rst/locale" -cartridge_pot_dest="${project_root}/locale/book/cartridge" -cd "${cartridge_pot_src}" || exit -mkdir -p "${cartridge_pot_dest}" -find . -name '*.pot' -exec cp -rv --parents {} "${cartridge_pot_dest}" \; - -# Copy translations -cartridge_po_src="${cartridge_root}/build.luarocks/build.rst/locale/ru/LC_MESSAGES" -cartridge_po_dest="${po_dest}/book/cartridge" -cd "${cartridge_po_src}" || exit -mkdir -p "${cartridge_po_dest}" -find . 
-name '*.po' -exec cp -rv --parents {} "${cartridge_po_dest}" \; - - -# Cartridge CLI -cartridge_cli_root="${project_root}/modules/cartridge-cli/doc" -cartridge_cli_dest="${cartridge_rst_dest}/cartridge_cli" -cartridge_cli_po_dest="${po_dest}/book/cartridge/cartridge_cli" - -# Copy Cartridge CLI docs, including diagrams and images -mkdir -p "${cartridge_cli_dest}" -cd ${cartridge_cli_root} || exit -find . -iregex '.*\.\(rst\|png\|puml\|svg\)$' -exec cp -rv --parents {} "${cartridge_cli_dest}" \; - -# Copy translations -mkdir -p "${cartridge_cli_po_dest}" -cd "${cartridge_cli_root}/locale/ru/LC_MESSAGES/doc/" || exit -find . -name '*.po' -exec cp -rv --parents {} "${cartridge_cli_po_dest}" \; - # Monitoring monitoring_root="${project_root}/modules/metrics/doc/monitoring" monitoring_dest="${project_root}/doc/book" diff --git a/conf.py b/conf.py index e9e9c6fd14..eb3c8c1a92 100644 --- a/conf.py +++ b/conf.py @@ -61,7 +61,7 @@ project = u'Tarantool' # |release| The full version, including alpha/beta/rc tags. -release = "2.11.1" +release = "3.0.0" # |version| The short X.Y version. version = '.'.join(release.split('.')[0:2]) @@ -73,10 +73,6 @@ 'how-to/using_docker.rst', 'reference/configuration/cfg_*', 'images', - 'book/cartridge/cartridge_overview.rst', - 'book/cartridge/CONTRIBUTING.rst', - 'book/cartridge/topics', - 'book/cartridge/cartridge_api/modules/cartridge.test-helpers.rst', 'reference/reference_rock/luatest/README.rst', 'reference/reference_rock/luatest/modules/luatest.rst', '**/_includes/*' diff --git a/doc/book/admin/start_stop_instance.rst b/doc/book/admin/start_stop_instance.rst index 759e16f784..c85416a9ef 100644 --- a/doc/book/admin/start_stop_instance.rst +++ b/doc/book/admin/start_stop_instance.rst @@ -124,6 +124,8 @@ With a single ``tt`` call, you can: * connect to a specific instance of an application * stop a specific instance of an application or all its instances +.. 
_admin-start_stop_instance-multi-instance-layout: + Application layout ~~~~~~~~~~~~~~~~~~ diff --git a/doc/book/admin/upgrades/upgrade_cluster.rst b/doc/book/admin/upgrades/upgrade_cluster.rst index 7444cfd63a..3185e60a6d 100644 --- a/doc/book/admin/upgrades/upgrade_cluster.rst +++ b/doc/book/admin/upgrades/upgrade_cluster.rst @@ -98,11 +98,11 @@ Upgrading storages Before upgrading **storage** instances: -* Disable :doc:`Cartridge failover `: run +* Disable Cartridge failover: run .. code-block:: bash - cartridge failover disable + tt cartridge failover disable or use the Cartridge web interface (**Cluster** tab, **Failover: ** button). @@ -112,8 +112,7 @@ Before upgrading **storage** instances: tarantool> vshard.storage.rebalancer_disable() -* Make sure that the Cartridge ``upgrade_schema`` :doc:`option ` - is ``false``. +* Make sure that the Cartridge ``upgrade_schema`` option is ``false``. .. include:: ../_includes/upgrade_storages.rst @@ -130,11 +129,11 @@ Before upgrading **storage** instances: Once you complete the steps, enable failover or rebalancer back: -* Enable :doc:`Cartridge failover `: run +* Enable Cartridge failover: run .. code-block:: bash - cartridge failover set [mode] + tt cartridge failover set [mode] or use the Cartridge web interface (**Cluster** tab, **Failover: Disabled** button). diff --git a/doc/book/cartridge/cartridge_overview.rst b/doc/book/cartridge/cartridge_overview.rst deleted file mode 100644 index 1f7e75ca3c..0000000000 --- a/doc/book/cartridge/cartridge_overview.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. _cartridge-overview: - -================================================================================ -About Tarantool Cartridge -================================================================================ - -Tarantool Cartridge is the recommended alternative to the -:ref:`old-school practices ` of application development -for Tarantool. - -.. 
_cluster-app: - -As a software development kit (SDK), Tarantool Cartridge provides you with -utilities and :ref:`templates ` to help: - -* easily set up a development environment for your applications; -* plug the necessary Lua modules. - -The resulting package can be installed and started on one or multiple servers -as one or multiple instantiated services -- independent or organized into a -**cluster**. - -.. NOTE:: - - A Tarantool cluster is a collection of Tarantool instances acting in concert. - While a single Tarantool instance can leverage the performance of a single server - and is vulnerable to failure, the cluster spans multiple servers, utilizes their - cumulative CPU power, and is fault-tolerant. - - To fully utilize the capabilities of a Tarantool cluster, you need to - develop applications keeping in mind they are to run in a cluster environment. - -Further on, Tarantool Cartridge provides your cluster-aware applications with -the following benefits: - -* horizontal scalability and load balancing via built-in automatic sharding; -* asynchronous replication; -* automatic failover; -* centralized cluster control via GUI or API; -* automatic configuration synchronization; -* instance functionality segregation. - -A Tarantool Cartridge cluster can segregate functionality between instances via -built-in and custom (user-defined) :ref:`cluster roles `. -You can toggle instances on and off on the fly during cluster operation. -This allows you to put different types of workloads -(e.g., compute- and transaction-intensive ones) on different physical servers -with dedicated hardware. 
diff --git a/doc/book/cartridge/images/auth_creds.png b/doc/book/cartridge/images/auth_creds.png deleted file mode 100644 index 04a5c14488..0000000000 Binary files a/doc/book/cartridge/images/auth_creds.png and /dev/null differ diff --git a/doc/book/cartridge/images/bootstrap-vshard.png b/doc/book/cartridge/images/bootstrap-vshard.png deleted file mode 100644 index 71734bc85b..0000000000 Binary files a/doc/book/cartridge/images/bootstrap-vshard.png and /dev/null differ diff --git a/doc/book/cartridge/images/change-weight.png b/doc/book/cartridge/images/change-weight.png deleted file mode 100644 index bdd0aa5dfb..0000000000 Binary files a/doc/book/cartridge/images/change-weight.png and /dev/null differ diff --git a/doc/book/cartridge/images/create-router.png b/doc/book/cartridge/images/create-router.png deleted file mode 100644 index dfb43a331b..0000000000 Binary files a/doc/book/cartridge/images/create-router.png and /dev/null differ diff --git a/doc/book/cartridge/images/create-storage.png b/doc/book/cartridge/images/create-storage.png deleted file mode 100644 index d0a6189e48..0000000000 Binary files a/doc/book/cartridge/images/create-storage.png and /dev/null differ diff --git a/doc/book/cartridge/images/edit-replica-set.png b/doc/book/cartridge/images/edit-replica-set.png deleted file mode 100644 index d4e2d105d4..0000000000 Binary files a/doc/book/cartridge/images/edit-replica-set.png and /dev/null differ diff --git a/doc/book/cartridge/images/enabled-failover.png b/doc/book/cartridge/images/enabled-failover.png deleted file mode 100644 index 48a3f3e298..0000000000 Binary files a/doc/book/cartridge/images/enabled-failover.png and /dev/null differ diff --git a/doc/book/cartridge/images/expelling-instance.png b/doc/book/cartridge/images/expelling-instance.png deleted file mode 100644 index 41d9d9a361..0000000000 Binary files a/doc/book/cartridge/images/expelling-instance.png and /dev/null differ diff --git a/doc/book/cartridge/images/failover-control.png 
b/doc/book/cartridge/images/failover-control.png deleted file mode 100644 index 6eac960da8..0000000000 Binary files a/doc/book/cartridge/images/failover-control.png and /dev/null differ diff --git a/doc/book/cartridge/images/failover-priority.png b/doc/book/cartridge/images/failover-priority.png deleted file mode 100644 index fa9ece3cf8..0000000000 Binary files a/doc/book/cartridge/images/failover-priority.png and /dev/null differ diff --git a/doc/book/cartridge/images/failover.png b/doc/book/cartridge/images/failover.png deleted file mode 100644 index c51839d432..0000000000 Binary files a/doc/book/cartridge/images/failover.png and /dev/null differ diff --git a/doc/book/cartridge/images/final-cluster.png b/doc/book/cartridge/images/final-cluster.png deleted file mode 100644 index 378cab79b4..0000000000 Binary files a/doc/book/cartridge/images/final-cluster.png and /dev/null differ diff --git a/doc/book/cartridge/images/join-new-set.png b/doc/book/cartridge/images/join-new-set.png deleted file mode 100644 index 58436cf0b0..0000000000 Binary files a/doc/book/cartridge/images/join-new-set.png and /dev/null differ diff --git a/doc/book/cartridge/images/join-router.png b/doc/book/cartridge/images/join-router.png deleted file mode 100644 index 651f07e791..0000000000 Binary files a/doc/book/cartridge/images/join-router.png and /dev/null differ diff --git a/doc/book/cartridge/images/join-storage.png b/doc/book/cartridge/images/join-storage.png deleted file mode 100644 index aa5fa0b18e..0000000000 Binary files a/doc/book/cartridge/images/join-storage.png and /dev/null differ diff --git a/doc/book/cartridge/images/new-unconfig.png b/doc/book/cartridge/images/new-unconfig.png deleted file mode 100644 index f148aaede9..0000000000 Binary files a/doc/book/cartridge/images/new-unconfig.png and /dev/null differ diff --git a/doc/book/cartridge/images/probe-server.png b/doc/book/cartridge/images/probe-server.png deleted file mode 100644 index 872287f95b..0000000000 Binary files 
a/doc/book/cartridge/images/probe-server.png and /dev/null differ diff --git a/doc/book/cartridge/images/switch-master.png b/doc/book/cartridge/images/switch-master.png deleted file mode 100644 index 59168413c1..0000000000 Binary files a/doc/book/cartridge/images/switch-master.png and /dev/null differ diff --git a/doc/book/cartridge/images/unconfigured-router.png b/doc/book/cartridge/images/unconfigured-router.png deleted file mode 100644 index fdec390d11..0000000000 Binary files a/doc/book/cartridge/images/unconfigured-router.png and /dev/null differ diff --git a/doc/book/cartridge/images/users-tab.png b/doc/book/cartridge/images/users-tab.png deleted file mode 100644 index 302601c6e0..0000000000 Binary files a/doc/book/cartridge/images/users-tab.png and /dev/null differ diff --git a/doc/book/cartridge/images/zero-weight.png b/doc/book/cartridge/images/zero-weight.png deleted file mode 100644 index 926318cab0..0000000000 Binary files a/doc/book/cartridge/images/zero-weight.png and /dev/null differ diff --git a/doc/book/cartridge/images/zero.png b/doc/book/cartridge/images/zero.png deleted file mode 100644 index 50654147e1..0000000000 Binary files a/doc/book/cartridge/images/zero.png and /dev/null differ diff --git a/doc/book/connectors/java.rst b/doc/book/connectors/java.rst index 6cb53ee2f4..d13784fb31 100644 --- a/doc/book/connectors/java.rst +++ b/doc/book/connectors/java.rst @@ -8,7 +8,7 @@ There are two Java connectors available: * `cartridge-java `__ supports both single Tarantool nodes and clusters, as well as applications built using the - :doc:`Cartridge framework ` and its modules. + `Cartridge framework `__ and its modules. The Tarantool team actively updates this module with the newest Tarantool features. 
* `tarantool-java `__ works with early Tarantool versions (1.6 and later) diff --git a/doc/book/index.rst b/doc/book/index.rst index 0ad7da7169..aed53f9158 100644 --- a/doc/book/index.rst +++ b/doc/book/index.rst @@ -15,7 +15,6 @@ User's Guide ../how-to/index ../concepts/index box/index - cartridge/index admin/index monitoring/index connectors diff --git a/doc/code_snippets/test/config/config_test.lua b/doc/code_snippets/test/config/config_test.lua new file mode 100644 index 0000000000..448897cf77 --- /dev/null +++ b/doc/code_snippets/test/config/config_test.lua @@ -0,0 +1,72 @@ +local fio = require('fio') +local server = require('luatest.server') +local t = require('luatest') +local treegen = require('test.treegen') +local justrun = require('test.justrun') +local g = t.group() + +g.before_all(function() + treegen.init(g) +end) + +g.after_all(function() + treegen.clean(g) +end) + +g.before_each(function(cg) + cg.server = server:new { + box_cfg = {}, + workdir = fio.cwd() .. '/tmp' + } + cg.server:start() + cg.server:exec(function() + box.schema.space.create('bands') + box.space.bands:format({ + { name = 'id', type = 'unsigned' }, + { name = 'band_name', type = 'string' }, + { name = 'year', type = 'unsigned' } + }) + box.space.bands:create_index('primary', { parts = { 'id' } }) + end) +end) + +g.after_each(function(cg) + cg.server:stop() + cg.server:drop() +end) + +g.test_config_option = function() + local dir = treegen.prepare_directory(g, {}, {}) + local file_config = [[ + log: + level: 7 + + memtx: + min_tuple_size: 32 + memory: 100000000 + + groups: + group-001: + replicasets: + replicaset-001: + instances: + instance-001: {} + ]] + treegen.write_script(dir, 'config.yaml', file_config) + + local script = [[ + print(box.cfg.memtx_min_tuple_size) + print(box.cfg.memtx_memory) + print(box.cfg.log_level) + os.exit(0) + ]] + treegen.write_script(dir, 'main.lua', script) + + local env = {TT_LOG_LEVEL = 0} + local opts = {nojson = true, stderr = false} + local args = 
{'--name', 'instance-001', '--config', 'config.yaml', + 'main.lua'} + local res = justrun.tarantool(dir, env, args, opts) + t.assert_equals(res.exit_code, 0) + t.assert_equals(res.stdout, table.concat({32, 100000000, 0}, "\n")) +end \ No newline at end of file diff --git a/doc/code_snippets/test/config/etcd.yaml b/doc/code_snippets/test/config/etcd.yaml new file mode 100644 index 0000000000..f956fea2bf --- /dev/null +++ b/doc/code_snippets/test/config/etcd.yaml @@ -0,0 +1,5 @@ +config: + etcd: + prefix: /example + endpoints: + - http://localhost:2379 \ No newline at end of file diff --git a/doc/code_snippets/test/config/instances.yml b/doc/code_snippets/test/config/instances.yml new file mode 100644 index 0000000000..812d811a9a --- /dev/null +++ b/doc/code_snippets/test/config/instances.yml @@ -0,0 +1,3 @@ +app.instance-001: +app.instance-002: +app.instance-003: \ No newline at end of file diff --git a/doc/code_snippets/test/config/iproto_global_scope.yaml b/doc/code_snippets/test/config/iproto_global_scope.yaml new file mode 100644 index 0000000000..0fffce8af3 --- /dev/null +++ b/doc/code_snippets/test/config/iproto_global_scope.yaml @@ -0,0 +1,9 @@ +iproto: + listen: "3301" + +groups: + group-001: + replicasets: + replicaset-001: + instances: + instance-001: {} \ No newline at end of file diff --git a/doc/code_snippets/test/config/iproto_group_scope.yaml b/doc/code_snippets/test/config/iproto_group_scope.yaml new file mode 100644 index 0000000000..b9ff151206 --- /dev/null +++ b/doc/code_snippets/test/config/iproto_group_scope.yaml @@ -0,0 +1,8 @@ +groups: + group-001: + iproto: + listen: "3301" + replicasets: + replicaset-001: + instances: + instance-001: {} \ No newline at end of file diff --git a/doc/code_snippets/test/config/iproto_instance_scope.yaml b/doc/code_snippets/test/config/iproto_instance_scope.yaml new file mode 100644 index 0000000000..7e4ab44f4c --- /dev/null +++ b/doc/code_snippets/test/config/iproto_instance_scope.yaml @@ -0,0 +1,8 @@ +groups: + 
group-001: + replicasets: + replicaset-001: + instances: + instance-001: + iproto: + listen: "3301" \ No newline at end of file diff --git a/doc/code_snippets/test/config/iproto_replicaset_scope.yaml b/doc/code_snippets/test/config/iproto_replicaset_scope.yaml new file mode 100644 index 0000000000..09d43e8d4a --- /dev/null +++ b/doc/code_snippets/test/config/iproto_replicaset_scope.yaml @@ -0,0 +1,8 @@ +groups: + group-001: + replicasets: + replicaset-001: + iproto: + listen: "3301" + instances: + instance-001: {} \ No newline at end of file diff --git a/doc/code_snippets/test/config/myapp.lua b/doc/code_snippets/test/config/myapp.lua new file mode 100644 index 0000000000..1f79e0b7a2 --- /dev/null +++ b/doc/code_snippets/test/config/myapp.lua @@ -0,0 +1,3 @@ +local log = require('log').new("myapp") +local config = require('config') +log.info("%s from app, %s!", config:get('app.cfg.greeting'), box.info.name) diff --git a/doc/code_snippets/test/config/replicaset_automatic.yaml b/doc/code_snippets/test/config/replicaset_automatic.yaml new file mode 100644 index 0000000000..dbd3e5a534 --- /dev/null +++ b/doc/code_snippets/test/config/replicaset_automatic.yaml @@ -0,0 +1,30 @@ +credentials: + users: + replicator: + password: 'topsecret' + roles: [replication] + client: + password: 'secret' + roles: [super] + +iproto: + advertise: + peer: replicator@ + +replication: + failover: election + +groups: + group-001: + replicasets: + replicaset-001: + instances: + instance-001: + iproto: + listen: 127.0.0.1:3301 + instance-002: + iproto: + listen: 127.0.0.1:3302 + instance-003: + iproto: + listen: 127.0.0.1:3303 \ No newline at end of file diff --git a/doc/code_snippets/test/config/replicaset_manual.yaml b/doc/code_snippets/test/config/replicaset_manual.yaml new file mode 100644 index 0000000000..ed11e756a5 --- /dev/null +++ b/doc/code_snippets/test/config/replicaset_manual.yaml @@ -0,0 +1,31 @@ +credentials: + users: + replicator: + password: 'topsecret' + roles: [replication] 
+ client: + password: 'secret' + roles: [super] + +iproto: + advertise: + peer: replicator@ + +replication: + failover: manual + +groups: + group-001: + replicasets: + replicaset-001: + leader: instance-001 + instances: + instance-001: + iproto: + listen: 127.0.0.1:3301 + instance-002: + iproto: + listen: 127.0.0.1:3302 + instance-003: + iproto: + listen: 127.0.0.1:3303 \ No newline at end of file diff --git a/doc/code_snippets/test/config/single_instance_app.yaml b/doc/code_snippets/test/config/single_instance_app.yaml new file mode 100644 index 0000000000..6f47f5add1 --- /dev/null +++ b/doc/code_snippets/test/config/single_instance_app.yaml @@ -0,0 +1,13 @@ +app: + file: 'myapp.lua' + cfg: + greeting: 'Hello' + +groups: + group-001: + replicasets: + replicaset-001: + instances: + instance-001: + iproto: + listen: "3301" \ No newline at end of file diff --git a/doc/code_snippets/test/config/single_instance_templating.yaml b/doc/code_snippets/test/config/single_instance_templating.yaml new file mode 100644 index 0000000000..a08225f236 --- /dev/null +++ b/doc/code_snippets/test/config/single_instance_templating.yaml @@ -0,0 +1,10 @@ +groups: + group-001: + replicasets: + replicaset-001: + instances: + instance-001: + snapshot: + dir: ./var/{{ instance_name }}/snapshots + wal: + dir: ./var/{{ instance_name }}/wals \ No newline at end of file diff --git a/doc/code_snippets/test/justrun.lua b/doc/code_snippets/test/justrun.lua new file mode 100644 index 0000000000..596adf88e9 --- /dev/null +++ b/doc/code_snippets/test/justrun.lua @@ -0,0 +1,144 @@ +local fun = require('fun') +local log = require('log') +local json = require('json') +local fiber = require('fiber') +local popen = require('popen') + +local justrun = {} + +local function collect_stderr(ph) + local f = fiber.new(function() + local fiber_name = "child's stderr collector" + fiber.name(fiber_name, {truncate = true}) + + local chunks = {} + + while true do + local chunk, err = ph:read({stderr = true}) + if 
chunk == nil then + log.warn(('%s: got error, exiting: %s'):format( + fiber_name, tostring(err))) + break + end + if chunk == '' then + log.info(('%s: got EOF, exiting'):format(fiber_name)) + break + end + table.insert(chunks, chunk) + end + + -- Glue all chunks, strip trailing newline. + return table.concat(chunks):rstrip() + end) + f:set_joinable(true) + return f +end + +local function cancel_stderr_fiber(stderr_fiber) + if stderr_fiber == nil then + return + end + stderr_fiber:cancel() +end + +local function join_stderr_fiber(stderr_fiber) + if stderr_fiber == nil then + return + end + return select(2, assert(stderr_fiber:join())) +end + +-- Run tarantool in given directory with given environment and +-- command line arguments and catch its output. +-- +-- Expects JSON lines as the output and parses it into an array +-- (it can be disabled using `nojson` option). +-- +-- Options: +-- +-- - nojson (boolean, default: false) +-- +-- Don't attempt to decode stdout as a stream of JSON lines, +-- return as is. +-- +-- - stderr (boolean, default: false) +-- +-- Collect stderr and place it into the `stderr` field of the +-- return value +function justrun.tarantool(dir, env, args, opts) + assert(type(dir) == 'string') + assert(type(env) == 'table') + assert(type(args) == 'table') + local opts = opts or {} + assert(type(opts) == 'table') + + -- Prevent system/user inputrc configuration file from + -- influencing testing code. + env['INPUTRC'] = '/dev/null' + + local tarantool_exe = arg[-1] + -- Use popen.shell() instead of popen.new() due to lack of + -- cwd option in popen (gh-5633). 
+ local env_str = table.concat(fun.iter(env):map(function(k, v) + return ('%s=%q'):format(k, v) + end):totable(), ' ') + local command = ('cd %s && %s %s %s'):format(dir, env_str, tarantool_exe, + table.concat(args, ' ')) + log.info(('Running a command: %s'):format(command)) + local mode = opts.stderr and 'rR' or 'r' + local ph = popen.shell(command, mode) + + local stderr_fiber + if opts.stderr then + stderr_fiber = collect_stderr(ph) + end + + -- Read everything until EOF. + local chunks = {} + while true do + local chunk, err = ph:read() + if chunk == nil then + cancel_stderr_fiber(stderr_fiber) + ph:close() + error(err) + end + if chunk == '' then -- EOF + break + end + table.insert(chunks, chunk) + end + + local exit_code = ph:wait().exit_code + local stderr = join_stderr_fiber(stderr_fiber) + ph:close() + + -- If an error occurs, discard the output and return only the + -- exit code. However, return stderr. + if exit_code ~= 0 then + return { + exit_code = exit_code, + stderr = stderr, + } + end + + -- Glue all chunks, strip trailing newline. + local res = table.concat(chunks):rstrip() + log.info(('Command output:\n%s'):format(res)) + + -- Decode JSON object per line into array of tables (if + -- `nojson` option is not passed). + local decoded + if opts.nojson then + decoded = res + else + decoded = fun.iter(res:split('\n')):map(json.decode):totable() + end + + return { + exit_code = exit_code, + stdout = decoded, + stderr = stderr, + } +end + +return justrun diff --git a/doc/code_snippets/test/treegen.lua b/doc/code_snippets/test/treegen.lua new file mode 100644 index 0000000000..987ff20df4 --- /dev/null +++ b/doc/code_snippets/test/treegen.lua @@ -0,0 +1,170 @@ +-- Working tree generator. +-- +-- Generates a tree of Lua files using provided templates and +-- filenames. 
+-- +-- Basic usage: +-- +-- | local t = require('luatest') +-- | local treegen = require('test.treegen') +-- | +-- | local g = t.group() +-- | +-- | local SCRIPT_TEMPLATE = [[ +-- | <...> +-- | ]] +-- | +-- | g.before_all(function(g) +-- | treegen.init(g) +-- | treegen.add_template(g, '^.*$', SCRIPT_TEMPLATE) +-- | end) +-- | +-- | g.after_all(function(g) +-- | treegen.clean(g) +-- | end) +-- | +-- | g.foobar_test = function(g) +-- | local dir = treegen.prepare_directory(g, +-- | {'foo/bar.lua', 'main.lua'}) +-- | <..test case..> +-- | end + +local fio = require('fio') +local log = require('log') +local fun = require('fun') + +local treegen = {} + +local function find_template(g, script) + for _, template_def in ipairs(g.templates) do + if script:match(template_def.pattern) then + return template_def.template + end + end + error(("treegen: can't find a template for script %q"):format(script)) +end + +-- Write provided script into the given directory. +function treegen.write_script(dir, script, body) + local script_abspath = fio.pathjoin(dir, script) + local flags = {'O_CREAT', 'O_WRONLY', 'O_TRUNC'} + local mode = tonumber('644', 8) + + local scriptdir_abspath = fio.dirname(script_abspath) + log.info(('Creating a directory: %s'):format(scriptdir_abspath)) + fio.mktree(scriptdir_abspath) + + log.info(('Writing a script: %s'):format(script_abspath)) + local fh = fio.open(script_abspath, flags, mode) + fh:write(body) + fh:close() + return script_abspath +end + +-- Generate a script that follows a template and write it at the +-- given path in the given directory. 
+local function gen_script(g, dir, script, replacements) + local template = find_template(g, script) + local replacements = fun.chain({script = script}, replacements):tomap() + local body = template:gsub('<(.-)>', replacements) + treegen.write_script(dir, script, body) +end + +function treegen.init(g) + g.tempdirs = {} + g.templates = {} +end + +-- Remove all temporary directories created by the test +-- unless KEEP_DATA environment variable is set to a +-- non-empty value. +function treegen.clean(g) + local dirs = table.copy(g.tempdirs) + g.tempdirs = nil + + local keep_data = (os.getenv('KEEP_DATA') or '') ~= '' + + for _, dir in ipairs(dirs) do + if keep_data then + log.info(('Left intact due to KEEP_DATA env var: %s'):format(dir)) + else + log.info(('Recursively removing: %s'):format(dir)) + fio.rmtree(dir) + end + end + + g.templates = nil +end + +function treegen.add_template(g, pattern, template) + table.insert(g.templates, { + pattern = pattern, + template = template, + }) +end + +-- Create a temporary directory with given scripts. +-- +-- The scripts are generated using templates added by +-- treegen.add_template(). +-- +-- Example for {'foo/bar.lua', 'baz.lua'}: +-- +-- / +-- + tmp/ +-- + rfbWOJ/ +-- + foo/ +-- | + bar.lua +-- + baz.lua +-- +-- The return value is '/tmp/rfbWOJ' for this example. +function treegen.prepare_directory(g, scripts, replacements) + local replacements = replacements or {} + + assert(type(scripts) == 'table') + assert(type(replacements) == 'table') + + local dir = fio.tempdir() + + -- fio.tempdir() follows the TMPDIR environment variable. + -- If it ends with a slash, the return value contains a double + -- slash in the middle: for example, if TMPDIR=/tmp/, the + -- result is like `/tmp//rfbWOJ`. + -- + -- It looks harmless on the first glance, but this directory + -- path may be used later to form an URI for a Unix domain + -- socket. As result the URI looks like + -- `unix/:/tmp//rfbWOJ/instance-001.iproto`. 
+ -- + -- It confuses net_box.connect(): it reports EAI_NONAME error + -- from getaddrinfo(). + -- + -- It seems, the reason is a peculiar of the URI parsing: + -- + -- tarantool> uri.parse('unix/:/foo/bar.iproto') + -- --- + -- - host: unix/ + -- service: /foo/bar.iproto + -- unix: /foo/bar.iproto + -- ... + -- + -- tarantool> uri.parse('unix/:/foo//bar.iproto') + -- --- + -- - host: unix + -- path: /foo//bar.iproto + -- ... + -- + -- Let's normalize the path using fio.abspath(), which + -- eliminates the double slashes. + dir = fio.abspath(dir) + + table.insert(g.tempdirs, dir) + + for _, script in ipairs(scripts) do + gen_script(g, dir, script, replacements) + end + + return dir +end + +return treegen diff --git a/doc/concepts/configuration.rst b/doc/concepts/configuration.rst new file mode 100644 index 0000000000..7c12f4c485 --- /dev/null +++ b/doc/concepts/configuration.rst @@ -0,0 +1,618 @@ +.. _configuration: + +Configuration +============= + +There are two approaches to configuring Tarantool: + +* *Since version 3.0*: In a YAML file. + + In a YAML file, you can provide the full cluster topology and specify all configuration options. + You can also use :ref:`etcd ` to store configuration data in one reliable place. + +* *In version 2.11 and earlier*: :ref:`In code ` using the ``box.cfg`` API. + + In this case, configuration is provided in a Lua initialization script. + Starting with the 3.0 version, configuring Tarantool in code is considered a legacy approach. + + +.. _configuration_overview: + +Configuration overview +---------------------- + +A YAML configuration file describes the full topology of a Tarantool cluster. +A cluster's topology includes the following elements, starting from the lower level: + +- An *instance* is a member of the cluster that stores data or might act as a router for handling CRUD requests in a :ref:`sharded ` cluster. +- A *replica set* is a pack of instances that operate on copies of the same databases. 
+ :ref:`Replication ` provides redundancy and increases data availability. +- A *group* provides the ability to organize replica sets. + For example, in a sharded cluster, one group can contain :ref:`storage ` instances and another group can contain :ref:`routers ` used to handle CRUD requests. + +You can flexibly configure a cluster's settings on different levels: from global settings applied to all groups to parameters specific for concrete instances. + + +.. _configuration_file: + +Configuration in a file +~~~~~~~~~~~~~~~~~~~~~~~ + +This section provides an overview on how to configure Tarantool in a YAML file. + +.. _configuration_instance_basic: + +Basic instance configuration +**************************** + +The example below shows a sample configuration of a single Tarantool instance: + +.. literalinclude:: /code_snippets/test/config/iproto_instance_scope.yaml + :language: yaml + :dedent: + +- The ``instances`` section includes only one instance named *instance-001*. + The ``iproto.listen`` option sets a port used to listen for incoming requests. +- The ``replicasets`` section contains one replica set named *replicaset-001*. +- The ``groups`` section contains one group named *group-001*. + + +.. _configuration_scopes: + +Configuration scopes +******************** + +This section shows how to control a scope the specified configuration option is applied to. +Most of the configuration options can be applied to a specific instance, replica set, group, or to all instances globally. + +- *Instance* + + To apply specific configuration options to a concrete instance, + specify such options for this instance only. + In the example below, ``iproto.listen`` is applied to *instance-001* only. + + .. literalinclude:: /code_snippets/test/config/iproto_instance_scope.yaml + :language: yaml + :emphasize-lines: 6-8 + :dedent: + +- *Replica set* + + In this example, ``iproto.listen`` is in effect for all instances in *replicaset-001*. + + .. 
literalinclude:: /code_snippets/test/config/iproto_replicaset_scope.yaml + :language: yaml + :emphasize-lines: 4-6 + :dedent: + +- *Group* + + In this example, ``iproto.listen`` is in effect for all instances in *group-001*. + + .. literalinclude:: /code_snippets/test/config/iproto_group_scope.yaml + :language: yaml + :emphasize-lines: 2-4 + :dedent: + +- *Global* + + In this example, ``iproto.listen`` is applied to all instances of all groups. + + .. literalinclude:: /code_snippets/test/config/iproto_global_scope.yaml + :language: yaml + :emphasize-lines: 1-2 + :dedent: + + +.. NOTE:: + + The :ref:`Configuration reference ` contains information to which scopes each configuration option can be applied. + + +.. _configuration_replica_set_scopes: + +Replica set configuration and configuration scopes +************************************************** + +This section shows how to configure a :ref:`replica set ` with a manual failover +and describes how specific configuration options work in different configuration scopes. + +.. literalinclude:: /code_snippets/test/config/replicaset_manual.yaml + :language: yaml + :dedent: + +- ``credentials`` (*global*) + + Options in this section grant the specified privileges to the *replicator* user used for replication and + the *client* user that can perform any action. + These options are applied globally to all instances. + +- ``iproto`` (*global*, *instance*) + + The ``iproto`` section is specified on both global and instance levels. + The ``iproto.advertise.peer`` option specifies a user name and a host used by replicas to connect to each other. + In this example, a host is not specified and taken from ``iproto.listen`` set on the instance level. + +- ``replication``: (*global*) + + The ``replication.failover`` global option sets a manual failover for all replica sets. + +- ``leader``: (*replica set*) + + The ``.leader`` option sets a :ref:`master ` instance for the specified replica set. 
+ +To learn more about configuring replication and sharding, see :ref:`Replication tutorials ` and :ref:`Quick start with sharding ` sections. + + +.. _configuration_application: + +Loading an application +********************** + +Using Tarantool as an application server, you can write your own applications in Lua. +In the ``app`` section, you can load the application and provide a custom application configuration in the ``cfg`` section. + +In the example below, the application is loaded from the ``myapp.lua`` file placed next to the YAML configuration file: + +.. literalinclude:: /code_snippets/test/config/single_instance_app.yaml + :language: yaml + :dedent: + +To get a value of the custom ``greeting`` property in the application code, +use the ``config:get()`` function provided by the ``config`` module. + +.. TODO: Add a link to the 'config' module API reference page: https://github.com/tarantool/doc/issues/3662 + +.. literalinclude:: /code_snippets/test/config/myapp.lua + :language: lua + :dedent: + +As a result of :ref:`running ` the *instance-001*, a log should contain the following line: + +.. code-block:: console + + main/103/interactive/myapp I> Hello from app, instance-001! + +The ``app`` section can be placed in any :ref:`configuration scope `. +As an example use case, you can provide different applications for storages and routers in a sharded cluster: + +.. code-block:: yaml + + groups: + storages: + app: + module: storage + # ... + routers: + app: + module: router + # ... + +Learn more about using Tarantool as the application server from :ref:`Developing applications with Tarantool `. + + + +.. _configuration_templating: + +Templating +********** + +In a configuration file, you can use predefined variables that are replaced with actual values at runtime. +In the example below, ``{{ instance_name }}`` is replaced with *instance-001*. + +.. 
literalinclude:: /code_snippets/test/config/single_instance_templating.yaml
+    :language: yaml
+    :dedent:
+
+As a result, the paths to snapshots and write-ahead logs differ for different instances.
+
+
+
+.. _configuration_environment_variable:
+
+Environment variables
+~~~~~~~~~~~~~~~~~~~~~
+
+For each configuration parameter, Tarantool provides two sets of predefined environment variables:
+
+* Variables whose names start with ``TT_`` are used to substitute parameters specified in a configuration file.
+  This means that these variables have a higher :ref:`priority ` than the options specified in a configuration file.
+
+* Variables whose names start with ``TT_`` and end with ``_DEFAULT`` are used to specify default values for parameters missing in a configuration file.
+  These variables have a lower :ref:`priority ` than the options specified in a configuration file.
+
+For example, ``TT_IPROTO_LISTEN`` and ``TT_IPROTO_LISTEN_DEFAULT`` correspond to the ``iproto.listen`` option.
+``TT_SNAPSHOT_DIR`` and ``TT_SNAPSHOT_DIR_DEFAULT`` correspond to the ``snapshot.dir`` option.
+To see all the supported environment variables, execute the ``tarantool`` command with the ``--help-env-list`` :ref:`option `.
+
+.. code-block:: console
+
+    $ tarantool --help-env-list
+
+Below are a few examples that show how to set environment variables of different types:
+
+* In the example below, ``TT_IPROTO_LISTEN`` is used to specify :ref:`listening host and port ` values:
+
+  .. code-block:: console
+
+      $ export TT_IPROTO_LISTEN='127.0.0.1:3311'
+
+* To specify several listening addresses, separate them by commas without spaces:
+
+  .. code-block:: console
+
+      $ export TT_IPROTO_LISTEN='127.0.0.1:3311,127.0.0.1:3312'
+
+* To assign map values to environment variables, use JSON objects.
+  In the example below, ``TT_APP_CFG`` is used to specify the value of a custom configuration property for a :ref:`loaded application `:
+
+  .. 
code-block:: console + + $ export TT_APP_CFG='{"greeting":"Hi"}' + + +.. NOTE:: + + The ``TT_INSTANCE_NAME`` and ``TT_CONFIG`` environment variables can be used to :ref:`run ` the specified Tarantool instance with configuration from the given file. + + + + + + +.. _configuration_centralized: + +Centralized configuration +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. admonition:: Enterprise Edition + :class: fact + + Centralized configuration is supported by the `Enterprise Edition `_ only. + +Tarantool enables you to store configuration data in one reliable place using etcd. +To achieve this, you need to: + +1. Put a YAML file with a cluster's configuration to an etcd server: + + .. code-block:: console + + $ etcdctl put /example/config/all.yaml < remote_config.yaml + +2. Provide a local YAML configuration with an etcd endpoint address and key prefix in the ``config`` section: + + .. literalinclude:: /code_snippets/test/config/etcd.yaml + :language: yaml + :dedent: + +Learn more from the following guide: :ref:`Storing configuration in etcd `. + + +.. _configuration_precedence: + +Configuration precedence +~~~~~~~~~~~~~~~~~~~~~~~~ + +Tarantool configuration options are applied in the following order: + +- `TT_*_DEFAULT` :ref:`environment variables `. +- :ref:`Centralized configuration ` stored in etcd. +- Configuration from a :ref:`local YAML file `. +- `TT_*` :ref:`environment variables `. + +This means that `TT_*` environment variables have a higher priority than other configuration ways. + + + +.. _configuration_options_overview: + +Configuration options overview +------------------------------ + +This section gives an overview of some useful configuration options. +All the available options are documented in the :ref:`Configuration reference `. + +.. _configuration_options_connection: + +Connection settings +~~~~~~~~~~~~~~~~~~~ + +To configure an address used to listen for incoming requests, use the ``iproto.listen`` option. 
+Below are a few examples of how to do this:
+
+* Set a listening port to ``3301``:
+
+  .. code-block:: yaml
+
+      iproto:
+        listen: "3301"
+
+* Set a listening address to ``127.0.0.1:3301``:
+
+  .. code-block:: yaml
+
+      iproto:
+        listen: "127.0.0.1:3301"
+
+
+* Configure several listening addresses:
+
+  .. code-block:: yaml
+
+      iproto:
+        listen: "127.0.0.1:3301,127.0.0.1:3303"
+
+* Enable :ref:`traffic encryption ` for a connection using the corresponding URI parameters:
+
+  .. code-block:: yaml
+
+      iproto:
+        listen: "127.0.0.1:3301?transport=ssl&ssl_key_file=localhost.key&ssl_cert_file=localhost.crt&ssl_ca_file=ca.crt"
+
+  Note that traffic encryption is supported by the `Enterprise Edition `_ only.
+
+
+* Use a Unix domain socket:
+
+  .. code-block:: yaml
+
+      iproto:
+        listen: "unix/:./{{ instance_name }}.iproto"
+
+
+.. _configuration_options_access_control:
+
+Access control
+~~~~~~~~~~~~~~
+
+The ``credentials`` section allows you to grant the specified privileges to users.
+In the example below, there are two users:
+
+* The *replicator* user is used for replication and has a corresponding role.
+* The *client* user has the ``super`` role and can perform any action on Tarantool instances.
+
+.. literalinclude:: /code_snippets/test/config/replicaset_manual.yaml
+    :language: yaml
+    :lines: 1-8
+    :dedent:
+
+To learn more, see the :ref:`Access control ` section.
+
+
+.. _configuration_options_memory:
+
+Memory
+~~~~~~
+
+The ``memtx.memory`` option specifies how much memory Tarantool allocates to actually store data.
+
+.. code-block:: yaml
+
+    memtx:
+      memory: 100000000
+
+When the limit is reached, ``INSERT`` or ``UPDATE`` requests fail with :ref:`ER_MEMORY_ISSUE `.
+
+
+.. _configuration_options_directories:
+
+Snapshots and write-ahead logs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``snapshot.dir`` and ``wal.dir`` options can be used to configure directories for storing snapshots and write-ahead logs.
+
+.. 
code-block:: yaml + + instance-001: + snapshot: + dir: 'var/snapshots' + wal: + dir: 'var/wals' + + + +.. _configuration_run_instance: + +Starting Tarantool instances +---------------------------- + +.. _configuration_run_instance_tt: + +Starting instances using the tt utility +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The :ref:`tt ` utility is the recommended way to start Tarantool instances. + +Instance files or directories are organized into applications in the ``instances_enabled`` directory. +The example below shows how a :ref:`layout ` of the application called ``app`` might look: + +.. code-block:: none + + instances.enabled + └── app + ├── config.yaml + ├── myapp.lua + └── instances.yml + +* ``config.yaml`` is a :ref:`configuration file `. +* ``myapp.lua`` is a Lua script containing an :ref:`application to load `. +* ``instances.yml`` specifies :ref:`instances ` to run in the current environment. + This file might look as follows: + + .. literalinclude:: /code_snippets/test/config/instances.yml + :language: yaml + :dedent: + +To start all instances, use the ``tt start app`` command: + + .. code-block:: console + + $ tt start app + • Starting an instance [app:instance-001]... + • Starting an instance [app:instance-002]... + • Starting an instance [app:instance-003]... + +You can learn more from the :ref:`Starting and stopping instances ` section. + + + +.. _configuration_run_instance_tarantool: + +Starting an instance using the tarantool command +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``tarantool`` command provides additional :ref:`options ` that might be helpful for development purposes. +Below is the syntax for starting a Tarantool instance configured in a file: + +.. code-block:: console + + $ tarantool --name INSTANCE_NAME --config CONFIG_FILE_PATH [OPTION ...] + +The command below starts ``instance-001`` configured in the ``config.yaml`` file: + +.. code-block:: console + + $ tarantool --name instance-001 --config config.yaml + + +.. 
_configuration_command_options: + +Command-line options +******************** + +Options that can be passed when :ref:`running a Tarantool instance `: + +.. option:: -h, --help + + Print an annotated list of all available options and exit. + +.. option:: --help-env-list + + **Since:** :tarantool-release:`3.0.0` + + Show a list of :ref:`environment variables ` that can be used to configure Tarantool. + +.. _index-tarantool_version: + +.. option:: -v, -V, --version + + Print the product name and version. + + **Example** + + .. code-block:: console + + % tarantool --version + Tarantool 3.0.0-entrypoint-746-g36ef3fb43 + Target: Darwin-arm64-Release + ... + + In this example: + + * ``3.0.0`` is a Tarantool version. + Tarantool follows semantic versioning, which is described in the :ref:`Tarantool release policy ` section. + + * ``Target`` is the platform Tarantool is built on. + Platform-specific details may follow this line. + + +.. option:: -c, --config PATH + + **Since:** :tarantool-release:`3.0.0` + + Set a path to a :ref:`YAML configuration file `. + You can also configure this value using the ``TT_CONFIG`` environment variable. + + See also: :ref:`Starting an instance using the tarantool command ` + +.. option:: -n, --name INSTANCE + + **Since:** :tarantool-release:`3.0.0` + + Set the name of an instance to run. + You can also configure this value using the ``TT_INSTANCE_NAME`` environment variable. + + See also: :ref:`Starting an instance using the tarantool command ` + + +.. option:: -i + + Enter an :ref:`interactive mode `. + + **Example** + + .. code-block:: console + + % tarantool -i + + +.. option:: -e EXPR + + Execute the 'EXPR' string. See also: `lua man page `_. + + **Example** + + .. code-block:: console + + % tarantool -e "print('Hello, world!')" + Hello, world! + +.. option:: -l NAME + + Require the 'NAME' library. See also: `lua man page `_. + + **Example** + + .. code-block:: console + + % tarantool -l luatest.coverage script.lua + +.. 
option:: -j cmd + + Perform a LuaJIT control command. See also: `Command Line Options `_. + + **Example** + + .. code-block:: console + + % tarantool -j off app.lua + +.. option:: -b ... + + Save or list bytecode. See also: `Command Line Options `_. + + **Example** + + .. code-block:: console + + % tarantool -b test.lua test.out + +.. option:: -d SCRIPT + + Activate a debugging session for 'SCRIPT'. See also: `luadebug.lua `_. + + **Example** + + .. code-block:: console + + % tarantool -d app.lua + + +.. option:: -- + + Stop handling options. See also: `lua man page `_. + + +.. option:: - + + Stop handling options and execute the standard input as a file. See also: `lua man page `_. + + + + +.. toctree:: + :hidden: + + configuration/configuration_etcd + configuration/configuration_code + configuration/configuration_migrating diff --git a/doc/concepts/configuration/configuration_code.rst b/doc/concepts/configuration/configuration_code.rst new file mode 100644 index 0000000000..189306ef45 --- /dev/null +++ b/doc/concepts/configuration/configuration_code.rst @@ -0,0 +1,341 @@ +.. _configuration_code: + +Configuration in code +===================== + +.. box_cfg_legacy_note_start + +.. NOTE:: + + Starting with the 3.0 version, the recommended way of configuring Tarantool is using a :ref:`configuration file `. + Configuring Tarantool in code is considered a legacy approach. + +.. box_cfg_legacy_note_end + +This topic covers specifics of configuring Tarantool in code using the ``box.cfg`` API. +In this case, a configuration is stored in an :ref:`initialization file ` - a Lua script with the specified configuration options. +You can find all the available options in the :ref:`Configuration reference `. + + +.. _index-init_label: + +Initialization file +------------------- + +If the command to :ref:`start Tarantool ` includes ``LUA_INITIALIZATION_FILE``, then +Tarantool begins by invoking the Lua program in the file, which by convention +may have the name ``script.lua``. 
The Lua program may get further arguments +from the command line or may use operating-system functions, such as ``getenv()``. +The Lua program almost always begins by invoking ``box.cfg()``, if the database +server will be used or if ports need to be opened. For example, suppose +``script.lua`` contains the lines + +.. _index-init-example: + +.. code-block:: lua + + #!/usr/bin/env tarantool + box.cfg{ + listen = os.getenv("LISTEN_URI"), + memtx_memory = 33554432, + pid_file = "tarantool.pid", + wal_max_size = 2500 + } + print('Starting ', arg[1]) + +and suppose the environment variable LISTEN_URI contains 3301, +and suppose the command line is ``~/tarantool/src/tarantool script.lua ARG``. +Then the screen might look like this: + +.. code-block:: console + + $ export LISTEN_URI=3301 + $ ~/tarantool/src/tarantool script.lua ARG + ... main/101/script.lua C> Tarantool 2.8.3-0-g01023dbc2 + ... main/101/script.lua C> log level 5 + ... main/101/script.lua I> mapping 33554432 bytes for memtx tuple arena... + ... main/101/script.lua I> recovery start + ... main/101/script.lua I> recovering from './00000000000000000000.snap' + ... main/101/script.lua I> set 'listen' configuration option to "3301" + ... main/102/leave_local_hot_standby I> ready to accept requests + Starting ARG + ... main C> entering the event loop + +If you wish to start an interactive session on the same terminal after +initialization is complete, you can use :ref:`console.start() `. + + +.. _box-cfg-params-env: + +Environment variables +--------------------- + +Starting from version :doc:`2.8.1 `, you can specify configuration parameters via special environment variables. +The name of a variable should have the following pattern: ``TT_``, +where ```` is the uppercase name of the corresponding :ref:`box.cfg parameter `. + +For example: + +* ``TT_LISTEN`` -- corresponds to the ``box.cfg.listen`` option. +* ``TT_MEMTX_DIR`` -- corresponds to the ``box.cfg.memtx_dir`` option. 
+
+In case of an array value, separate the array elements by commas without spaces:
+
+.. code-block:: console
+
+    export TT_REPLICATION="localhost:3301,localhost:3302"
+
+If you need to pass :ref:`additional parameters for URI `, use the ``?`` and ``&`` delimiters:
+
+.. code-block:: console
+
+    export TT_LISTEN="localhost:3301?param1=value1&param2=value2"
+
+An empty variable (``TT_LISTEN=``) has the same effect as an unset one, meaning that the corresponding configuration parameter won't be set when calling ``box.cfg{}``.
+
+
+
+.. _index-local_hot_standby:
+.. _index-replication_port:
+.. _index-slab_alloc_arena:
+.. _index-replication_source:
+.. _index-snap_dir:
+.. _index-wal_dir:
+.. _index-wal_mode:
+.. _index-checkpoint daemon:
+
+.. _box_cfg_params:
+
+
+Configuration parameters
+------------------------
+
+Configuration parameters have the form:
+
+:extsamp:`{**{box.cfg}**}{[{*{key = value}*} [, {*{key = value ...}*}]]}`
+
+Since ``box.cfg`` may contain many configuration parameters and since some of the
+parameters (such as directory addresses) are semi-permanent, it's best to keep
+``box.cfg`` in a Lua file. Typically this Lua file is the initialization file
+which is specified on the Tarantool command line.
+
+Most configuration parameters are for allocating resources, opening ports, and
+specifying database behavior. All parameters are optional.
+A few parameters are dynamic, that is, they can be changed at runtime by calling ``box.cfg{}`` a second time.
+For example, the command below sets the :ref:`listen port ` to ``3301``.
+
+.. code-block:: tarantoolsession
+
+    tarantool> box.cfg{ listen = 3301 }
+    2023-05-10 13:28:54.667 [31326] main/103/interactive I> tx_binary: stopped
+    2023-05-10 13:28:54.667 [31326] main/103/interactive I> tx_binary: bound to [::]:3301
+    2023-05-10 13:28:54.667 [31326] main/103/interactive/box.load_cfg I> set 'listen' configuration option to 3301
+    ---
+    ...
+ + +To see all the non-null parameters, execute ``box.cfg`` (no parentheses). + +.. code-block:: tarantoolsession + + tarantool> box.cfg + --- + - replication_skip_conflict: false + wal_queue_max_size: 16777216 + feedback_host: https://feedback.tarantool.io + memtx_dir: . + memtx_min_tuple_size: 16 + -- other parameters -- + ... + +To see a particular parameter value, call a corresponding ``box.cfg`` option. +For example, ``box.cfg.listen`` shows the specified :ref:`listen address `. + +.. code-block:: tarantoolsession + + tarantool> box.cfg.listen + --- + - 3301 + ... + + + +.. _index-uri: + +Listen URI +---------- + +Some configuration parameters and some functions depend on a URI (Universal Resource Identifier). +The URI string format is similar to the +`generic syntax for a URI schema `_. +It may contain (in order): + +* user name for login +* password +* host name or host IP address +* port number. + +Only a port number is always mandatory. A password is mandatory if a user +name is specified, unless the user name is 'guest'. + +Formally, the URI +syntax is ``[host:]port`` or ``[username:password@]host:port``. +If host is omitted, then "0.0.0.0" or "[::]" is assumed +meaning respectively any IPv4 address or any IPv6 address +on the local machine. +If ``username:password`` is omitted, then the "guest" user is assumed. Some examples: + +.. container:: table + + .. rst-class:: left-align-column-1 + .. 
rst-class:: left-align-column-2 + + +-----------------------------+------------------------------+ + | URI fragment | Example | + +=============================+==============================+ + | port | 3301 | + +-----------------------------+------------------------------+ + | host:port | 127.0.0.1:3301 | + +-----------------------------+------------------------------+ + | username:password@host:port | notguest:sesame@mail.ru:3301 | + +-----------------------------+------------------------------+ + +In code, the URI value can be passed as a number (if only a port is specified) or a string: + +.. code-block:: lua + + box.cfg { listen = 3301 } + + box.cfg { listen = "127.0.0.1:3301" } + +In certain circumstances, a Unix domain socket may be used +where a URI is expected, for example, "unix/:/tmp/unix_domain_socket.sock" or +simply "/tmp/unix_domain_socket.sock". + +The :ref:`uri ` module provides functions that convert URI strings into their +components, or turn components into URI strings. + +.. _index-uri-several: + +Specifying several URIs +~~~~~~~~~~~~~~~~~~~~~~~ + +Starting from version 2.10.0, a user can open several listening iproto sockets on a Tarantool instance +and, consequently, can specify several URIs in the configuration parameters +such as :ref:`box.cfg.listen ` and :ref:`box.cfg.replication `. + +URI values can be set in a number of ways: + +* As a string with URI values separated by commas. + + .. code-block:: lua + + box.cfg { listen = "127.0.0.1:3301, /unix.sock, 3302" } + +* As a table that contains URIs in the string format. + + .. code-block:: lua + + box.cfg { listen = {"127.0.0.1:3301", "/unix.sock", "3302"} } + +* As an array of tables with the ``uri`` field. + + .. code-block:: lua + + box.cfg { listen = { + {uri = "127.0.0.1:3301"}, + {uri = "/unix.sock"}, + {uri = 3302} + } + } + +* In a combined way -- an array that contains URIs in both the string and the table formats. + + .. 
code-block:: lua + + box.cfg { listen = { + "127.0.0.1:3301", + { uri = "/unix.sock" }, + { uri = 3302 } + } + } + +.. _index-uri-several-params: + +Also, starting from version 2.10.0, it is possible to specify additional parameters for URIs. +You can do this in different ways: + +* Using the ``?`` delimiter when URIs are specified in a string format. + + .. code-block:: lua + + box.cfg { listen = "127.0.0.1:3301?p1=value1&p2=value2, /unix.sock?p3=value3" } + +* Using the ``params`` table: a URI is passed in a table with additional parameters in the "params" table. + Parameters in the "params" table overwrite the ones from a URI string ("value2" overwrites "value1" for ``p1`` in the example below). + + .. code-block:: lua + + box.cfg { listen = { + "127.0.0.1:3301?p1=value1", + params = {p1 = "value2", p2 = "value3"} + } + } + +* Using the ``default_params`` table for specifying default parameter values. + + In the example below, two URIs are passed in a table. + The default value for the ``p3`` parameter is defined in the ``default_params`` table + and used if this parameter is not specified in URIs. + Parameters in the ``default_params`` table are applicable to all the URIs passed in a table. + + .. code-block:: lua + + box.cfg { listen = { + "127.0.0.1:3301?p1=value1", + { uri = "/unix.sock", params = { p2 = "value2" } }, + default_params = { p3 = "value3" } + } + } + +The recommended way for specifying URI with additional parameters is the following: + +.. code-block:: lua + + box.cfg { listen = { + {uri = "127.0.0.1:3301", params = {p1 = "value1"}}, + {uri = "/unix.sock", params = {p2 = "value2"}}, + {uri = 3302, params = {p3 = "value3"}} + } + } + +In case of a single URI, the following syntax also works: + +.. code-block:: lua + + box.cfg { listen = { + uri = "127.0.0.1:3301", + params = { p1 = "value1", p2 = "value2" } + } + } + + + +.. 
_configuration_code_run_instance_tarantool: + +Starting a Tarantool instance +----------------------------- + +Below is the syntax for starting a Tarantool instance configured in a Lua initialization script: + +.. code-block:: console + + $ tarantool LUA_INITIALIZATION_FILE [OPTION ...] + +The ``tarantool`` command also provides a set of :ref:`options ` that might be helpful for development purposes. + +The command below starts a Tarantool instance configured in the ``script.lua`` file: + +.. code-block:: console + + $ tarantool script.lua diff --git a/doc/concepts/configuration/configuration_etcd.rst b/doc/concepts/configuration/configuration_etcd.rst new file mode 100644 index 0000000000..1b44c540b0 --- /dev/null +++ b/doc/concepts/configuration/configuration_etcd.rst @@ -0,0 +1,97 @@ +.. _configuration_etcd: + +Storing configuration in etcd +============================= + +.. admonition:: Enterprise Edition + :class: fact + + Centralized configuration is supported by the `Enterprise Edition `_ only. + +.. TODO + https://github.com/tarantool/doc/issues/3658 + + - Install and configure etcd (authentication, TLS) + - Local etcd configuration (mention env vars) + - endpoints + - key prefix + - auth + - TLS + - http (timeout, socket) + - Put a remote config + - etcdctl put + - tt cluster publish + - Show cluster config + - etcdctl get + - tt cluster show + - Start app + - Local config + - Env vars + - Reload config + - auto + - manual (config.reload) + + + Local config (``config.yaml``): + + .. literalinclude:: /code_snippets/test/config/etcd.yaml + :language: yaml + :dedent: + + Remote config (``remote_config.yaml``): + + .. literalinclude:: /code_snippets/test/config/replicaset_manual.yaml + :language: yaml + :dedent: + + Put a remote config: + + .. code-block:: console + + $ etcdctl put /example/config/all.yaml < remote_config.yaml + + Put a remote config using ``tt cluster``: + + .. 
code-block:: console + + $ tt cluster publish "http://localhost:2379/tt" remote_config.yaml + + Searches keys by the following path: ``/prefix/config/*``. + See https://github.com/tarantool/doc/issues/3725 + + Manual: + + .. code-block:: yaml + + config: + reload: 'manual' + + Reload config (on all instances): + + .. code-block:: lua + + require('config'):reload() + + Authentication: + + .. code-block:: console + + $ etcdctl --user root --password foobar role grant-permission tt readwrite /tt/config/all + $ etcdctl --user root --password foobar role grant-permission tt --prefix=true readwrite /tt/ + + $ etcdctl --user root --password foobar user grant-role testuser tt + + + .. code-block:: yaml + + config: + etcd: + http: + request: + timeout: 3 + prefix: /tt + endpoints: + - http://localhost:2379 + username: testuser + password: foobar + diff --git a/doc/concepts/configuration/configuration_migrating.rst b/doc/concepts/configuration/configuration_migrating.rst new file mode 100644 index 0000000000..c557d741ae --- /dev/null +++ b/doc/concepts/configuration/configuration_migrating.rst @@ -0,0 +1,9 @@ +.. _configuration_migrating_declarative: + +Migrating to declarative configuration +====================================== + +.. TODO + https://github.com/tarantool/doc/issues/3661 + 1) Configuration applying idempotence: how the config's 'target state' approach differs from the 'state changes' box.cfg() approach. + 2) How non-dynamic box.cfg() options are applied (no error, wait for restart). diff --git a/doc/concepts/data_model/schema_desc.rst b/doc/concepts/data_model/schema_desc.rst index cecf2566f6..21e9aefa03 100644 --- a/doc/concepts/data_model/schema_desc.rst +++ b/doc/concepts/data_model/schema_desc.rst @@ -86,14 +86,7 @@ The schema would look something like this: This alternative is simpler to use, and you do not have to dive deep into Lua. -``DDL`` is a built-in -:doc:`Cartridge ` module. -Cartridge is a cluster solution for Tarantool. 
In its WebUI, there is a separate tab -called "Code". On this tab, in the ``schema.yml`` file, you can define the schema, check its correctness, -and apply it to the whole cluster. - -If you do not use Cartridge, you can still use the DDL module: -put the following Lua code into the file that you use to run Tarantool. +To use the DDL module, put the following Lua code into the file that you use to run Tarantool. This file is usually called ``init.lua``. .. code:: lua diff --git a/doc/concepts/index.rst b/doc/concepts/index.rst index 60b72ec810..46bcfe065a 100644 --- a/doc/concepts/index.rst +++ b/doc/concepts/index.rst @@ -53,9 +53,6 @@ Application server Using Tarantool as an application server, you can write applications in Lua, C, or C++. You can also create reusable :ref:`modules `. -A convenient way to serve a clustered application on Tarantool is using :ref:`Tarantool Cartridge ` -- -a framework for developing, deploying, and managing applications. - To increase the speed of code execution, Tarantool has a Lua Just-In-Time compiler (LuaJIT) on board. LuaJIT compiles hot paths in the code -- paths that are used many times -- thus making the application work faster. @@ -114,11 +111,11 @@ For details, check the :ref:`Storage engines ` section. .. toctree:: :hidden: + configuration data_model/index coop_multitasking atomic modules - Tarantool Cartridge sharding/index replication/index triggers diff --git a/doc/concepts/modules.rst b/doc/concepts/modules.rst index ccd7cdcc2d..6fbc3b847e 100644 --- a/doc/concepts/modules.rst +++ b/doc/concepts/modules.rst @@ -3,9 +3,8 @@ Modules ======= -Any logic that is used in Tarantool can be packaged as an application -(like a :ref:`Cartridge application `) or a reusable **module**. -A module is an optional library that enhances Tarantool functionality. +Any logic that is used in Tarantool can be packaged as an application or a reusable **module**. +A module is an optional library that extends Tarantool functionality. 
It can be used by Tarantool applications or other modules. Modules allow for easier code management and hot code reload without restarting the Tarantool instance. Like applications, modules in Tarantool can be written in Lua, diff --git a/doc/contributing/contributing.rst b/doc/contributing/contributing.rst index 5c9dd555d7..76840ff1df 100644 --- a/doc/contributing/contributing.rst +++ b/doc/contributing/contributing.rst @@ -132,8 +132,8 @@ There are several ways to improve the documentation: see how it works. This can be done automatically in Docker. To learn more, check the `README of the tarantool/doc repository `_. -Some projects, like `Tarantool Cartridge `_, -have their documentation in the code repository. +Some Tarantool projects have their documentation in the code repository. +This is typical for modules, for example, `metrics `_. This is done on purpose, so the developers themselves can update it faster. You can find instructions for building such documentation in the code repository. @@ -161,8 +161,6 @@ Here are some of our official modules: the persistent message queue. * `metrics `_: Ready-to-use solution for collecting metrics. -* `cartridge `_: Framework for writing - distributed applications. Official modules are provided in our organization on GitHub. @@ -311,11 +309,8 @@ help with application deployment, or allow working with Kubernetes. Here are some of the tools created by the Tarantool team: -* `ansible-cartridge `_: - an Ansible role to deploy Cartridge applications. -* `cartridge-cli `_: - a CLI utility for creating applications, launching clusters locally on Cartridge, - and solving operation problems. +* `tt `_: + a CLI utility for creating and managing Tarantool applications. * `tarantool-operator `_: a Kubernetes operator for cluster orchestration. 
diff --git a/doc/contributing/docs/infra.rst b/doc/contributing/docs/infra.rst index 7276247a1a..0271c6e2a5 100644 --- a/doc/contributing/docs/infra.rst +++ b/doc/contributing/docs/infra.rst @@ -15,9 +15,8 @@ The documentation source files are mainly stored in the `documentation repository `_. However, in some cases, they are stored in the repositories of other Tarantool-related products -or modules -- `Cartridge `_, -`Monitoring `__, -and others. +or modules, such as +`Monitoring `__. If you are working with source files from a product or module repository, add that repository as a submodule to the @@ -103,32 +102,6 @@ The ``${project_root}`` variable is defined earlier in the file as ``project_roo This is because the documentation build has to start from the documentation repository root directory. -cartridge_cli -^^^^^^^^^^^^^ - -The content source file for the ``cartridge_cli`` submodule is -``README.rst``, located in the directory of the submodule repository. -In the final documentation view, the content should appear here: -``https://www.tarantool.io/en/doc/latest/book/cartridge/cartridge_cli/``. - -To make this work: - -* Create a directory at ``./doc/book/cartridge/cartridge_cli``. -* Copy ``./modules/cartridge_cli/README.rst`` to - ``./doc/book/cartridge/cartridge_cli/index.rst``. - -Here ar the corresponding settings in ``build_submodules.sh``: - -.. code-block:: bash - - rst_dest="${project_root}/doc/book/cartridge" - cartridge_cli_root="${project_root}/modules/cartridge-cli" - cartridge_cli_dest="${rst_dest}/cartridge_cli" - cartridge_cli_index_dest="${cartridge_cli_dest}/index.rst" - - mkdir -p "${cartridge_cli_dest}" - yes | cp -rf "${cartridge_cli_root}/README.rst" "${cartridge_cli_index_dest}" - .. _guidelines_doc_submodules_gitignore: 3. 
Update .gitignore diff --git a/doc/contributing/docs/localization/_includes/glossary-cartridge.csv b/doc/contributing/docs/localization/_includes/glossary-cartridge.csv deleted file mode 100644 index 67502c4740..0000000000 --- a/doc/contributing/docs/localization/_includes/glossary-cartridge.csv +++ /dev/null @@ -1,10 +0,0 @@ -Term [en];Term [ru];Description [en];Description [ru] -;приложение на Tarantool Cartridge;;Если без предлога, то теряется смысл: читается так, как будто Tarantool Cartridge — это название приложения. А это не так. -Tarantool Cartridge application;Tarantool Cartridge — это фреймворк;;" на базе которого можно разработать свое приложение.""" -Cartridge;Cartridge;; -production environment;производственная среда;Production environment is a term used mostly by developers to describe the setting where software and other products are actually put into operation for their intended uses by end users.; -failover;восстановление после сбоев;In computing and related technologies such as networking, failover is switching to a redundant or standby computer server, system, hardware component or network upon the failure or abnormal termination of the previously active application, server, system, hardware component, or network.; -replicaset;набор реплик;; -directory;директория;; -bucket;сегмент;; -check;выберите, выбрать;To select a checkbox; \ No newline at end of file diff --git a/doc/contributing/docs/localization/_includes/glossary.csv b/doc/contributing/docs/localization/_includes/glossary.csv index 5dc02a7245..8cdfb97728 100644 --- a/doc/contributing/docs/localization/_includes/glossary.csv +++ b/doc/contributing/docs/localization/_includes/glossary.csv @@ -1,7 +1,6 @@ Term [en];Term [ru];Description [en];Description [ru] space;спейс;A space is a container for tuples.; -;"https://www.tarantool.io/en/doc/latest/book/box/data_model/#spaces""";;NOUN -tuple;кортеж;A tuple plays the same role as a “row” or a “record”. 
The number of tuples in a space is unlimited. Tuples in Tarantool are stored as MsgPack arrays. https://www.tarantool.io/en/doc/latest/book/box/data_model/#tuples; +tuple;кортеж;A tuple plays the same role as a “row” or a “record”. The number of tuples in a space is unlimited. Tuples in Tarantool are stored as MsgPack arrays.; Tarantool;Tarantool;НЕ ПЕРЕВОДИТЬ; primary index;первичный индекс;The first index defined on a space is called the primary key index, and it must be unique. All other indexes are called secondary indexes, and they may be non-unique. https://www.tarantool.io/en/doc/latest/book/box/data_model/#indexes; fiber;файбер;A fiber is a set of instructions which are executed with cooperative multitasking. Fibers managed by the fiber module are associated with a user-supplied function called the fiber function. https://www.tarantool.io/en/doc/latest/reference/reference_lua/fiber/#fibers; @@ -16,8 +15,6 @@ implicit casting;неявное приведение типов;; database;база данных;; Release policy;Релизная политика;A set of rules for releasing and naming new distributions of Tarantool: where we add new features and where we don't, how we give them numbers, what versions are suitable to use in production.; field;поле;Fields are distinct data values, contained in a tuple. They play the same role as «row columns» or «record fields» in relational databases.; -;;; -;"https://www.tarantool.io/ru/doc/latest/book/box/data_model/#term-field""";;NOUN leader election;выборы лидера;(in a replica set, by the Raft algorithm); replica set;набор реплик;; heartbeat;контрольный сигнал;; @@ -43,14 +40,18 @@ expression;выражение;; predicate;предикат;(SQL) Predicates, which specify conditions that can be evaluated to SQL three-valued logic (3VL) (true/false/unknown) or Boolean truth values and are used to limit the effects of statements and queries, or to change program flow.; query;запрос;(SQL) Queries retrieve the data based on specific criteria. 
A query is a statement that returns a result set (possibly empty).; result set;результат запроса;(SQL) An SQL result set is a set of rows from a database, as well as metadata about the query such as the column names, and the types and sizes of each column. A result set is effectively a table.; -resultset;результат запроса;(SQL) An SQL result set is a set of rows from a database, as well as metadata about the query such as the column names, and the types and sizes of each column. A result set is effectively a table.; statement;инструкция;(SQL) A statement is any text that the database engine recognizes as a valid command.;(SQL) Любой текст, который распознаётся движком БД как команда. Инструкция состоит из ключевых слов и выражений языка SQL, которые предписывают Tarantool выполнять какие-либо действия с базой данных. -;"Tarantool: A statement consists of SQL-language keywords and expressions that direct Tarantool to do something with a database. https://www.tarantool.io/en/doc/latest/reference/reference_sql/sql_user_guide/#statements""";; +;;"Tarantool: A statement consists of SQL-language keywords and expressions that direct Tarantool to do something with a database. https://www.tarantool.io/en/doc/latest/reference/reference_sql/sql_user_guide/#statements""";; batch;пакет (инструкций);(SQL) A series of SQL statements sent to the server at once is called a batch.;(SQL) Серия SQL-инструкций (statements), отправляемая на сервер вместе production configuration;конфигурация производственной среды;; -deployment;;Transforming a mechanical, electrical, or computer system from a packaged to an operational state. IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment.; +deployment;развертывание;Transforming a mechanical, electrical, or computer system from a packaged to an operational state. 
IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment.; roll back;отменить;;транзакцию deploy to production;;IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment. Production environment is a setting where software and other products are actually put into operation for their intended uses by end users; operations;эксплуатация;(DevOps) Information technology operations, or IT operations, are the set of all processes and services that are both provisioned by an IT staff to their internal or external clients and used by themselves, to run themselves as a business. ; to deploy;;Transforming a mechanical, electrical, or computer system from a packaged to an operational state. IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment.; -deployment plan;;A sequence of operations or steps that must be carried to deliver changes into a target system environment.; \ No newline at end of file +deployment plan;;A sequence of operations or steps that must be carried to deliver changes into a target system environment.; +production environment;производственная среда;Production environment is a term used mostly by developers to describe the setting where software and other products are actually put into operation for their intended uses by end users.; +failover;восстановление после сбоев;In computing and related technologies such as networking, failover is switching to a redundant or standby computer server, system, hardware component or network upon the failure or abnormal termination of the previously active application, server, 
system, hardware component, or network.; +directory;директория;; +bucket;сегмент;; +select;выберите, выбрать;To select a checkbox; \ No newline at end of file diff --git a/doc/contributing/docs/localization/glossaries.rst b/doc/contributing/docs/localization/glossaries.rst index 97fb063058..b9917c9747 100644 --- a/doc/contributing/docs/localization/glossaries.rst +++ b/doc/contributing/docs/localization/glossaries.rst @@ -1,18 +1,8 @@ Glossaries ========== -Tarantool Core --------------- - .. csv-table:: :file: _includes/glossary.csv :header-rows: 1 :delim: ; -Cartridge ---------- - -.. csv-table:: - :file: _includes/glossary-cartridge.csv - :header-rows: 1 - :delim: ; diff --git a/doc/contributing/docs/localization/locstate.rst b/doc/contributing/docs/localization/locstate.rst index 80c38d5255..8c38ac3754 100644 --- a/doc/contributing/docs/localization/locstate.rst +++ b/doc/contributing/docs/localization/locstate.rst @@ -15,18 +15,10 @@ State of localization - |doc| - 352 000 - * - Cartridge - - |cartridge| - - 14 000 - * - Tarantool Ansible Role - |tarantool-ansible-role| - 11 000 - * - Cartridge CLI - - |cartridge-cli| - - 6 500 - * - Tarantool Enterprise Edition - |tarantool-enterprise| - 6 000 @@ -58,12 +50,6 @@ State of localization .. |doc| image:: https://badges.crowdin.net/tarantool-docs/localized.svg :target: https://crowdin.com/project/tarantool-docs/ru# -.. |cartridge| image:: https://badges.crowdin.net/tarantool-cartridge-docs/localized.svg - :target: https://crowdin.com/project/tarantool-cartridge-docs/ru# - -.. |cartridge-cli| image:: https://badges.crowdin.net/tarantool-cartridge-cli/localized.svg - :target: https://crowdin.com/project/tarantool-cartridge-cli/ru# - .. 
|tarantool-enterprise| image:: https://badges.crowdin.net/tarantool-enterprise-docs/localized.svg :target: https://crowdin.com/project/tarantool-enterprise-docs/ru# diff --git a/doc/contributing/docs/sphinx-warnings.rst b/doc/contributing/docs/sphinx-warnings.rst index e7743614d1..51210c2c75 100644 --- a/doc/contributing/docs/sphinx-warnings.rst +++ b/doc/contributing/docs/sphinx-warnings.rst @@ -163,7 +163,7 @@ This may happen when you refer to a wrong path to a document. Check the path. -If the path points to ``cartridge`` or another submodule, check that you've +If the path points to a submodule, check that you've :doc:`built the submodules content ` before building docs. diff --git a/doc/enterprise/admin.rst b/doc/enterprise/admin.rst index 8f9d19b1bf..a5a2578d2f 100644 --- a/doc/enterprise/admin.rst +++ b/doc/enterprise/admin.rst @@ -1,119 +1,6 @@ .. _enterprise-admin: -=============================================================================== Cluster administrator's guide -=============================================================================== +============================= -This guide focuses on Enterprise-specific administration features available -on top of Tarantool Community Edition with Tarantool Cartridge framework: - -* :ref:`space explorer ` -* :ref:`upgrade of environment-independent applications in production ` - -Otherwise, consult the following documentation for: - -* basic information on - :doc:`deploying and managing a Tarantool cluster ` -* more information on - :doc:`managing Tarantool instances ` - -.. _space_explorer: - -------------------------------------------------------------------------------- -Exploring spaces -------------------------------------------------------------------------------- - -The web interface lets you connect (in the browser) to any instance in the cluster -and see what spaces it stores (if any) and their contents. - -To explore spaces: - -#. 
When connected to an instance, the space explorer shows a table with basic
Recommended for backward-incompatible updates, requires - downtime. - -* **Instance by instance**. Recommended for backward-compatible updates, does - not require downtime. - -To upgrade the cluster, do the following: - -#. Schedule a downtime or plan for the instance-by-instance upgrade. - -#. Upload a new application package (archive) to all servers. - -Next, execute the chosen scenario: - -* **Cluster shutdown**: - - #. Stop all instances on all servers. - #. Deploy the new package (archive) on every server. - -* **Instance by instance**. Do the following in every replica set in succession: - - #. Stop a replica on any server. - #. Deploy the new package (archive) in place of the old replica. - #. Promote the new replica to a master (see - :ref:`Switching the replica set's master ` - section in the Tarantool manual). - #. Redeploy the old master and the rest of the instances in the replica set. - #. Be prepared to resolve possible logic conflicts. +.. TODO: rewrite for TCM instead of Cartridge \ No newline at end of file diff --git a/doc/enterprise/audit.rst b/doc/enterprise/audit.rst index 15d26a16da..927d5f7266 100644 --- a/doc/enterprise/audit.rst +++ b/doc/enterprise/audit.rst @@ -43,9 +43,9 @@ is not sufficiently secure. Closed HTTP ports ----------------- -Tarantool accepts HTTP connections on a specific port, configured with -``http_port: `` value -(see :ref:`configuring Cartridge instances `). +.. TODO: update for new EE config + +Tarantool accepts HTTP connections on a specific port. It must be only available on the same host for nginx to connect to it. Check that the configured HTTP port is closed @@ -103,8 +103,6 @@ Authorization in the web UI --------------------------- Using the web interface must require logging in with a username and password. -See more details in the documentation on -:ref:`configuring web interface authorization `. 
It must be available only on the same host so that nginx can connect to it.
In an initialization Lua file of your project, specify the ``cartridge-auth-extension`` :ref:`cluster role ` in the :ref:`Cartridge configuration `. - The role enables storing authorized users and validating the :ref:`LDAP configuration `. - - .. code-block:: lua - - cartridge.cfg({ - roles = { - 'cartridge-auth-extension', - }, - auth_backend_name = 'cartridge-auth-extension', - }) - -3. Deploy and start your application. For details, refer to :doc:`dev`. - -.. _enterprise-cartridge-auth-config: - -Configuring LDAP authorization ------------------------------- - -After starting your application, you need to configure LDAP authorization. It can be done via the GUI administrative console. - -1. In a web browser, open the GUI administrative console of your application. - -2. If you have the application instances already configured, proceed to the next step. Otherwise, refer to :ref:`cartridge-deployment` on how to configure the cluster. - -3. In the GUI administrative console, navigate to the **Code** tab. Create the following YAML configuration files and specify the necessary parameters. - Below is the example of configuration and the :ref:`description of parameters `. - -.. note:: - - If you set the authorization mode as ``local`` in the ``auth_extension.yml`` file, you don't need to define LDAP configuration parameters in the ``ldap.yml`` file. - - -* ``auth_extension.yml`` - - .. code-block:: yaml - - method: local+ldap - -* ``ldap.yml`` - - .. code-block:: yaml - - - domain: 'test.glauth.com' - organizational_units: ['all_staff'] - hosts: - - localhost:3893 - use_tls: false - use_active_directory: false - search_timeout: 2 - roles: - - role: 'admin' - domain_groups: - - 'cn=superusers,ou=groups,dc=glauth,dc=com' - - 'cn=users,ou=groups,dc=glauth,dc=com' - options: - LDAP_OPT_DEBUG_LEVEL: 10 - -* ``auth.yml`` - - .. code-block:: yaml - - enabled: true - -.. 
_enterprise-cartridge-auth-config-params: - -**Configuration parameters:** - -* ``method`` -- authorization mode. Possible values: - - * ``local`` -- only local users can be authorized in the application. "Local" refers to users created in the application. - * ``ldap`` -- only LDAP users can be authorized. - * ``local+ldap`` -- both local and LDAP users can be authorized. - -* ``domain`` -- domain name that is used in the domain login ID (``user_name@domain``). - -* ``organizational_units`` -- names of the organizational units or user groups. - -* ``hosts`` -- LDAP server addresses. - -* ``use_tls`` -- boolean flag that defines TLS usage. Defaults to ``false``. - -* ``use_active_directory`` -- boolean flag that defines usage of the Active Directory. Defaults to ``false``. - If set to ``true``, use the login ID in the email format (``user_name@domain``). - The ID should be equal to the ``userPrincipalName`` Active Directory attribute value because the latter is used in the Active Directory filter. - -* ``search_timeout`` -- LDAP server response timeout. Defaults to 2 seconds. - -* ``roles`` -- user roles assigned to a user depending on the LDAP groups the user belongs to: - - * ``role`` -- user role; - * ``domain_groups`` -- LDAP groups where ``cn`` -- common name; ``ou`` -- organization unit name; ``dc`` -- domain component. - -* ``options`` -- the OpenLDAP library options. Supported options: - - * LDAP_OPT_X_TLS_REQUIRE_CERT - * LDAP_OPT_PROTOCOL_VERSION - * LDAP_OPT_DEBUG_LEVEL - * LDAP_OPT_X_TLS_CACERTFILE - * LDAP_OPT_X_TLS_CACERTDIR. - - For description of the options, refer to the `OpenLDAP documentation `__. - -* ``enabled`` -- boolean flag. If set to ``true``, enables mandatory authentication mode in the application web interface. - +.. 
TODO: rewrite for TCM and rename the file \ No newline at end of file diff --git a/doc/enterprise/dev.rst b/doc/enterprise/dev.rst index b1c5a08220..f16cc41598 100644 --- a/doc/enterprise/dev.rst +++ b/doc/enterprise/dev.rst @@ -1,448 +1,6 @@ .. _enterprise-app-development: -=============================================================================== Developer's guide -=============================================================================== +================= -To develop an application, use the Tarantool Cartridge framework that is -:ref:`installed ` as part of Tarantool Enterprise Edition. - -Here is a summary of the commands you need: - -#. Create a cluster-aware application from the template: - - .. code-block:: bash - - $ tt create cartridge --name -d /path/to - -#. Develop your application: - - .. code-block:: bash - - $ cd /path/to/ - $ ... - -#. Package your application: - - .. code-block:: bash - - $ tt pack [rpm|tgz] /path/to/ - -#. Deploy your application: - - * For ``rpm`` package: - - 1. Upload the package to all servers dedicated to Tarantool. - 2. Install the package: - - .. code-block:: bash - - $ yum install -.rpm - - 3. Launch the application. - - .. code-block:: bash - - $ systemctl start - - * For ``tgz`` archive: - - 1. Upload the archive to all servers dedicated to Tarantool. - 2. Unpack the archive: - - .. code-block:: bash - - $ tar -xzvf -.tar.gz -C /home//apps - - 3. Launch the application - - .. code-block:: bash - - $ tarantool init.lua - -For details and examples, consult the following documentation: - -* a `getting started guide `_ - that walks you through developing and deploying a simple clustered application using - Tarantool Cartridge, -* a :doc:`detailed manual ` - on creating and managing clustered Tarantool applications using - Tarantool Cartridge. 
- -Further on, this guide focuses on Enterprise-specific developer features available -on top of Tarantool Community Edition with the Tarantool Cartridge framework: - -* :ref:`LDAP authorization in the web interface `, -* :ref:`environment-independent applications `, -* :ref:`sample applications with Enterprise flavors `. - -.. _ldap_auth: - -------------------------------------------------------------------------------- -Implementing LDAP authorization in the web interface -------------------------------------------------------------------------------- - -If you run an LDAP server in your organization, you can connect Tarantool -Enterprise to it and let it handle the authorization. In this case, follow the -:ref:`general recipe ` -where in the first step add the ``ldap`` module to the ``.rockspec`` file -as a dependency and consider implementing the ``check_password`` function -the following way: - -.. code-block:: Lua - :emphasize-lines: 4, 10, 13 - - -- auth.lua - -- Require the LDAP module at the start of the file - local ldap = require('ldap') - ... - -- Add a function to check the credentials - local function check_password(username, password) - - -- Configure the necessary LDAP parameters - local user = string.format("cn=%s,ou=tarantool,dc=glauth,dc=com", username) - - -- Connect to the LDAP server - local ld, err = ldap.open("localhost:3893", user, password) - - -- Return an authentication success or failure - if not ld then - return false - end - return true - end - ... - -.. _enterprise-env-independent-apps: - --------------------------------------------------------------------------------- -Delivering environment-independent applications --------------------------------------------------------------------------------- - -Tarantool Enterprise Edition allows you to build environment-independent applications. 
- -An environment-independent application is an assembly (in one directory) of: - -* files with Lua code, -* ``tarantool`` executable, -* plugged external modules (if necessary). - -When started by the ``tarantool`` executable, the application provides a -service. - -The modules are Lua rocks installed into a virtual environment (under the -application directory) similar to Python's ``virtualenv`` and Ruby's bundler. - -Such an application has the same structure both in development and -production-ready phases. All the application-related code resides in one place, -ready to be packed and copied over to any server. - -.. _enterprise-app-package: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Packaging applications -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once custom cluster role(s) are defined and the application is developed, pack -it and all its dependencies (module binaries) together with the ``tarantool`` -executable. - -This will allow you to upload, install, and run your application on any server in -one go. - -To pack the application, say: - -.. code-block:: console - - $ tt pack [rpm|tgz] /path/to/ - -where specify a path to your development environment -- the Git repository -containing your application code, -- and one of the following build options: - -* ``rpm`` to build an RPM package (recommended), or -* ``tgz`` to build a ``tar + gz`` archive - (choose this option only if you do not have root - privileges on servers dedicated for Tarantool Enterprise). - -This will create a package (or compressed archive) named -``--`` (e.g., ``myapp-1.2.1-12.rpm``) -containing your environment-independent application. - -Next, proceed to deploying :ref:`packaged applications ` -(or :ref:`archived ones `) on your servers. - -.. 
_enterprise-packaged-app: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Deploying packaged applications -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To deploy your packaged application, do the following on every server dedicated -for Tarantool Enterprise: - -#. Upload the package created in the :ref:`previous step `. - -#. Install: - - .. code-block:: console - - $ yum install -.rpm - -#. Start one or multiple Tarantool instances with the corresponding services - as described below. - - * A single instance: - - .. code-block:: console - - $ systemctl start - - This will start an instantiated ``systemd`` service that will listen to port - ``3301``. - - * Multiple instances on one or multiple servers: - - .. code-block:: console - - $ systemctl start @instance_1 - $ systemctl start @instance_2 - ... - $ systemctl start @instance_ - - where ``@instance_`` is the instantiated service name - for ``systemd`` with an incremental ```` (unique for every - instance) to be added to the ``3300`` port the instance will listen to - (e.g., ``3301``, ``3302``, etc.). - -#. In case it is a cluster-aware application, proceed to - :ref:`deploying the cluster `. - -To stop all services on a server, use the ``systemctl stop`` command and specify -instance names one by one. For example: - -.. code-block:: console - - $ systemctl stop @instance_1 @instance_2 ... @instance_ - -.. _enterprise-archived-app: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Deploying archived applications -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -While the RPM package places your application to ``/usr/share/tarantool/`` -on your server by default, the ``tar + gz`` archive does not enforce any structure -apart from just the ``/`` directory, so you are responsible for placing -it appropriately. - -.. NOTE:: - - RPM packages are recommended for deployment. 
Deploy archives only if - you do not have root privileges. - -To place and deploy the application, do the following on every server dedicated -for Tarantool Enterprise: - -#. Upload the archive, decompress, and extract it to the ``/home//apps`` - directory: - - .. code-block:: console - - $ tar -xzvf -.tar.gz -C /home//apps - -#. Start Tarantool instances with the corresponding services. - - To manage instances and configuration, use tools like ``ansible``, - ``systemd``, and ``supervisord``. - -#. In case it is a cluster-aware application, proceed to - :ref:`deploying the cluster `. - -.. _enterprise-code-upgrade: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Upgrading code -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -All instances in the cluster are to run the same code. This includes all the -components: custom roles, applications, module binaries, ``tarantool``, and -``tt`` (if necessary) executables. - -Pay attention to possible backward incompatibility that any component may -introduce. This will help you choose a scenario for an -:ref:`upgrade in production `. Keep in mind that -you are responsible for code compatibility and handling conflicts should -inconsistencies occur. - -To upgrade any of the components, prepare a new version of the package (archive): - -#. Update the necessary files in your development environment (directory): - - * Your own source code: custom roles and/or applications. - * Module binaries. - * Executables. Replace them with ones from the new bundle. - -#. Increment the version as described in - :ref:`application versioning `. - -#. Repack the updated files as described in :ref:`packaging applications `. - -#. Choose an upgrade scenario as described in the :ref:`Upgrading in production ` - section. - -.. 
_enterprise-run-app: - -------------------------------------------------------------------------------- -Running sample applications -------------------------------------------------------------------------------- - -The Enterprise distribution package includes sample applications in the -``examples/`` directory that showcase basic Tarantool functionality. - -.. contents:: Sample applications: - :depth: 1 - :local: - -.. _enterprise-pg-write-through-cache: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Write-through cache application for PostgreSQL -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The example in ``pg_writethrough_cache/`` shows how Tarantool can cache data -written *through* it to a PostgreSQL database to speed up the reads. - -The sample application requires a deployed PostgreSQL database and the following -rock modules: - -.. code-block:: console - - $ tt rocks install http - $ tt rocks install pg - $ tt rocks install argparse - -Look through the code in the files to get an understanding of what the application -does. - -To run the application for a local PostgreSQL database, say: - -.. code-block:: console - - $ tarantool cachesrv.lua --binary-port 3333 --http-port 8888 --database postgresql://localhost/postgres - -.. _enterprise-ora-write-behind-cache: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Write-behind cache application for Oracle -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The example in ``ora-writebehind-cache/`` shows how Tarantool can cache writes -and queue them to an Oracle database to speed up both writes and reads. - -.. 
_enterprise-ora-write-behind-cache_reqs: - -******************************************************************************* -Application requirements -******************************************************************************* - -The sample application requires: - -* deployed Oracle database; -* Oracle tools: `Instant Client and SQL Plus `_, - both of version 12.2; - - .. NOTE:: - - In case the Oracle Instant Client errors out on ``.so`` files - (Oracle's dynamic libraries), put them to some directory and add it to the - ``LD_LIBRARY_PATH`` environment variable. - - For example: ``export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/`` - -* rock modules listed in the ``rockspec`` file. - -To install the modules, run the following command in the ``examples/ora_writebehind_cache`` -directory: - -.. code-block:: console - - $ tt rocks make oracle_rb_cache-0.1.0-1.rockspec - -If you do not have a deployed Oracle instance at hand, run a dummy in a Docker -container: - -#. In the browser, log in to `Oracle container registry `_, - click **Database**, and accept the Oracle's Enterprise Terms and Restrictions. - -#. In the ``ora-writebehind-cache/`` directory, log in to the repository under - the Oracle account, pull, and run an image using the prepared scripts: - - .. code-block:: console - - $ docker login container-registry.oracle.com - Login: - Password: - Login Succeeded - $ docker pull container-registry.oracle.com/database/enterprise:12.2.0.1 - $ docker run -itd \ - -p 1521:1521 \ - -p 5500:5500 \ - --name oracle \ - -v "$(pwd)"/setupdb/configDB.sh:/home/oracle/setup/configDB.sh \ - -v "$(pwd)"/setupdb/runUserScripts.sh:/home/oracle/setup/runUserScripts.sh \ - -v "$(pwd)"/startupdb:/opt/oracle/scripts/startup \ - container-registry.oracle.com/database/enterprise:12.2.0.1 - -When all is set and done, run the example application. - -.. 
_enterprise-ora-write-behind-cache_run: - -******************************************************************************* -Running write-behind cache -******************************************************************************* - -To launch the application, run the following in the ``examples/ora_writebehind_cache`` -directory: - -.. code-block:: console - - $ tarantool init.lua - -The application supports the following requests: - -* Get: ``GET http://:/account/id``; -* Add: ``POST http://:/account/`` with the following data: - - .. code-block:: console - - {"clng_clng_id":1,"asut_asut_id":2,"creation_data":"01-JAN-19","navi_user":"userName"} - -* Update: ``POST http://:/account/id`` with the same data as in the add request; -* Remove: ``DELETE http://:/account/id`` where ``id`` is an account identifier. - -Look for sample CURL scripts in the ``examples/ora_writebehind_cache/testing`` -directory and check ``README.md`` for more information on implementation. - -.. _enterprise-docker-app: - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Hello-world application in Docker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The example in the ``docker/`` directory contains a hello-world application -that you can pack in a Docker container and run on CentOS 7. - -The ``hello.lua`` file is the entry point and it is very bare-bones, so you -can add your code here. - -#. To build the container, say: - - .. code-block:: console - - $ docker build -t tarantool-enterprise-docker -f Dockerfile ../.. - -#. To run it: - - .. code-block:: console - - $ docker run --rm -t -i tarantool-enterprise-docker +.. 
TODO: rewrite completely for new EE application development workflow \ No newline at end of file diff --git a/doc/enterprise/index.rst b/doc/enterprise/index.rst index b301c6cf4e..474c12bfac 100644 --- a/doc/enterprise/index.rst +++ b/doc/enterprise/index.rst @@ -14,11 +14,10 @@ Tarantool Enterprise Edition The Enterprise Edition provides an `extended feature set `__ for developing and managing clustered Tarantool applications, for example: - * :ref:`Static package ` - for standalone Linux systems. - * Tarantool :ref:`bindings to OpenLDAP `. + * Static package for standalone Linux systems. + * Tarantool bindings to OpenLDAP. * Security :ref:`audit log `. - * Enterprise :ref:`database connectivity `: + * Enterprise database connectivity: Oracle and any ODBC-supported DBMS (for example, MySQL, Microsoft SQL Server). * SSL support for :ref:`traffic encryption `. @@ -36,11 +35,10 @@ Tarantool Enterprise Edition Enterprise-версия предлагает `дополнительные возможности `__ по разработке и эксплуатации кластерных приложений, например: - * :ref:`Статическая сборка ` - для автономных Linux-систем. - * :ref:`Модуль интеграции с OpenLDAP `. + * Статическая сборка для автономных Linux-систем. + * Модуль интеграции с OpenLDAP. * :ref:`Журнал аудита безопасности `. - * Подключения к :ref:`корпоративным базам данных `: + * Подключения к корпоративным базам данных: Oracle и любым СУБД с интерфейсом ODBC (MySQL, Microsoft SQL Server и т.д.). (например, MySQL, Microsoft SQL Server). * :ref:`Шифрование трафика ` с помощью SSL. 
@@ -64,7 +62,6 @@ Tarantool Enterprise Edition flight_recorder audit_log space_upgrade - migration system_metrics deprecated rocksref diff --git a/doc/enterprise/migration.rst b/doc/enterprise/migration.rst deleted file mode 100644 index 71af6ddddf..0000000000 --- a/doc/enterprise/migration.rst +++ /dev/null @@ -1,60 +0,0 @@ -Migration from Tarantool Cartridge -================================== - -If your company uses a service based on Tarantool Community Edition and -Tarantool Cartridge, follow the steps below to update these components to -Tarantool Enterprise Edition. - -As a reference, the instructions below use a template service created with -:ref:`tt`, the Tarantool CLI utility. - -Service build pipeline ----------------------- - -Get access to the source code and build pipeline of your service. Here is an -example of what the service build pipeline might look like for CentOS/RHEL 7: - -.. code-block:: bash - - curl -L https://tarantool.io/release/2/installer.sh | bash - yum -y install tarantool tarantool-devel tt git gcc gcc-с++ cmake - tt pack rpm - - -Update the pipeline -------------------- - -In the installation section of your pipeline, replace open-source ``tarantool`` -packages with Tarantool Enterprise SDK: - -.. code-block:: bash - - curl -L \ - https://${TOKEN}@download.tarantool.io/enterprise/release/${OS}/${ARCH}/${VERSION}/tarantool-enterprise-sdk-${VERSION_OS_ARCH_POSTFIX}.tar.gz \ - > sdk.tar.gz - - # for example, the URL for the Linux build of Tarantool 2.10.4 for the x86_64 platform will be: - # https://${TOKEN}@download.tarantool.io/enterprise/release/linux/x86_64/2.10/tarantool-enterprise-sdk-gc64-2.10.4-0-r523.linux.x86_64.tar.gz - - tar -xvf sdk.tar.gz - source tarantool-enterprise/env.sh - tt pack rpm - -Now the pipeline will produce a new service artifact, which includes -Tarantool Enterprise Edition. 
- -Update the service ------------------- - -Update your service to the new version like you usually update Tarantool in -your organization. You don't have to interrupt access to the service. -To learn how to do it with ``ansible-cartridge``, -`check this example `__. - - -That's it! ----------- - -You can now use Tarantool Enterprise Edition's features in your installation. -For example, to enable the audit log, -:ref:`set up the audit_log parameter in your node configuration `. diff --git a/doc/enterprise/rocksref.rst b/doc/enterprise/rocksref.rst index e85f1f79f8..0efc91a046 100644 --- a/doc/enterprise/rocksref.rst +++ b/doc/enterprise/rocksref.rst @@ -14,27 +14,6 @@ Open source modules * `avro-schema `_ is an assembly of `Apache Avro `_ schema tools; -* :doc:`cartridge ` - is a high-level cluster management interface that contains - several modules: - - * ``rpc`` implements remote procedure calls between cluster instances and - allows roles running on some instances to interact with other roles on - other instances. - * ``service-registry`` implements inter-role interaction and allows different - roles to interact with each other in the scope of one instance. - * ``confapplier`` implements cluster-wide configuration validation and - application via a two-phase commit. - * ``auth`` manages authentication. - * ``pool`` reuses Tarantool's ``net.box`` connections. - * ``admin`` implements administration functions. - -* :doc:`cartridge-cli ` - is the command-line interface for the ``cartridge`` module. - - .. important:: - - ``cartridge-cli`` is deprecated in favor of the :ref:`tt CLI utility `. * :ref:`checks ` is a type checker of functional arguments. This library that declares @@ -85,8 +64,6 @@ Closed source modules same high-level language (Lua) and with minimal delay. * :doc:`task ` is a module for managing background tasks in a Tarantool cluster. -* :doc:`space-explorer ` - is a module for exploring Tarantool spaces in ``cartridge``. .. 
_enterprise-rocks-install: diff --git a/doc/enterprise/setup.rst b/doc/enterprise/setup.rst index 551e4d5c34..03bc35613a 100644 --- a/doc/enterprise/setup.rst +++ b/doc/enterprise/setup.rst @@ -88,9 +88,7 @@ the administration server should be able to access the following TCP ports on Tarantool servers: * 22 to use the SSH protocol, -* ports specified in - :ref:`instance configuration ` - (``http_port`` parameter) to monitor the HTTP-metrics. +* ports specified in instance configuration to monitor the HTTP-metrics. Additionally, it is recommended to apply the following settings for ``sysctl`` on all Tarantool servers: @@ -137,13 +135,6 @@ Archive contents: ``tarantoolctl`` is deprecated in favor of the :ref:`tt CLI utility `. -* ``cartridge`` is the utility script to help you set up a development - environment for applications and pack them for easy deployment. - - .. important:: - - ``cartridge`` utility is deprecated in favor of the :ref:`tt CLI utility `. - * ``examples/`` is the directory containing sample applications: * ``pg_writethrough_cache/`` is an application showcasing how Tarantool can diff --git a/doc/getting_started/change_schema_dynamically.rst b/doc/getting_started/change_schema_dynamically.rst deleted file mode 100644 index 3577cf0349..0000000000 --- a/doc/getting_started/change_schema_dynamically.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. _getting_started-schema_changing: - -================================================================================= -Updating the data schema -================================================================================= - -When working with data, it is sometimes necessary to change the original data schema. - -In the previous sections, we described a cluster-wide data schema in the YAML format. -The ``ddl`` module is responsible for applying the schema on the cluster. This module does not allow -to modify the schema after applying it. 
- -The easiest way to change it is to delete the database snapshots and create -a schema from scratch. Of course, this is only acceptable during application -development and debugging. -For production scenarios, read the section on :ref:`migrations `. - -To remove snapshots: - -* If you are using Tarantool in the cloud, - click the "Reset configuration" button. -* If you've started Tarantool locally via ``cartridge start``, - run ``cartridge clean`` in the application directory. -* If you've started Tarantool in a different way, - delete the snapshots and xlogs manually. - These files have the .snap and .xlog extensions respectively, - and they are located in the Tarantool working directory. - -To understand how the Tarantool data schema works, read the :ref:`Data model ` section. diff --git a/doc/getting_started/connecting_to_cluster.rst b/doc/getting_started/connecting_to_cluster.rst deleted file mode 100644 index 06665a5340..0000000000 --- a/doc/getting_started/connecting_to_cluster.rst +++ /dev/null @@ -1,68 +0,0 @@ -.. _connecting_to_cluster: - -================================================================================= -Connecting to the cluster -================================================================================= - -In the last section, we set up a cluster, created a schema, and wrote data through the HTTP API. -Now we can connect to the cluster from code and work with data. - -.. note:: - - If you are using Tarantool without Cartridge, go to the - :ref:`Connecting from your favorite language ` section. - If you are undergoing training, read on. - -You may have noticed that we used the ``crud`` module in the HTTP handler code. -The code looked something like this: - -.. code:: lua - - local crud = require ('crud') - - function add_user(request) - local result, err = crud.insert_object ('users', {user_id = uuid.new (), fullname = fullname}) - end - - -This module allows you to work with data in a cluster. 
The syntax here is similar to -what the Tarantool ``box`` module offers. -You will learn more about the ``box`` module in the following sections. - -The ``crud`` module contains a set of stored procedures. -To work with them, we must activate special roles on all routers and storages. -We selected those roles in the previous section, so we don't need to do anything. -The roles are named accordingly: "crud-router" and "crud-storage". - -To write and read data in the Tarantool cluster from code, we will call stored -procedures of the ``crud`` module. - -In Python, it looks like this: - -.. code:: python - - res = conn.call('crud.insert', 'users', , 'Jim Carrey') - users = conn.call('crud.select', 'users', {limit: 100}) - -All functions of the ``crud`` module are described -in the `README of our GitHub repository `_. - -Here is an incomplete list: - -* ``insert`` -* ``select`` -* ``get`` -* ``delete`` -* ``min``\/``max`` -* ``replace``\/``upsert`` -* ``truncate`` - -To learn how to call stored procedures in your programming language, see the corresponding section: - -* :ref:`for Python ` -* :ref:`for Go ` -* :ref:`for PHP ` -* :doc:`for C++ ` - -For connectors to other languages, check the README for the connector of your choice -`on GitHub `_. diff --git a/doc/getting_started/getting_started_imcp.rst b/doc/getting_started/getting_started_imcp.rst deleted file mode 100644 index 8c232eb0ac..0000000000 --- a/doc/getting_started/getting_started_imcp.rst +++ /dev/null @@ -1,532 +0,0 @@ -.. _getting_started-imcp: - -First steps -=========== - -This is the recommended guide for getting to know the product. - -.. note:: - - You also might want to check out our - :ref:`basic Tarantool tutorial `. - It shows how to launch one Tarantool instance, - create a space, build an index, and write data. - - We recommend that beginners go through the current tutorial first - and then see the basic tutorial to dive deeper into the product. 
- -If you just want to run the complete tutorial code quickly, go to -:ref:`Launching an application `. - -Installation -~~~~~~~~~~~~ - -**Launch in the cloud** - -This tutorial is also available in the cloud. It's free, and it's the fastest way to start. -To follow this tutorial in the cloud, go to `try.tarantool.io `__. - -However, you will still need to install Tarantool -if you want to get better acquainted with it. - -**Run locally** - -**For Linux/macOS users:** - -* Install Tarantool from the `Download page `__. -* Install `Node.js `_, which is required for the tutorial frontend. -* Install the ``cartridge-cli`` utility through your package manager: - - .. code-block:: bash - - sudo yum install cartridge-cli - - .. code-block:: bash - - brew install cartridge-cli - - To learn more, check the ``cartridge-cli`` - :doc:`installation guide `. - -* Clone the `Getting Started tutorial repository `__. - - Everything is ready and organized in this repository. - In the cloned directory, run the following: - - .. code-block:: bash - - cartridge build - cartridge start - -.. note:: - - In case of a problem with `cartridge build`, run it with the `--verbose` flag - to learn about the source of the problem. If there is a problem with Node.js (`npm`): - - 1. Check that Node.js is in the `$PATH`. - - 2. Try forcefully removing the `node_modules` directory from the dependencies' directories: - - `rm -rf analytics/node_modules front/node_modules` - - After that, try running `cartridge build` again. - If all else fails, please `file us an issue on GitHub `_. - - -You're all set! At http://localhost:8081, you will see the Tarantool Cartridge UI. - -**Running in Docker:** - -.. code-block:: bash - - docker run -p 3301:3301 -p 8081:8081 tarantool/getting-started - -That's it! At http://localhost:8081, you will see the Tarantool Cartridge UI. - -**For Windows users:** - -Use Docker to get started. 
- - -Getting to know Tarantool -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Today, we will solve a high-performance challenge for TikTok using -Tarantool. - -You will implement a counter of likes for videos. -First, you will create base tables and search indexes. -Then you will set up an HTTP API for mobile clients. - -The challenge doesn't require you to write any additional code. -Everything will be implemented on the Tarantool platform. - -If you accidentally do something wrong while following the instructions, -there is a magic button to help you reset all changes. -It is called **"Reset Configuration"**. You can find it at the top of the "Cluster" page. - -Configuring a cluster [1 minute] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Everything you need to know to get started:** - -A Tarantool cluster has two service roles: router and storage. - -* Storage is used to store data. -* Router is an intermediary between clients and storages. - It accepts a client's request, takes data from the proper storage, - and returns it to the client. - -We see that we have 5 unconfigured instances on the "Cluster" tab. - -.. figure:: images/hosts-list.png - :alt: List of all nodes - - List of all nodes - -Let's create one router and one storage for a start. - -First, click the "Configure" button on the "router" instance and configure -it as in the screenshot below: - -.. figure:: images/router-configuration.png - :alt: Configuring a router - - Configuring a router - -Next, we configure the "s1-master" instance: - -.. figure:: images/storage-configuration.png - :alt: Configuring s1-master - - Configuring s1-master - -It will look something like this: - -.. figure:: images/first-configuration-result.png - :alt: Cluster view after first setup - - Cluster view after first setup - -Let's enable sharding in the cluster using the "Bootstrap vshard" button. It is -located in the top right corner. 
- -Creating a data schema [2 minutes] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Let's start with the data schema -- take a look at the **Code** tab on the left. - -There you can find a file called ``schema.yml``. In this file, you can -describe the entire cluster's data schema, edit the current schema, -validate its correctness, and apply it to the whole cluster. - -First, let's create the necessary tables. In Tarantool, they are called spaces. - -We need to store: - -* Users -* Videos with descriptions -* Likes for each video - -Copy the schema description from the code block below and paste it in the ``schema.yml`` file on the **Code** tab. -Click the "Apply" button. -After that, the data schema will be described in the cluster. - -This is what our data schema will look like: - - .. code-block:: yaml - - spaces: - users: - engine: memtx - is_local: false - temporary: false - sharding_key: - - "user_id" - format: - - {name: bucket_id, type: unsigned, is_nullable: false} - - {name: user_id, type: uuid, is_nullable: false} - - {name: fullname, type: string, is_nullable: false} - indexes: - - name: user_id - unique: true - parts: [{path: user_id, type: uuid, is_nullable: false}] - type: HASH - - name: bucket_id - unique: false - parts: [{path: bucket_id, type: unsigned, is_nullable: false}] - type: TREE - videos: - engine: memtx - is_local: false - temporary: false - sharding_key: - - "video_id" - format: - - {name: bucket_id, type: unsigned, is_nullable: false} - - {name: video_id, type: uuid, is_nullable: false} - - {name: description, type: string, is_nullable: true} - indexes: - - name: video_id - unique: true - parts: [{path: video_id, type: uuid, is_nullable: false}] - type: HASH - - name: bucket_id - unique: false - parts: [{path: bucket_id, type: unsigned, is_nullable: false}] - type: TREE - - likes: - engine: memtx - is_local: false - temporary: false - sharding_key: - - "video_id" - format: - - {name: bucket_id, type: unsigned, is_nullable: false} - - {name: 
like_id, type: uuid, is_nullable: false} - - {name: user_id, type: uuid, is_nullable: false} - - {name: video_id, type: uuid, is_nullable: false} - - {name: timestamp, type: string, is_nullable: true} - indexes: - - name: like_id - unique: true - parts: [{path: like_id, type: uuid, is_nullable: false}] - type: HASH - - name: bucket_id - unique: false - parts: [{path: bucket_id, type: unsigned, is_nullable: false}] - type: TREE - -It's simple. Let's take a closer look at the essential points. - -Tarantool has two built-in storage engines: memtx and vinyl. -memtx stores all data in RAM while asynchronously writing to -disk so that nothing gets lost. - -Vinyl is a classic engine for storing data on the -hard drive. It is optimized for write-intensive scenarios. - -In TikTok, there are a lot of simultaneous readings and -posts: users watch videos, like them, and comment on them. -Therefore, let's use memtx. - -The configuration above describes three memtx spaces (tables) -and the necessary indexes for each of the spaces. - -Each space has two indexes: - -* The primary key, which is required to read/write data. -* An index on the bucket_id field, which is a service field used for sharding. - -**Important:** The name ``bucket_id`` is reserved. If you choose -another name, sharding won't work for this space. -If you don't use sharding in your project, you can remove the second index. - -To understand which field to shard data by, Tarantool uses -``sharding_key``. ``sharding_key`` points to fields in the space by -which database records will be sharded. There can be more than one such field, but -in this example, we will only use one. When some data is inserted, -Tarantool forms a hash from this field, calculates the bucket number, -and selects the storage to record the data into. - -Yes, buckets can repeat, and each storage stores a specific range of buckets. 
- -Here are a couple more interesting facts: - -* The ``parts`` field in the index description can contain several fields, - which allows building a composite index. You won't need it in this tutorial. -* Tarantool does not support foreign keys, so you have to check manually - upon insertion - that ``video_id`` and ``user_id`` exist in the ``likes`` space. - -Writing data [5 minutes] -~~~~~~~~~~~~~~~~~~~~~~~~ - -We will write data to the Tarantool cluster using the CRUD module. -You don't have to specify the shard you want to read from or write to -- the module -does it for you. - -**Important:** All cluster operations must be performed only on the router -and using the CRUD module. - -Let's connect the CRUD module in the code and write three procedures: - -* User creation -* Adding a video -* Liking a video - -The procedures must be described in a special file. To do this, go to -the "Code" tab. Create a new directory called ``extensions``, and -in this directory, create the file ``api.lua``. - -Paste the code below into ``api.lua`` and click "Apply". - -.. 
code-block:: lua - - local cartridge = require('cartridge') - local crud = require('crud') - local uuid = require('uuid') - local json = require('json') - - function add_user(request) - local fullname = request:post_param("fullname") - local result, err = crud.insert_object('users', {user_id = uuid.new(), fullname = fullname}) - if err ~= nil then - return {body = json.encode({status = "Error!", error = err}), status = 500} - end - - return {body = json.encode({status = "Success!", result = result}), status = 200} - end - - function add_video(request) - local description = request:post_param("description") - local result, err = crud.insert_object('videos', {video_id = uuid.new(), description = description}) - if err ~= nil then - return {body = json.encode({status = "Error!", error = err}), status = 500} - end - - return {body = json.encode({status = "Success!", result = result}), status = 200} - end - - function like_video(request) - local video_id = request:post_param("video_id") - local user_id = request:post_param("user_id") - - local result, err = crud.insert_object('likes', {like_id = uuid.new(), - video_id = uuid.fromstr(video_id), - user_id = uuid.fromstr(user_id)}) - if err ~= nil then - return {body = json.encode({status = "Error!", error = err}), status = 500} - end - - return {body = json.encode({status = "Success!", result = result}), status = 200} - end - - return { - add_user = add_user, - add_video = add_video, - like_video = like_video, - } - -Setting up HTTP API [2 minutes] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Clients will visit the Tarantool cluster using the HTTP protocol. -The cluster already has a built-in HTTP server. - -To configure HTTP paths, you need to write a configuration -file. Go to the "Code" tab. Create the file ``config.yml`` -in the ``extensions`` directory, which you created on the last step. - -Paste the configuration example below into ``config.yml`` and click "Apply". - -.. 
code-block:: yaml - - --- - functions: - - add_user: - module: extensions.api - handler: add_user - events: - - http: {path: "/add_user", method: POST} - - add_video: - module: extensions.api - handler: add_video - events: - - http: {path: "/add_video", method: POST} - - like_video: - module: extensions.api - handler: like_video - events: - - http: {path: "/like_video", method: POST} - ... - -Done! Let's make test requests from the console. - -.. code-block:: bash - - curl -X POST --data "fullname=Taran Tool" url/add_user - -.. note:: - - In the requests, substitute ``url`` with the address of your sandbox. - The protocol must be strictly HTTP. - - For example, if you're following this tutorial with Try Tarantool, this request will look something like this - (note that your hash is different): - - .. code-block:: bash - - curl -X POST --data "fullname=Taran Tool" http://artpjcvnmwctc4qppejgf57.try.tarantool.io/add_user - - But if you've bootstrapped Tarantool locally, the request will look as follows: - - .. code-block:: bash - - curl -X POST --data "fullname=Taran Tool" http://localhost:8081/add_user - -We've just created a user and got their UUID. Let's remember it. - -.. code-block:: bash - - curl -X POST --data "description=My first tiktok" url/add_video - -Let's say a user has added their first video with a description. -The video clip also has a UUID. Let's remember it, too. - -In order to "like" the video, you need to specify the user UUID and the video UUID from the previous steps. -Substitute the ellipses in the command below with the corresponding UUIDs: - -.. code-block:: bash - - curl -X POST --data "video_id=...&user_id=..." url/like_video - -The result will be something like this: - -.. figure:: images/console.png - :alt: Test queries in the console - - Test queries in the console - -In our example, you can "like" the video as many times as you want. 
-It makes no sense in the real life, but it will help us understand how -sharding works -- more precisely, the ``sharding_key`` parameter. - -Our ``sharding_key`` for the ``likes`` is ``video_id``. -We also specified a ``sharding_key`` for the ``videos`` space. It means -that likes will be stored on the same storage as videos. -This ensures data locality with regard to storage and allows -getting all the information you need in one network trip to Storage. - -More details are described on the next step. - -Looking at the data [1 minute] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - The following instructions are for Tarantool Enterprise Edition and the Try Tarantool cloud service. - - The Space-Explorer tool is unavailable in the open-source version. Use the console to view data. - - Check our documentation to learn more about :doc:`data viewing `. - To learn how to connect to a Tarantool instance, :ref:`read the basic Tarantool manual `. - - -Go to the "Space-Explorer" tab to see all the nodes in the cluster. -As we have only one storage and one router started so far, the data is stored -on only one node. - -Let's go to the node ``s1-master``: click "Connect" and select the necessary space. -Check that everything is in place and move on. - -.. figure:: images/hosts.png - :alt: Space Explorer, host list - - Space Explorer, host list - -.. figure:: images/likes.png - :alt: Space Explorer, view likes - - Space Explorer, viewing likes - - -Scaling the cluster [1 minute] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Let's create a second shard. Click on the "Cluster" tab, select -``s2-master``, and click "Configure". Select the roles as shown in the picture: - -.. figure:: images/configuring-server.png - :alt: Cluster, new shard configuration screen - - Cluster, new shard configuration screen - -Click on the necessary roles and create a shard (replica set). 
- -Checking how sharding works [1 minute] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now we have two shards -- two logical nodes that -share data among themselves. The router decides what piece of data goes to what shard. -By default, the router uses the hash function from the field ``sharding_key`` -we've specified in the DDL. - -To enable a new shard, you need to set its weight to one. -Go back to the "Cluster" tab, open the ``s2-master`` settings, -set the Replica set weight to 1, and apply. - -Something has already happened. Let's go to Space-Explorer and check the node -``s2-master``. It turns out that some of the data from the first shard -has already migrated here! The scaling is done automatically. - -Now let's try adding more data to the cluster via the HTTP API. -We can check back later and make sure that the new data is also evenly -distributed across the two shards. - -Disconnecting a shard for a while [1 minute] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the ``s1-master`` settings, set Replica set weight to 0 and -apply. Wait for a few seconds, then go to Space-Explorer and look at the -data in ``s2-master``. You will see that all the data has been migrated to -the remaining shard automatically. - -Now we can safely disable the first shard for maintenance. - -See also -~~~~~~~~ - -* README of the `DDL `__ module to create - your own data schema. -* README of the `CRUD `__ module to - learn more about the API and create your own cluster queries. - - -To continue to the next steps of the tutorial, -click the button in the bottom right corner -or select the section in the table of contents on the left. 
diff --git a/doc/getting_started/images/configuring-server.png b/doc/getting_started/images/configuring-server.png deleted file mode 100644 index 0ee06f3d07..0000000000 Binary files a/doc/getting_started/images/configuring-server.png and /dev/null differ diff --git a/doc/getting_started/images/console.png b/doc/getting_started/images/console.png deleted file mode 100755 index 602357530e..0000000000 Binary files a/doc/getting_started/images/console.png and /dev/null differ diff --git a/doc/getting_started/images/first-configuration-result.png b/doc/getting_started/images/first-configuration-result.png deleted file mode 100644 index f1bdee66a7..0000000000 Binary files a/doc/getting_started/images/first-configuration-result.png and /dev/null differ diff --git a/doc/getting_started/images/hosts-list.png b/doc/getting_started/images/hosts-list.png deleted file mode 100644 index 199bfe814b..0000000000 Binary files a/doc/getting_started/images/hosts-list.png and /dev/null differ diff --git a/doc/getting_started/images/hosts.png b/doc/getting_started/images/hosts.png deleted file mode 100644 index 59f41c98a3..0000000000 Binary files a/doc/getting_started/images/hosts.png and /dev/null differ diff --git a/doc/getting_started/images/likes.png b/doc/getting_started/images/likes.png deleted file mode 100644 index 164b1daf82..0000000000 Binary files a/doc/getting_started/images/likes.png and /dev/null differ diff --git a/doc/getting_started/images/router-configuration.png b/doc/getting_started/images/router-configuration.png deleted file mode 100644 index 12129afa9c..0000000000 Binary files a/doc/getting_started/images/router-configuration.png and /dev/null differ diff --git a/doc/getting_started/images/scaling.png b/doc/getting_started/images/scaling.png deleted file mode 100644 index d2e75a89eb..0000000000 Binary files a/doc/getting_started/images/scaling.png and /dev/null differ diff --git a/doc/getting_started/images/storage-configuration.png 
b/doc/getting_started/images/storage-configuration.png deleted file mode 100644 index d9a9d7f424..0000000000 Binary files a/doc/getting_started/images/storage-configuration.png and /dev/null differ diff --git a/doc/getting_started/index.rst b/doc/getting_started/index.rst index 3fdcd016a8..ca4215227a 100644 --- a/doc/getting_started/index.rst +++ b/doc/getting_started/index.rst @@ -3,41 +3,15 @@ .. _getting_started: -******************************************************************************** Getting started -******************************************************************************** +*************** -The :ref:`First steps ` section -will get you acquainted with Tarantool in 15 minutes. -We will be creating a basic microservice for TikTok. +.. TODO: New Getting Started https://github.com/tarantool/doc/issues/3636 + +The :ref:`Creating your first Tarantool database ` section +will get you acquainted with Tarantool. We will start Tarantool, create a data schema, and write our first data. You'll get an understanding of the technology and learn about the basic terms and features. -In the :ref:`Connecting to cluster ` section, -we'll show you how to read or write data to Tarantool -from your Python/Go/PHP application or another programming language. - -After connecting to the database for the first time, you might want to change the data schema. -In the section :ref:`Updating the data schema `, -we'll discuss the approaches to changing the data schema and the associated limitations. - -To make our code work with Tarantool, -we may want to transfer some of our data logic to Tarantool. -In the section :ref:`Writing cluster application code `, -we'll write a "Hello, World!" program in the Lua language, -which will work in our Tarantool cluster. -This will give you a basic understanding of how the role mechanism works. -In this way, you'll understand what part of your business logic you would like -to write in/migrate to Tarantool. - -.. 
toctree:: - :maxdepth: 1 - - getting_started_imcp - connecting_to_cluster - change_schema_dynamically - writing_cluster_code - To continue exploring Tarantool and its ecosystem, you might want to check out Tarantool :doc:`tutorials and guides <../how-to/index>`. -The :ref:`Cartridge beginner tutorial ` can also be found there. diff --git a/doc/getting_started/writing_cluster_code.rst b/doc/getting_started/writing_cluster_code.rst deleted file mode 100644 index 7a8f8ec10a..0000000000 --- a/doc/getting_started/writing_cluster_code.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _getting_started-wrirting_cluster-code: - -================================================================================= -Writing code in a cluster application -================================================================================= - -In the "Getting Started" tutorial, -we wrote the application code directly in the browser. -We used the file ``config.yml`` to describe HTTP endpoint handlers. -This is a convenient and fast way to write code -that allows you to use Tarantool as a repository without any additional HTTP service. -This functionality is implemented through the ``cartridge-extensions`` module. -It is also included in the tutorial default application. - -However, in Tarantool, you can implement absolutely any business logic on top of a cluster. -This :doc:`Cartridge getting started section ` -covers the cluster roles mechanism and writing a cluster application from scratch. diff --git a/doc/how-to/getting_started_cartridge.rst b/doc/how-to/getting_started_cartridge.rst deleted file mode 100644 index dc21e2edc2..0000000000 --- a/doc/how-to/getting_started_cartridge.rst +++ /dev/null @@ -1,231 +0,0 @@ -.. _getting_started_cartridge: - -Creating your first Tarantool Cartridge application -=================================================== - -Here we'll walk you through developing a simple cluster application. - -First, -:doc:`set up the development environment `. 
- -Next, create an application named ``myapp``. Run: - -.. code-block:: console - - $ cartridge create --name myapp - -This will create a Tarantool Cartridge application in the ``./myapp`` directory, -with a handful of -:doc:`template files and directories ` -inside. - -Go inside and make a dry run: - -.. code-block:: console - - $ cd ./myapp - $ cartridge build - $ cartridge start - -This will build the application locally, start 5 instances of Tarantool -and a :ref:`stateboard ` (state provider), and -run the application as it is, with no business logic yet. - -Why 5 instances and a stateboard? See the ``instances.yml`` file in your application directory. -It contains the :ref:`configuration ` of all instances -that you can use in the cluster. By default, it defines configuration for 5 -Tarantool instances and a stateboard. - -.. code-block:: yaml - - --- - myapp.router: - advertise_uri: localhost:3301 - http_port: 8081 - - myapp.s1-master: - advertise_uri: localhost:3302 - http_port: 8082 - - myapp.s1-replica: - advertise_uri: localhost:3303 - http_port: 8083 - - myapp.s2-master: - advertise_uri: localhost:3304 - http_port: 8084 - - myapp.s2-replica: - advertise_uri: localhost:3305 - http_port: 8085 - - myapp-stateboard: - listen: localhost:4401 - password: passwd - -You can already see these instances in the cluster management web interface at -http://localhost:8081 (here 8081 is the HTTP port of the first instance -specified in ``instances.yml``). - -.. image:: images/cluster_dry_run-border-5px.png - :align: center - :scale: 40% - -Okay, press ``Ctrl + C`` to stop the cluster for a while. - -Now it’s time to add some business logic to your application. -This will be an evergreen "Hello world!"" -- just to keep things simple. - -Rename the template file ``app/roles/custom.lua`` to ``hello-world.lua``. - -.. code-block:: console - - $ mv app/roles/custom.lua app/roles/hello-world.lua - -This will be your *role*. 
In Tarantool Cartridge, a role is a Lua module that -implements some instance-specific functions and/or logic. -Further on we'll show how to add code to a role, build it, enable and test. - -There is already some code in the role's ``init()`` function. - -.. code-block:: lua - :emphasize-lines: 5-7 - - local function init(opts) -- luacheck: no unused args - -- if opts.is_master then - -- end - - local httpd = assert(cartridge.service_get('httpd'), "Failed to get httpd service") - httpd:route({method = 'GET', path = '/hello'}, function() - return {body = 'Hello world!'} - end) - - return true - end - -This exports an HTTP endpoint ``/hello``. For example, http://localhost:8081/hello -if you address the first instance from the ``instances.yml`` file. -If you open it in a browser after enabling the role (we'll do it here a bit later), -you'll see "Hello world!" on the page. - -Let's add some more code there. - -.. code-block:: lua - :emphasize-lines: 9-11 - - local function init(opts) -- luacheck: no unused args - -- if opts.is_master then - -- end - - local httpd = cartridge.service_get('httpd') - httpd:route({method = 'GET', path = '/hello'}, function() - return {body = 'Hello world!'} - end) - - local log = require('log') - log.info('Hello world!') - - return true - end - -This writes "Hello, world!" to the console when the role gets enabled, -so you'll have a chance to spot this. No rocket science. - -Next, amend ``role_name`` in the "return" section of the ``hello-world.lua`` file. -You'll see this section at the bottom of the file. -This text will be displayed as a label for your role in the cluster management -web interface. - -.. 
code-block:: lua - :emphasize-lines: 2 - - return { - role_name = 'Hello world!', - init = init, - stop = stop, - validate_config = validate_config, - apply_config = apply_config, - -- dependencies = {'cartridge.roles.vshard-router'}, - } - -The final thing to do before you can run the application is to add your role to -the list of available cluster roles in the ``init.lua`` file in the project root directory. - -.. code-block:: lua - :emphasize-lines: 8 - - local cartridge = require('cartridge') - - local ok, err = cartridge.cfg({ - roles = { - 'cartridge.roles.vshard-storage', - 'cartridge.roles.vshard-router', - 'cartridge.roles.metrics', - 'app.roles.hello-world', - }, - }) - -Now the cluster will be aware of your role. - -Why ``app.roles.hello-world``? By default, the role name here should match the -path from the application root (``./myapp``) to the role file -(``app/roles/hello-world.lua``). - -Great! Your role is ready. Re-build the application and re-start the cluster now: - -.. code-block:: console - - $ cartridge build - $ cartridge start - -Now all instances are up, but idle, waiting for you to enable roles for them. - -Instances (replicas) in a Tarantool Cartridge cluster are organized into -*replica sets*. Roles are enabled per replica set, so all instances in a -replica set have the same roles enabled. - -Let's create a replica set containing just one instance and enable your role: - -#. Open the cluster management web interface at http://localhost:8081. -#. Next to the **router** instance, click :guilabel:`Configure`. -#. Check the role ``Hello world!`` to enable it. Notice that the role name here - matches the label text that you specified in the ``role_name`` parameter in - the ``hello-world.lua`` file. -#. (Optionally) Specify the replica set name, for example - "hello-world-replica-set". - - .. image:: images/cluster_create_replica_set-border-5px.png - :align: center - :scale: 40% - -#. 
Click :guilabel:`Create replica set` and see the newly-created replica set - in the web interface. - - .. image:: images/cluster_new_replica_set-border-5px.png - :align: center - :scale: 40% - -Your custom role got enabled. Find the "Hello world!" message in console, -like this: - -.. image:: images/cluster_hello_world_console-border-5px.png - :align: center - :scale: 40% - -Finally, open the HTTP endpoint of this instance at -http://localhost:8081/hello and see the reply to your GET request. - -.. image:: images/cluster_hello_http-border-5px.png - :align: center - :scale: 40% - -Everything is up and running! What's next? - -* Follow the :ref:`administrator's guide ` to set up the rest of the - cluster and try some cool cluster management features -- for example, enable failover. -* Check out the Cartridge :doc:`developer's guide ` - and implement more sophisticated business logic for your role. -* :doc:`Pack ` your application for easy distribution. - Choose what you like: a DEB or RPM package, a TGZ archive, or a Docker image. - Archives and packages can be deployed with `ansible-cartridge `__. -* Read the :doc:`Cartridge documentation `. 
diff --git a/doc/how-to/index.rst b/doc/how-to/index.rst index bf313d80c3..f95685c0af 100644 --- a/doc/how-to/index.rst +++ b/doc/how-to/index.rst @@ -18,7 +18,6 @@ If you are new to Tarantool, please see our Creating Tarantool database getting_started_connectors - getting_started_cartridge db/index getting_started_net_box vshard_quick diff --git a/doc/index.rst b/doc/index.rst index 3c0d334c1b..3255bf13da 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -55,7 +55,6 @@ how-to/index concepts/index CRUD operations - Cluster on Cartridge book/admin/index book/connectors enterprise/index diff --git a/doc/pdf_toc.rst b/doc/pdf_toc.rst index 101621b4b2..97dd5c09f3 100644 --- a/doc/pdf_toc.rst +++ b/doc/pdf_toc.rst @@ -14,7 +14,6 @@ how-to/index concepts/index CRUD operations - book/cartridge/index book/admin/index book/connectors enterprise/index diff --git a/doc/reference/configuration/configuration_reference.rst b/doc/reference/configuration/configuration_reference.rst new file mode 100644 index 0000000000..44f6d16903 --- /dev/null +++ b/doc/reference/configuration/configuration_reference.rst @@ -0,0 +1,10 @@ +.. _configuration_reference: + +Configuration reference +======================= + +This topic describes all :ref:`configuration parameters ` provided by Tarantool. + +.. TODO + https://github.com/tarantool/doc/issues/3664 + diff --git a/doc/reference/configuration/index.rst b/doc/reference/configuration/index.rst index be2a510d02..1840ce5ecc 100644 --- a/doc/reference/configuration/index.rst +++ b/doc/reference/configuration/index.rst @@ -1,503 +1,71 @@ -.. _index-book_cfg: - -================================================================================ -Configuration reference -================================================================================ - -This reference covers all options and parameters which can be set for Tarantool -on the command line or in an :ref:`initialization file `. 
- -Tarantool is started by entering either of the following command: - -.. cssclass:: highlight -.. parsed-literal:: - - $ **tarantool** - - $ **tarantool** *options* - - $ **tarantool** *lua-initialization-file* **[** *arguments* **]** - --------------------------------------------------------------------------------- -Command options --------------------------------------------------------------------------------- - -.. option:: -h, --help - - Print an annotated list of all available options and exit. - -.. _index-tarantool_version: - -.. option:: -v, -V, --version - - Print the product name and version. - - **Example** - - .. code-block:: console - - % tarantool --version - Tarantool 2.11.1-0-g96877bd - Target: Darwin-arm64-Release - ... - - In this example: - - * ``2.11.1`` is a Tarantool version. - Tarantool follows semantic versioning, which is described in the :ref:`Tarantool release policy ` section. - - * ``Target`` is the platform Tarantool is built on. - Platform-specific details may follow this line. - - -.. option:: -e EXPR - - Execute the 'EXPR' string. See also: `lua man page `_. - - **Example** - - .. code-block:: console - - % tarantool -e "print('Hello, world!')" - Hello, world! - -.. option:: -l NAME - - Require the 'NAME' library. See also: `lua man page `_. - - **Example** - - .. code-block:: console - - % tarantool -l luatest.coverage script.lua - -.. option:: -j cmd - - Perform a LuaJIT control command. See also: `Command Line Options `_. - - **Example** - - .. code-block:: console - - % tarantool -j off app.lua - -.. option:: -b ... - - Save or list bytecode. See also: `Command Line Options `_. - - **Example** - - .. code-block:: console - - % tarantool -b test.lua test.out - -.. option:: -d SCRIPT - - Activate a debugging session for 'SCRIPT'. See also: `luadebug.lua `_. - - **Example** - - .. code-block:: console - - % tarantool -d app.lua - - -.. option:: -i [SCRIPT] - - Enter an :ref:`interactive mode ` after executing 'SCRIPT'. 
- - **Example** - - .. code-block:: console - - % tarantool -i - - -.. option:: -- - - Stop handling options. See also: `lua man page `_. - - -.. option:: - - - Stop handling options and execute the standard input as a file. See also: `lua man page `_. - - -.. _index-uri: - --------------------------------------------------------------------------------- -URI --------------------------------------------------------------------------------- - -Some configuration parameters and some functions depend on a URI (Universal Resource Identifier). -The URI string format is similar to the -`generic syntax for a URI schema `_. -It may contain (in order): - -* user name for login -* password -* host name or host IP address -* port number. - -Only a port number is always mandatory. A password is mandatory if a user -name is specified, unless the user name is 'guest'. - -Formally, the URI -syntax is ``[host:]port`` or ``[username:password@]host:port``. -If host is omitted, then "0.0.0.0" or "[::]" is assumed -meaning respectively any IPv4 address or any IPv6 address -on the local machine. -If ``username:password`` is omitted, then the "guest" user is assumed. Some examples: - -.. container:: table - - .. rst-class:: left-align-column-1 - .. rst-class:: left-align-column-2 - - +-----------------------------+------------------------------+ - | URI fragment | Example | - +=============================+==============================+ - | port | 3301 | - +-----------------------------+------------------------------+ - | host:port | 127.0.0.1:3301 | - +-----------------------------+------------------------------+ - | username:password@host:port | notguest:sesame@mail.ru:3301 | - +-----------------------------+------------------------------+ - -In code, the URI value can be passed as a number (if only a port is specified) or a string: - -.. 
code-block:: lua - - box.cfg { listen = 3301 } - - box.cfg { listen = "127.0.0.1:3301" } - -In certain circumstances, a Unix domain socket may be used -where a URI is expected, for example, "unix/:/tmp/unix_domain_socket.sock" or -simply "/tmp/unix_domain_socket.sock". - -The :ref:`uri ` module provides functions that convert URI strings into their -components, or turn components into URI strings. - -.. _index-uri-several: - -Specifying several URIs -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting from version 2.10.0, a user can open several listening iproto sockets on a Tarantool instance -and, consequently, can specify several URIs in the configuration parameters -such as :ref:`box.cfg.listen ` and :ref:`box.cfg.replication `. - -URI values can be set in a number of ways: - -* As a string with URI values separated by commas. - - .. code-block:: lua - - box.cfg { listen = "127.0.0.1:3301, /unix.sock, 3302" } - -* As a table that contains URIs in the string format. - - .. code-block:: lua - - box.cfg { listen = {"127.0.0.1:3301", "/unix.sock", "3302"} } - -* As an array of tables with the ``uri`` field. - - .. code-block:: lua - - box.cfg { listen = { - {uri = "127.0.0.1:3301"}, - {uri = "/unix.sock"}, - {uri = 3302} - } - } - -* In a combined way -- an array that contains URIs in both the string and the table formats. - - .. code-block:: lua - - box.cfg { listen = { - "127.0.0.1:3301", - { uri = "/unix.sock" }, - { uri = 3302 } - } - } - -.. _index-uri-several-params: - -Also, starting from version 2.10.0, it is possible to specify additional parameters for URIs. -You can do this in different ways: - -* Using the ``?`` delimiter when URIs are specified in a string format. - - .. code-block:: lua - - box.cfg { listen = "127.0.0.1:3301?p1=value1&p2=value2, /unix.sock?p3=value3" } - -* Using the ``params`` table: a URI is passed in a table with additional parameters in the "params" table. 
- Parameters in the "params" table overwrite the ones from a URI string ("value2" overwrites "value1" for ``p1`` in the example below). - - .. code-block:: lua - - box.cfg { listen = { - "127.0.0.1:3301?p1=value1", - params = {p1 = "value2", p2 = "value3"} - } - } - -* Using the ``default_params`` table for specifying default parameter values. - - In the example below, two URIs are passed in a table. - The default value for the ``p3`` parameter is defined in the ``default_params`` table - and used if this parameter is not specified in URIs. - Parameters in the ``default_params`` table are applicable to all the URIs passed in a table. - - .. code-block:: lua - - box.cfg { listen = { - "127.0.0.1:3301?p1=value1", - { uri = "/unix.sock", params = { p2 = "value2" } }, - default_params = { p3 = "value3" } - } - } - -The recommended way for specifying URI with additional parameters is the following: - -.. code-block:: lua - - box.cfg { listen = { - {uri = "127.0.0.1:3301", params = {p1 = "value1"}}, - {uri = "/unix.sock", params = {p2 = "value2"}}, - {uri = 3302, params = {p3 = "value3"}} - } - } - -In case of a single URI, the following syntax also works: - -.. code-block:: lua - - box.cfg { listen = { - uri = "127.0.0.1:3301", - params = { p1 = "value1", p2 = "value2" } - } - } - -.. _index-init_label: - --------------------------------------------------------------------------------- -Initialization file --------------------------------------------------------------------------------- - -If the command to start Tarantool includes :codeitalic:`lua-initialization-file`, then -Tarantool begins by invoking the Lua program in the file, which by convention -may have the name "``script.lua``". The Lua program may get further arguments -from the command line or may use operating-system functions, such as ``getenv()``. -The Lua program almost always begins by invoking ``box.cfg()``, if the database -server will be used or if ports need to be opened. 
For example, suppose -``script.lua`` contains the lines - -.. _index-init-example: - -.. code-block:: lua - - #!/usr/bin/env tarantool - box.cfg{ - listen = os.getenv("LISTEN_URI"), - memtx_memory = 33554432, - pid_file = "tarantool.pid", - wal_max_size = 2500 - } - print('Starting ', arg[1]) - -and suppose the environment variable LISTEN_URI contains 3301, -and suppose the command line is ``~/tarantool/src/tarantool script.lua ARG``. -Then the screen might look like this: - -.. code-block:: console - - $ export LISTEN_URI=3301 - $ ~/tarantool/src/tarantool script.lua ARG - ... main/101/script.lua C> Tarantool 2.8.3-0-g01023dbc2 - ... main/101/script.lua C> log level 5 - ... main/101/script.lua I> mapping 33554432 bytes for memtx tuple arena... - ... main/101/script.lua I> recovery start - ... main/101/script.lua I> recovering from './00000000000000000000.snap' - ... main/101/script.lua I> set 'listen' configuration option to "3301" - ... main/102/leave_local_hot_standby I> ready to accept requests - Starting ARG - ... main C> entering the event loop - -If you wish to start an interactive session on the same terminal after -initialization is complete, you can use :ref:`console.start() `. - -.. _index-local_hot_standby: -.. _index-replication_port: -.. _index-slab_alloc_arena: -.. _index-replication_source: -.. _index-snap_dir: -.. _index-wal_dir: -.. _index-wal_mode: -.. _index-checkpoint daemon: - -.. _box_cfg_params: - --------------------------------------------------------------------------------- -Configuration parameters --------------------------------------------------------------------------------- - -Configuration parameters have the form: - -:extsamp:`{**{box.cfg}**}{[{*{key = value}*} [, {*{key = value ...}*}]]}` - -Since ``box.cfg`` may contain many configuration parameters and since some of the -parameters (such as directory addresses) are semi-permanent, it's best to keep -``box.cfg`` in a Lua file. 
Typically this Lua file is the initialization file -which is specified on the Tarantool command line. - -Most configuration parameters are for allocating resources, opening ports, and -specifying database behavior. All parameters are optional. -A few parameters are dynamic, that is, they can be changed at runtime by calling ``box.cfg{}`` a second time. -For example, the command below sets the :ref:`listen port ` to ``3301``. - -.. code-block:: tarantoolsession - - tarantool> box.cfg{ listen = 3301 } - 2023-05-10 13:28:54.667 [31326] main/103/interactive I> tx_binary: stopped - 2023-05-10 13:28:54.667 [31326] main/103/interactive I> tx_binary: bound to [::]:3301 - 2023-05-10 13:28:54.667 [31326] main/103/interactive/box.load_cfg I> set 'listen' configuration option to 3301 - --- - ... - - -To see all the non-null parameters, execute ``box.cfg`` (no parentheses). - -.. code-block:: tarantoolsession - - tarantool> box.cfg - --- - - replication_skip_conflict: false - wal_queue_max_size: 16777216 - feedback_host: https://feedback.tarantool.io - memtx_dir: . - memtx_min_tuple_size: 16 - -- other parameters -- - ... - -To see a particular parameter value, call a corresponding ``box.cfg`` option. -For example, ``box.cfg.listen`` shows the specified :ref:`listen address `. - -.. code-block:: tarantoolsession - - tarantool> box.cfg.listen - --- - - 3301 - ... - - -.. _box-cfg-params-prior: - -Methods of setting and priorities -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tarantool configuration parameters can be specified in different ways. -The priority of parameter sources is the following, from higher to lower: - -* ``box.cfg`` options -* :ref:`environment variables ` -* :ref:`tt configuration ` -* default values. - -.. _box-cfg-params-env: - -Setting via environment variables -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting from version :doc:`2.8.1 `, you can specify configuration parameters via special environment variables. 
-The name of a variable should have the following pattern: ``TT_``, -where ```` is the uppercase name of the corresponding :ref:`box.cfg parameter `. - -For example: - -* ``TT_LISTEN`` -- corresponds to the ``box.cfg.listen`` option. -* ``TT_MEMTX_DIR`` -- corresponds to the ``box.cfg.memtx_dir`` option. - -In case of an array value, separate the array elements by comma without space: - -.. code-block:: console - - export TT_REPLICATION="localhost:3301,localhost:3302" - -If you need to pass :ref:`additional parameters for URI `, use the ``?`` and ``&`` delimiters: - -.. code-block:: console - - export TT_LISTEN="localhost:3301?param1=value1¶m2=value2" +.. _index-book_cfg: +.. _box-cfg-params-ref: -An empty variable (``TT_LISTEN=``) has the same effect as an unset one meaning that the corresponding configuration parameter won't be set when calling ``box.cfg{}``. +Configuration reference (box.cfg) +================================= -.. _box-cfg-params-ref: +.. include:: /concepts/configuration/configuration_code.rst + :start-after: box_cfg_legacy_note_start + :end-before: box_cfg_legacy_note_end -Reference -~~~~~~~~~ -The sections that follow describe all configuration parameters for basic operations, storage, -binary logging and snapshots, replication, networking, logging, and feedback. +This topic describes all configuration parameters +that can be specified :ref:`in code ` using the ``box.cfg`` API. .. contents:: :local: :depth: 1 Basic parameters -^^^^^^^^^^^^^^^^ +---------------- .. include:: cfg_basic.rst Configuring the storage -^^^^^^^^^^^^^^^^^^^^^^^ +----------------------- .. include:: cfg_storage.rst .. _book_cfg_checkpoint_daemon: Checkpoint daemon -^^^^^^^^^^^^^^^^^ +----------------- .. include:: cfg_snapshot_daemon.rst Binary logging and snapshots -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------- .. include:: cfg_binary_logging_snapshots.rst .. _index-hot_standby: Hot standby -^^^^^^^^^^^ +----------- .. 
include:: cfg_hot_standby.rst Replication -^^^^^^^^^^^ +----------- .. include:: cfg_replication.rst Networking -^^^^^^^^^^ +---------- .. include:: cfg_networking.rst Logging -^^^^^^^ +------- .. include:: cfg_logging.rst Feedback -^^^^^^^^ +-------- .. include:: cfg_feedback.rst Deprecated parameters -^^^^^^^^^^^^^^^^^^^^^ +--------------------- .. include:: cfg_deprecated.rst diff --git a/doc/reference/index.rst b/doc/reference/index.rst index eb61be86a6..ae8a1cfce6 100644 --- a/doc/reference/index.rst +++ b/doc/reference/index.rst @@ -10,6 +10,7 @@ Reference .. toctree:: :maxdepth: 2 + configuration/configuration_reference configuration/index tooling/index reference_sql/index diff --git a/doc/reference/reference_rock/index.rst b/doc/reference/reference_rock/index.rst index 279ed717e4..3e19e710b3 100644 --- a/doc/reference/reference_rock/index.rst +++ b/doc/reference/reference_rock/index.rst @@ -9,7 +9,6 @@ This reference covers third-party Lua modules for Tarantool. .. toctree:: :maxdepth: 1 - ../../book/cartridge/index membership Module metrics <../../book/monitoring/index> Module luatest diff --git a/doc/reference/tooling/tt_cli/_includes/cartridge_deprecation_note.rst b/doc/reference/tooling/tt_cli/_includes/cartridge_deprecation_note.rst new file mode 100644 index 0000000000..2d7242f791 --- /dev/null +++ b/doc/reference/tooling/tt_cli/_includes/cartridge_deprecation_note.rst @@ -0,0 +1,5 @@ +.. important:: + + The Tarantool Cartridge framework is deprecated and is not compatible with + Tarantool 3.0 and later. This command is added for backward compatibility with + earlier versions. \ No newline at end of file diff --git a/doc/reference/tooling/tt_cli/build.rst b/doc/reference/tooling/tt_cli/build.rst index 03a101225e..25d5e3948e 100644 --- a/doc/reference/tooling/tt_cli/build.rst +++ b/doc/reference/tooling/tt_cli/build.rst @@ -46,16 +46,34 @@ and located in the application directory. 
the pre-build and post-build scripts can also have names ``cartridge.pre-build`` and ``cartridge.post-build``. -If your application depends on closed-source rocks, or if the build should contain -rocks from a project added as a submodule, **install** these -dependencies using the pre-build script **before** building. -For example, add the following line: +``tt.pre-build`` is helpful when your application depends on closed-source rocks, +or if the build should contain rocks from a project added as a submodule. +You can **install** these dependencies using the pre-build script **before** building. +Example: .. code-block:: bash + #!/bin/sh + + # The main purpose of this script is to build non-standard rocks modules. + # The script will run before `tt rocks make` during application build. + tt rocks make --chdir ./third_party/proj -Learn more about :doc:`pre-build and post-build scripts `. +``tt.post-build`` is a script that runs after ``tt rocks make``. The main purpose +of this script is to remove build artifacts from the final package. Example: + +.. code-block:: bash + + #!/bin/sh + + # The main purpose of this script is to remove build artifacts from the resulting package. + # The script will run after `tt rocks make` during application build. + + rm -rf third_party + rm -rf node_modules + rm -rf doc + Examples -------- diff --git a/doc/reference/tooling/tt_cli/cartridge.rst b/doc/reference/tooling/tt_cli/cartridge.rst index 101bb07b7d..6366ab433f 100644 --- a/doc/reference/tooling/tt_cli/cartridge.rst +++ b/doc/reference/tooling/tt_cli/cartridge.rst @@ -3,6 +3,8 @@ Managing a Cartridge application ================================ +.. include:: _includes/cartridge_deprecation_note.rst + .. 
code-block:: console $ tt cartridge COMMAND {[OPTION ...]|SUBCOMMAND} diff --git a/doc/reference/tooling/tt_cli/index.rst b/doc/reference/tooling/tt_cli/index.rst index d7b9412874..562784dcba 100644 --- a/doc/reference/tooling/tt_cli/index.rst +++ b/doc/reference/tooling/tt_cli/index.rst @@ -60,9 +60,10 @@ Replacement for tarantooctl and Cartridge CLI --------------------------------------------- A multi-purpose tool for working with Tarantool from the command line, ``tt`` has -come to replace :ref:`tarantoolctl ` -and :doc:`Cartridge CLI ` command-line utilities. -The instructions on migration to ``tt`` are provided on the corresponding documentation -pages: :ref:`tarantoolctl ` and :doc:`Cartridge CLI `. +come to replace the deprecated :ref:`tarantoolctl ` +and `Cartridge CLI `_ command-line utilities. +The instructions on migration to ``tt`` are provided in the `tt GitHub repository `_. + +.. TODO: change the link to the migration guide when it's ready. diff --git a/doc/reference/tooling/tt_cli/init.rst b/doc/reference/tooling/tt_cli/init.rst index a4a9cee48c..ec022654dc 100644 --- a/doc/reference/tooling/tt_cli/init.rst +++ b/doc/reference/tooling/tt_cli/init.rst @@ -15,6 +15,8 @@ Creating a tt environment Details ------- +.. include:: _includes/cartridge_deprecation_note.rst + ``tt init`` checks the existence of configuration files for Cartridge (``cartridge.yml``) or the ``tarantoolctl`` utility (``.tarantoolctl``) in the current directory. If such files are found, ``tt`` generates an environment that uses the same diff --git a/doc/reference/tooling/tt_cli/pack.rst b/doc/reference/tooling/tt_cli/pack.rst index 8d1b62b705..6084ef4c4e 100644 --- a/doc/reference/tooling/tt_cli/pack.rst +++ b/doc/reference/tooling/tt_cli/pack.rst @@ -49,6 +49,8 @@ Options Package a Cartridge CLI-compatible archive. + .. include:: _includes/cartridge_deprecation_note.rst + .. 
option:: --deps STRINGS **Applicable to:** ``deb``, ``rpm`` diff --git a/doc/singlehtml.rst b/doc/singlehtml.rst index fbf3575639..55eafc61f1 100644 --- a/doc/singlehtml.rst +++ b/doc/singlehtml.rst @@ -14,7 +14,6 @@ how-to/index concepts/index CRUD operations - Cluster on Cartridge book/admin/index book/connectors reference/index diff --git a/doc/toctree.rst b/doc/toctree.rst index 353ccd2047..e85d81075d 100644 --- a/doc/toctree.rst +++ b/doc/toctree.rst @@ -10,7 +10,6 @@ how-to/index concepts/index CRUD operations - Cluster on Cartridge book/admin/index book/connectors enterprise/index diff --git a/modules/cartridge b/modules/cartridge deleted file mode 160000 index 766d24ba5e..0000000000 --- a/modules/cartridge +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 766d24ba5e3bc2d1b575362e7030ea485aac1faf diff --git a/modules/cartridge-cli b/modules/cartridge-cli deleted file mode 160000 index 0624cc0904..0000000000 --- a/modules/cartridge-cli +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0624cc0904dfa92a4ec31ac2bf80358ea4218d23 diff --git a/pull_submodules.py b/pull_submodules.py index 25c1dfce3e..79a9f27ac4 100755 --- a/pull_submodules.py +++ b/pull_submodules.py @@ -6,8 +6,6 @@ modules_dir = 'modules' modules = { - 'cartridge': 'INPUT_CARTRIDGE', - 'cartridge-cli': 'INPUT_CARTRIDGE_CLI', 'grafana-dashboard': 'INPUT_GRAFANA', 'luatest': 'INPUT_LUATEST', 'metrics': 'INPUT_METRICS',