diff --git a/docs/commands.md b/docs/commands.md index f633e2935..a4023a16a 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -2,8 +2,6 @@ ## AI.CONFIG LOADBACKEND -### AI.CONFIG LOADBACKEND Example - Load a DL/ML backend. By default, RedisAI starts with the ability to set and get tensor data, but setting and running models and scritps requires a computing backend to be loaded. This command allows to dynamically load a backend by specifying the backend identifier and the path to the backend library. Currently, once loaded, a backend cannot be unloaded, and there can be at most one backend per identifier loaded. @@ -16,6 +14,8 @@ AI.CONFIG LOADBACKEND It is possible to specify backends at the command-line when starting `redis-server`, see example below. +### AI.CONFIG LOADBACKEND Example + > Load the TORCH backend ```sql @@ -234,3 +234,80 @@ If needed, input tensors are copied to the device specified in `AI.SCRIPTSET` be ```sql AI.SCRIPTRUN addscript addtwo INPUTS a b OUTPUTS c ``` + +## AI.INFO + +Return information about runs of a `MODEL` or a `SCRIPT`. + +At each `MODELRUN` or `SCRIPTRUN`, RedisAI will collect statistics specific for each `MODEL` or `SCRIPT`, +specific for the node (hence nodes in a cluster will have to be queried individually for their info). +The following information is collected: + +- `KEY`: the key being run +- `TYPE`: either `MODEL` or `SCRIPT` +- `BACKEND`: the type of backend (always `TORCH` for `SCRIPT`) +- `DEVICE`: the device where the run has been executed +- `DURATION`: cumulative duration in microseconds +- `SAMPLES`: cumulative number of samples obtained from the 0-th (batch) dimension (for `MODEL` only) +- `CALLS`: number of calls +- `ERRORS`: number of errors generated after the run has been submitted (i.e. excluding errors generated during parsing of the command) + +```sql +AI.INFO +``` + +Statistics are accumulated until the same command with an extra `RESETSTAT` argument is called. 
This resets the statistics relative to the model or script. + +```sql +AI.INFO RESETSTAT +``` + +The command can be called on a key until that key is removed using `MODELDEL` or `SCRIPTDEL`. + +### AI.INFO Example + +```sql +AI.INFO amodel + +> 1) KEY +> 2) "amodel" +> 3) TYPE +> 4) MODEL +> 5) BACKEND +> 6) TORCH +> 7) DEVICE +> 8) CPU +> 9) DURATION +> 10) (integer) 6511 +> 11) SAMPLES +> 12) (integer) 2 +> 13) CALLS +> 14) (integer) 1 +> 15) ERRORS +> 16) (integer) 0 +``` + +```sql +AI.INFO amodel RESETSTAT + +> OK + +AI.INFO amodel + +> 1) KEY +> 2) "amodel" +> 3) TYPE +> 4) MODEL +> 5) BACKEND +> 6) TORCH +> 7) DEVICE +> 8) CPU +> 9) DURATION +> 10) (integer) 0 +> 11) SAMPLES +> 12) (integer) 0 +> 13) CALLS +> 14) (integer) 0 +> 15) ERRORS +> 16) (integer) 0 +``` diff --git a/src/backends.c b/src/backends.c index 4bdc957d7..78051dd8f 100644 --- a/src/backends.c +++ b/src/backends.c @@ -21,7 +21,6 @@ RedisModuleString* RAI_GetModulePath(RedisModuleCtx *ctx) { return module_path; } - RedisModuleString* RAI_GetBackendsPath(RedisModuleCtx *ctx) { Dl_info info; RedisModuleString* backends_path = NULL; @@ -36,6 +35,20 @@ RedisModuleString* RAI_GetBackendsPath(RedisModuleCtx *ctx) { return backends_path; } +const char* RAI_BackendName(int backend) { + switch (backend) { + case RAI_BACKEND_TENSORFLOW: + return "TF"; + case RAI_BACKEND_TFLITE: + return "TFLITE"; + case RAI_BACKEND_TORCH: + return "TORCH"; + case RAI_BACKEND_ONNXRUNTIME: + return "ONNX"; + } + return NULL; +} + int RAI_LoadBackend_TensorFlow(RedisModuleCtx *ctx, const char *path) { if (RAI_backends.tf.model_run != NULL) { RedisModule_Log(ctx, "warning", "Could not load TF backend: backend already loaded"); diff --git a/src/backends.h b/src/backends.h index 04731c759..77b4cf1a1 100644 --- a/src/backends.h +++ b/src/backends.h @@ -35,4 +35,6 @@ char* RAI_BackendsPath; int RAI_LoadBackend(RedisModuleCtx *ctx, int backend, const char *path); int RAI_LoadDefaultBackend(RedisModuleCtx *ctx, int backend); 
-#endif \ No newline at end of file +const char* RAI_BackendName(int backend); + +#endif diff --git a/src/model.c b/src/model.c index 0a3733571..6f92f50ea 100644 --- a/src/model.c +++ b/src/model.c @@ -135,25 +135,11 @@ static void RAI_Model_AofRewrite(RedisModuleIO *aof, RedisModuleString *key, voi array_append(outputs_, RedisModule_CreateString(ctx, model->outputs[i], strlen(model->outputs[i]))); } - char backend[256] = ""; - switch (model->backend) { - case RAI_BACKEND_TENSORFLOW: - strcpy(backend, "TF"); - break; - case RAI_BACKEND_TFLITE: - strcpy(backend, "TFLITE"); - break; - case RAI_BACKEND_TORCH: - strcpy(backend, "TORCH"); - break; - case RAI_BACKEND_ONNXRUNTIME: - strcpy(backend, "ONNX"); - break; - } + const char* backendstr = RAI_BackendName(model->backend); RedisModule_EmitAOF(aof, "AI.MODELSET", "slccvcvb", key, - backend, model->devicestr, + backendstr, model->devicestr, "INPUTS", inputs_, model->ninputs, "OUTPUTS", outputs_, model->noutputs, buffer, len); diff --git a/src/model_struct.h b/src/model_struct.h index 58cf8accd..e2be0b143 100644 --- a/src/model_struct.h +++ b/src/model_struct.h @@ -17,8 +17,6 @@ typedef struct RAI_Model { char **outputs; size_t noutputs; long long refCount; - long long backend_calls; - long long backend_us; void* data; } RAI_Model; diff --git a/src/redisai.c b/src/redisai.c index 9ef9d25c0..7760d5cb1 100644 --- a/src/redisai.c +++ b/src/redisai.c @@ -98,6 +98,8 @@ typedef struct RunQueueInfo { static AI_dict *run_queues = NULL; static long long perqueueThreadPoolSize = REDISAI_DEFAULT_THREADS_PER_QUEUE; +static AI_dict *run_stats = NULL; + int freeRunQueueInfo(RunQueueInfo* info) { int result = REDISMODULE_OK; if (info->run_queue) { @@ -150,17 +152,17 @@ int ensureRunQueue(const char* devicestr) { } long long ustime(void) { - struct timeval tv; - long long ust; + struct timeval tv; + long long ust; - gettimeofday(&tv, NULL); - ust = ((long long)tv.tv_sec)*1000000; - ust += tv.tv_usec; - return ust; + gettimeofday(&tv, 
NULL); + ust = ((long long)tv.tv_sec)*1000000; + ust += tv.tv_usec; + return ust; } mstime_t mstime(void) { - return ustime()/1000; + return ustime()/1000; } enum RedisAI_DataFmt { @@ -466,6 +468,75 @@ int RedisAI_TensorGet_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv // ================================ +struct RedisAI_RunInfo { + RedisModuleBlockedClient *client; + RedisModuleString *runkey; + RedisModuleString **outkeys; + RAI_ModelRunCtx *mctx; + RAI_ScriptRunCtx *sctx; + int status; + long long duration_us; + RAI_Error* err; +}; + +struct RedisAI_RunStats { + RedisModuleString *key; + int type; // model or script + RAI_Backend backend; + char* devicestr; + long long duration_us; + long long samples; + long long calls; + long long nerrors; +}; + +void RedisAI_FreeRunInfo(RedisModuleCtx *ctx, struct RedisAI_RunInfo *rinfo) { + if (rinfo->mctx) { + for(int i = 0 ; i < RAI_ModelRunCtxNumOutputs(rinfo->mctx) ; ++i){ + RedisModule_FreeString(ctx, rinfo->outkeys[i]); + } + RedisModule_Free(rinfo->outkeys); + RAI_ModelRunCtxFree(rinfo->mctx); + } + else if (rinfo->sctx) { + for(int i = 0 ; i < RAI_ScriptRunCtxNumOutputs(rinfo->sctx) ; ++i){ + RedisModule_FreeString(ctx, rinfo->outkeys[i]); + } + RedisModule_Free(rinfo->outkeys); + RAI_ScriptRunCtxFree(rinfo->sctx); + } + + if (rinfo->err) { + RAI_ClearError(rinfo->err); + RedisModule_Free(rinfo->err); + } + + RedisModule_Free(rinfo); +} + +void RedisAI_FreeRunStats(RedisModuleCtx *ctx, struct RedisAI_RunStats *rstats) { + RedisModule_FreeString(ctx, rstats->key); + RedisModule_Free(rstats->devicestr); +} + +void *RedisAI_RunSession(void *arg) { + struct RedisAI_RunInfo *rinfo = (struct RedisAI_RunInfo*)arg; + rinfo->err = RedisModule_Calloc(1, sizeof(RAI_Error)); + const long long start = ustime(); + if (rinfo->mctx) { + rinfo->status = RAI_ModelRun(rinfo->mctx, rinfo->err); + } + else if (rinfo->sctx) { + rinfo->status = RAI_ScriptRun(rinfo->sctx, rinfo->err); + } + rinfo->duration_us = ustime()-start; + 
+ if (rinfo->client != NULL) { + RedisModule_UnblockClient(rinfo->client, rinfo); + } + return NULL; +} + // key backend device [INPUTS name1 name2] [OUTPUTS name1 name2] modelbuf int RedisAI_ModelSet_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { RedisModule_AutoMemory(ctx); @@ -708,59 +779,15 @@ int RedisAI_ModelDel_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, RedisModule_DeleteKey(key); RedisModule_CloseKey(key); - return RedisModule_ReplyWithSimpleString(ctx, "OK"); -} - -struct RedisAI_RunInfo { - RedisModuleBlockedClient *client; - RedisModuleString **outkeys; - RAI_ModelRunCtx *mctx; - RAI_ScriptRunCtx *sctx; - int status; - long long duration_us; - RAI_Error* err; -}; - -void RedisAI_FreeRunInfo(RedisModuleCtx *ctx, struct RedisAI_RunInfo *rinfo) { - if (rinfo->mctx) { - for(int i = 0 ; i < RAI_ModelRunCtxNumOutputs(rinfo->mctx) ; ++i){ - RedisModule_FreeString(ctx, rinfo->outkeys[i]); - } - RedisModule_Free(rinfo->outkeys); - RAI_ModelRunCtxFree(rinfo->mctx); - } - else if (rinfo->sctx) { - for(int i = 0 ; i < RAI_ScriptRunCtxNumOutputs(rinfo->sctx) ; ++i){ - RedisModule_FreeString(ctx, rinfo->outkeys[i]); - } - RedisModule_Free(rinfo->outkeys); - RAI_ScriptRunCtxFree(rinfo->sctx); - } - - if (rinfo->err) { - RAI_ClearError(rinfo->err); - RedisModule_Free(rinfo->err); - } - - RedisModule_Free(rinfo); -} - -void *RedisAI_RunSession(void *arg) { - struct RedisAI_RunInfo *rinfo = (struct RedisAI_RunInfo*)arg; - rinfo->err = RedisModule_Calloc(1, sizeof(RAI_Error)); - const long long start = ustime(); - if (rinfo->mctx) { - rinfo->status = RAI_ModelRun(rinfo->mctx, rinfo->err); - } - else if (rinfo->sctx) { - rinfo->status = RAI_ScriptRun(rinfo->sctx, rinfo->err); + const char* key_cstr = RedisModule_StringPtrLen(keystr, NULL); + AI_dictEntry *stats_entry = AI_dictFind(run_stats, key_cstr); + if (stats_entry) { + struct RedisAI_RunStats *rstats = AI_dictGetVal(stats_entry); + AI_dictDelete(run_stats, key_cstr); + 
RedisAI_FreeRunStats(ctx, rstats); } - rinfo->duration_us = ustime()-start; - if (rinfo->client != NULL) { - RedisModule_UnblockClient(rinfo->client, rinfo); - } - return NULL; + return RedisModule_ReplyWithSimpleString(ctx, "OK"); } void RedisAI_FreeData(RedisModuleCtx *ctx, void *rinfo) { @@ -798,8 +825,34 @@ int RedisAI_Run_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { REDISMODULE_NOT_USED(argc); struct RedisAI_RunInfo *rinfo = RedisModule_GetBlockedClientPrivateData(ctx); + const char* runkey = RedisModule_StringPtrLen(rinfo->runkey, NULL); + AI_dictEntry *stats_entry = AI_dictFind(run_stats, runkey); + + struct RedisAI_RunStats *rstats = NULL; + if (stats_entry == NULL) { + rstats = RedisModule_Calloc(1, sizeof(struct RedisAI_RunStats)); + RedisModule_RetainString(ctx, rinfo->runkey); + rstats->key = rinfo->runkey; + rstats->type = rinfo->mctx ? 0 : 1; + if (rinfo->mctx) { + rstats->backend = rinfo->mctx->model->backend; + rstats->devicestr = RedisModule_Strdup(rinfo->mctx->model->devicestr); + } + else { + rstats->backend = RAI_BACKEND_TORCH; + rstats->devicestr = RedisModule_Strdup(rinfo->sctx->script->devicestr); + } + + AI_dictAdd(run_stats, (void*)runkey, (void*)rstats); + } + else { + rstats = AI_dictGetVal(stats_entry); + } + if (rinfo->status) { RedisModule_Log(ctx, "warning", "ERR %s", rinfo->err->detail); + rstats->calls += 1; + rstats->nerrors += 1; int ret = RedisModule_ReplyWithError(ctx, rinfo->err->detail_oneline); RedisAI_FreeRunInfo(ctx, rinfo); return ret; @@ -807,15 +860,14 @@ int RedisAI_Run_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { size_t num_outputs = 0; if (rinfo->mctx) { - (rinfo->mctx->model->backend_calls)++; - (rinfo->mctx->model->backend_us) += rinfo->duration_us; num_outputs = RAI_ModelRunCtxNumOutputs(rinfo->mctx); } else if (rinfo->sctx) { - (rinfo->sctx->script->backend_calls)++; - (rinfo->sctx->script->backend_us) += rinfo->duration_us; num_outputs = 
RAI_ScriptRunCtxNumOutputs(rinfo->sctx); } + + int64_t batch_size = 0; + for (size_t i=0; ioutkeys[i], REDISMODULE_READ|REDISMODULE_WRITE); @@ -825,11 +877,16 @@ int RedisAI_Run_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { RedisModule_ModuleTypeGetType(outkey) == RedisAI_TensorType)) { RedisModule_CloseKey(outkey); RedisAI_FreeRunInfo(ctx, rinfo); + rstats->calls += 1; + rstats->nerrors += 1; return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE); } RAI_Tensor *t = NULL; if (rinfo->mctx) { t = RAI_ModelRunCtxOutputTensor(rinfo->mctx, i); + if (t && batch_size == 0) { + batch_size = RAI_TensorDim(t, 0); + } } else if (rinfo->sctx) { t = RAI_ScriptRunCtxOutputTensor(rinfo->sctx, i); @@ -844,6 +901,13 @@ int RedisAI_Run_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { } } + rstats->duration_us += rinfo->duration_us; + rstats->calls += 1; + + if (rinfo->mctx) { + rstats->samples += batch_size; + } + // FIXME This crashes Redis, we need to investigate. 
//RedisModule_CloseKey(rinfo->modelkey); @@ -936,6 +1000,8 @@ int RedisAI_ModelRun_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, } struct RedisAI_RunInfo *rinfo = RedisModule_Calloc(1, sizeof(struct RedisAI_RunInfo)); + RedisModule_RetainString(ctx, keystr); + rinfo->runkey = keystr; rinfo->mctx = RAI_ModelRunCtxCreate(mto); rinfo->sctx = NULL; rinfo->outkeys = NULL; @@ -1054,9 +1120,6 @@ int RedisAI_ScriptRun_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv RedisModuleString* keystr; AC_GetRString(&ac, &keystr, 0); - // TODO we run synchronously for now, but we could have - // - A: a separate thread and queue for scripts - // - B: the same thread and queue for models and scripts RedisModuleKey *key = RedisModule_OpenKey(ctx, keystr, REDISMODULE_READ); int type = RedisModule_KeyType(key); if (type == REDISMODULE_KEYTYPE_EMPTY) { @@ -1137,6 +1200,8 @@ int RedisAI_ScriptRun_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv struct RedisAI_RunInfo *rinfo = RedisModule_Calloc(1, sizeof(struct RedisAI_RunInfo)); rinfo->mctx = NULL; rinfo->sctx = sctx; + RedisModule_RetainString(ctx, keystr); + rinfo->runkey = keystr; rinfo->outkeys = outkeys; rinfo->err = NULL; AI_dictEntry *entry = AI_dictFind(run_queues, sto->devicestr); @@ -1250,6 +1315,14 @@ int RedisAI_ScriptDel_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv RedisModule_DeleteKey(key); RedisModule_CloseKey(key); + const char* key_cstr = RedisModule_StringPtrLen(keystr, NULL); + AI_dictEntry *stats_entry = AI_dictFind(run_stats, key_cstr); + if (stats_entry) { + struct RedisAI_RunStats *rstats = AI_dictGetVal(stats_entry); + AI_dictDelete(run_stats, key_cstr); + RedisAI_FreeRunStats(ctx, rstats); + } + return RedisModule_ReplyWithSimpleString(ctx, "OK"); } @@ -1332,6 +1405,73 @@ int RedisAI_ScriptSet_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv return REDISMODULE_OK; } +// key +// key RESETSTAT +int RedisAI_Info_RedisCommand(RedisModuleCtx *ctx, 
RedisModuleString **argv, int argc) { + RedisModule_AutoMemory(ctx); + + if (argc != 2 && argc != 3) return RedisModule_WrongArity(ctx); + + ArgsCursor ac; + ArgsCursor_InitRString(&ac, argv+1, argc-1); + + const char* runkey; + AC_GetString(&ac, &runkey, NULL, 0); + + AI_dictEntry *stats_entry = AI_dictFind(run_stats, runkey); + + if (!stats_entry) { + return RedisModule_ReplyWithError(ctx, "ERR cannot find run info for key"); + } + + struct RedisAI_RunStats *rstats = AI_dictGetVal(stats_entry); + + if (!AC_IsAtEnd(&ac)) { + const char* opt; + AC_GetString(&ac, &opt, NULL, 0); + + if (strcasecmp(opt, "RESETSTAT") == 0) { + rstats->duration_us = 0; + rstats->samples = 0; + rstats->calls = 0; + rstats->nerrors = 0; + RedisModule_ReplyWithSimpleString(ctx, "OK"); + return REDISMODULE_OK; + } + } + + RedisModule_ReplyWithArray(ctx, 16); + + RedisModule_ReplyWithSimpleString(ctx, "KEY"); + RedisModule_ReplyWithString(ctx, rstats->key); + RedisModule_ReplyWithSimpleString(ctx, "TYPE"); + if (rstats->type == 0) { + RedisModule_ReplyWithSimpleString(ctx, "MODEL"); + } + else { + RedisModule_ReplyWithSimpleString(ctx, "SCRIPT"); + } + RedisModule_ReplyWithSimpleString(ctx, "BACKEND"); + RedisModule_ReplyWithSimpleString(ctx, RAI_BackendName(rstats->backend)); + RedisModule_ReplyWithSimpleString(ctx, "DEVICE"); + RedisModule_ReplyWithSimpleString(ctx, rstats->devicestr); + RedisModule_ReplyWithSimpleString(ctx, "DURATION"); + RedisModule_ReplyWithLongLong(ctx, rstats->duration_us); + RedisModule_ReplyWithSimpleString(ctx, "SAMPLES"); + if (rstats->type == 0) { + RedisModule_ReplyWithLongLong(ctx, rstats->samples); + } + else { + RedisModule_ReplyWithLongLong(ctx, -1); + } + RedisModule_ReplyWithSimpleString(ctx, "CALLS"); + RedisModule_ReplyWithLongLong(ctx, rstats->calls); + RedisModule_ReplyWithSimpleString(ctx, "ERRORS"); + RedisModule_ReplyWithLongLong(ctx, rstats->nerrors); + + return REDISMODULE_OK; +} + int RedisAI_Config_LoadBackend(RedisModuleCtx *ctx, 
RedisModuleString **argv, int argc) { RedisModule_AutoMemory(ctx); @@ -1549,6 +1689,10 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx, "ai.info", RedisAI_Info_RedisCommand, "readonly", 1, 1, 1) + == REDISMODULE_ERR) + return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx, "ai.config", RedisAI_Config_RedisCommand, "write", 1, 1, 1) == REDISMODULE_ERR) return REDISMODULE_ERR; @@ -1616,6 +1760,8 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) RedisModule_Log(ctx, "warning", "Queue not initialized for device CPU" ); return REDISMODULE_ERR; } + + run_stats = AI_dictCreate(&AI_dictTypeHeapStrings, NULL); return REDISMODULE_OK; } diff --git a/src/script_struct.h b/src/script_struct.h index 5fba77fc4..b9e1d4ab9 100644 --- a/src/script_struct.h +++ b/src/script_struct.h @@ -13,8 +13,6 @@ typedef struct RAI_Script { // CUDA allocator for dlpack char* devicestr; long long refCount; - long long backend_calls; - long long backend_us; } RAI_Script; typedef struct RAI_ScriptCtxParam { diff --git a/test/basic_tests.py b/test/basic_tests.py index 39ba37398..3a1317443 100644 --- a/test/basic_tests.py +++ b/test/basic_tests.py @@ -31,6 +31,11 @@ def check_cuda(): return os.system('which nvcc') +def info_to_dict(info): + info = [el.decode('ascii') if type(el) is bytes else el for el in info] + return dict(zip(info[::2], info[1::2])) + + def run_test_multiproc(env, n_procs, fn, args=tuple()): procs = [] @@ -242,6 +247,39 @@ def test_run_tf_model(env): con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + info = con.execute_command('AI.INFO', 'm') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'm') + env.assertEqual(info_dict_0['TYPE'], 'MODEL') + env.assertEqual(info_dict_0['BACKEND'], 'TF') + env.assertTrue(info_dict_0['DURATION'] > 0) + env.assertEqual(info_dict_0['SAMPLES'], 2) 
+ env.assertEqual(info_dict_0['CALLS'], 1) + env.assertEqual(info_dict_0['ERRORS'], 0) + + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + + info = con.execute_command('AI.INFO', 'm') + info_dict_1 = info_to_dict(info) + + env.assertTrue(info_dict_1['DURATION'] > info_dict_0['DURATION']) + env.assertEqual(info_dict_1['SAMPLES'], 4) + env.assertEqual(info_dict_1['CALLS'], 2) + env.assertEqual(info_dict_1['ERRORS'], 0) + + ret = con.execute_command('AI.INFO', 'm', 'RESETSTAT') + env.assertEqual(ret, b'OK') + + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + info = con.execute_command('AI.INFO', 'm') + info_dict_2 = info_to_dict(info) + + env.assertTrue(info_dict_2['DURATION'] < info_dict_1['DURATION']) + env.assertEqual(info_dict_2['SAMPLES'], 2) + env.assertEqual(info_dict_2['CALLS'], 1) + env.assertEqual(info_dict_2['ERRORS'], 0) + tensor = con.execute_command('AI.TENSORGET', 'c', 'VALUES') values = tensor[-1] env.assertEqual(values, [b'4', b'9', b'4', b'9']) @@ -845,6 +883,40 @@ def test_run_script(env): con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + info = con.execute_command('AI.INFO', 'ket') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'ket') + env.assertEqual(info_dict_0['TYPE'], 'SCRIPT') + env.assertEqual(info_dict_0['BACKEND'], 'TORCH') + env.assertTrue(info_dict_0['DURATION'] > 0) + env.assertEqual(info_dict_0['SAMPLES'], -1) + env.assertEqual(info_dict_0['CALLS'], 4) + env.assertEqual(info_dict_0['ERRORS'], 3) + + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + + info = con.execute_command('AI.INFO', 'ket') + info_dict_1 = info_to_dict(info) + + env.assertTrue(info_dict_1['DURATION'] > info_dict_0['DURATION']) + env.assertEqual(info_dict_1['SAMPLES'], -1) + env.assertEqual(info_dict_1['CALLS'], 5) + env.assertEqual(info_dict_1['ERRORS'], 3) + + ret = con.execute_command('AI.INFO', 'ket', 
'RESETSTAT') + env.assertEqual(ret, b'OK') + + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + + info = con.execute_command('AI.INFO', 'ket') + info_dict_2 = info_to_dict(info) + + env.assertTrue(info_dict_2['DURATION'] < info_dict_1['DURATION']) + env.assertEqual(info_dict_2['SAMPLES'], -1) + env.assertEqual(info_dict_2['CALLS'], 1) + env.assertEqual(info_dict_2['ERRORS'], 0) + tensor = con.execute_command('AI.TENSORGET', 'c', 'VALUES') values = tensor[-1] env.assertEqual(values, [b'4', b'6', b'4', b'6'])