Test create tensor through gears #620

Merged · 7 commits · Mar 1, 2021
67 changes: 18 additions & 49 deletions src/tensor.c
@@ -99,10 +99,11 @@ RAI_Tensor *RAI_TensorNew(void) {
RAI_Tensor *ret = RedisModule_Calloc(1, sizeof(*ret));
ret->refCount = 1;
ret->len = LEN_UNKOWN;
return ret;
}

RAI_Tensor *RAI_TensorCreateWithDLDataType(DLDataType dtype, long long *dims, int ndims,
- int tensorAllocMode) {
+ bool empty) {

size_t dtypeSize = Tensor_DataTypeSize(dtype);
if (dtypeSize == 0) {
@@ -124,20 +125,14 @@ RAI_Tensor *RAI_TensorCreateWithDLDataType(DLDataType dtype, long long *dims, in
}

DLDevice device = (DLDevice){.device_type = kDLCPU, .device_id = 0};
- void *data = NULL;
- switch (tensorAllocMode) {
- case TENSORALLOC_ALLOC:
- data = RedisModule_Alloc(len * dtypeSize);
- break;
- case TENSORALLOC_CALLOC:
+
+ // If we return an empty tensor, we initialize the data with zeros to avoid security
+ // issues. Otherwise, we only allocate without initializing (for better performance)
+ void *data;
+ if (empty) {
data = RedisModule_Calloc(len, dtypeSize);
- break;
- case TENSORALLOC_NONE:
- /* shallow copy no alloc */
- default:
- /* assume TENSORALLOC_NONE
- shallow copy no alloc */
- break;
+ } else {
+ data = RedisModule_Alloc(len * dtypeSize);
}

ret->tensor = (DLManagedTensor){.dl_tensor = (DLTensor){.device = device,
@@ -214,27 +209,11 @@ RAI_Tensor *_TensorCreateWithDLDataTypeAndRString(DLDataType dtype, size_t dtype
return ret;
}

- RAI_Tensor *RAI_TensorCreate(const char *dataType, long long *dims, int ndims, int hasdata) {
+ // Important note: the tensor data must be initialized after the creation.
+ RAI_Tensor *RAI_TensorCreate(const char *dataType, long long *dims, int ndims) {
DLDataType dtype = RAI_TensorDataTypeFromString(dataType);
- return RAI_TensorCreateWithDLDataType(dtype, dims, ndims, TENSORALLOC_ALLOC);
- }
-
- #if 0
- void RAI_TensorMoveFrom(RAI_Tensor* dst, RAI_Tensor* src) {
- if (--dst->refCount <= 0){
- RedisModule_Free(t->tensor.shape);
- if (t->tensor.strides) {
- RedisModule_Free(t->tensor.strides);
- }
- RedisModule_Free(t->tensor.data);
- RedisModule_Free(t);
- }
- dst->tensor.ctx = src->tensor.ctx;
- dst->tensor.data = src->tensor.data;
-
- dst->refCount = 1;
+ return RAI_TensorCreateWithDLDataType(dtype, dims, ndims, false);
}
- #endif
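
A minimal caller-side sketch (illustration only, not part of this diff): with the new signature, RAI_TensorCreate() always requests an uninitialized buffer (empty == false), so per the note above the caller is expected to fill the data right after creation. RAI_TensorSetValueFromLongLong() is the setter shown later in this file; RAI_TensorFree() is assumed to be the matching release call.

static void example_create_and_fill(void) {
    long long dims[2] = {2, 2};
    RAI_Tensor *t = RAI_TensorCreate("INT32", dims, 2);
    if (t == NULL) {
        return; // allocation failed
    }
    // The buffer is not zero-initialized, so set every element explicitly.
    for (long long i = 0; i < 4; i++) {
        RAI_TensorSetValueFromLongLong(t, i, i + 1);
    }
    /* ... use the tensor ... */
    RAI_TensorFree(t);
}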

RAI_Tensor *RAI_TensorCreateByConcatenatingTensors(RAI_Tensor **ts, long long n) {

@@ -273,7 +252,7 @@ RAI_Tensor *RAI_TensorCreateByConcatenatingTensors(RAI_Tensor **ts, long long n)

DLDataType dtype = RAI_TensorDataType(ts[0]);

- RAI_Tensor *ret = RAI_TensorCreateWithDLDataType(dtype, dims, ndims, TENSORALLOC_ALLOC);
+ RAI_Tensor *ret = RAI_TensorCreateWithDLDataType(dtype, dims, ndims, false);

for (long long i = 0; i < n; i++) {
memcpy(RAI_TensorData(ret) + batch_offsets[i] * sample_size * dtype_size,
@@ -300,7 +279,7 @@ RAI_Tensor *RAI_TensorCreateBySlicingTensor(RAI_Tensor *t, long long offset, lon

DLDataType dtype = RAI_TensorDataType(t);

- RAI_Tensor *ret = RAI_TensorCreateWithDLDataType(dtype, dims, ndims, TENSORALLOC_ALLOC);
+ RAI_Tensor *ret = RAI_TensorCreateWithDLDataType(dtype, dims, ndims, false);

memcpy(RAI_TensorData(ret), RAI_TensorData(t) + offset * sample_size * dtype_size,
len * sample_size * dtype_size);
Expand Down Expand Up @@ -329,14 +308,14 @@ int RAI_TensorDeepCopy(RAI_Tensor *t, RAI_Tensor **dest) {

DLDataType dtype = RAI_TensorDataType(t);

- RAI_Tensor *ret = RAI_TensorCreateWithDLDataType(dtype, dims, ndims, TENSORALLOC_ALLOC);
+ RAI_Tensor *ret = RAI_TensorCreateWithDLDataType(dtype, dims, ndims, false);

memcpy(RAI_TensorData(ret), RAI_TensorData(t), sample_size * dtype_size);
*dest = ret;
return 0;
}

- // Beware: this will take ownership of dltensor
+ // Beware: this will take ownership of dltensor.
RAI_Tensor *RAI_TensorCreateFromDLTensor(DLManagedTensor *dl_tensor) {

RAI_Tensor *ret = RAI_TensorNew();
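
A small ownership sketch (illustration only, not part of this diff) for the "Beware" note above: once the DLManagedTensor is handed to RAI_TensorCreateFromDLTensor(), the resulting RAI_Tensor owns it, so the producer must not free it or call its deleter. make_dlpack_tensor() is a hypothetical producer that fills a DLPack DLManagedTensor (data, shape, dtype, deleter); it is not a RedisAI API.

static void example_take_ownership(void) {
    extern DLManagedTensor *make_dlpack_tensor(void); // hypothetical producer
    DLManagedTensor *dlt = make_dlpack_tensor();
    RAI_Tensor *t = RAI_TensorCreateFromDLTensor(dlt);
    // Do NOT free dlt or invoke dlt->deleter here -- ownership has moved into t.
    // The DLPack deleter is expected to run when the RAI_Tensor is released.
    RAI_TensorFree(t);
}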
@@ -419,19 +398,15 @@ int RAI_TensorSetValueFromLongLong(RAI_Tensor *t, long long i, long long val) {
case 8:
((int8_t *)data)[i] = val;
break;
- break;
case 16:
((int16_t *)data)[i] = val;
break;
- break;
case 32:
((int32_t *)data)[i] = val;
break;
- break;
case 64:
((int64_t *)data)[i] = val;
break;
- break;
default:
return 0;
}
@@ -440,19 +415,15 @@
case 8:
((uint8_t *)data)[i] = val;
break;
- break;
case 16:
((uint16_t *)data)[i] = val;
break;
- break;
case 32:
((uint32_t *)data)[i] = val;
break;
- break;
case 64:
((uint64_t *)data)[i] = val;
break;
- break;
default:
return 0;
}
@@ -642,7 +613,6 @@ int RAI_parseTensorSetArgs(RedisModuleString **argv, int argc, RAI_Tensor **t, i

const char *fmtstr;
int datafmt = TENSOR_NONE;
- int tensorAllocMode = TENSORALLOC_CALLOC;
size_t ndims = 0;
long long len = 1;
long long *dims = (long long *)array_new(long long, 1);
@@ -656,7 +626,6 @@ int RAI_parseTensorSetArgs(RedisModuleString **argv, int argc, RAI_Tensor **t, i
remaining_args = argc - 1 - argpos;
if (!strcasecmp(opt, "BLOB")) {
datafmt = TENSOR_BLOB;
- tensorAllocMode = TENSORALLOC_CALLOC;
// if we've found the dataformat there are no more dimensions
// check right away if the arity is correct
if (remaining_args != 1 && enforceArity == 1) {
@@ -669,7 +638,6 @@ int RAI_parseTensorSetArgs(RedisModuleString **argv, int argc, RAI_Tensor **t, i
break;
} else if (!strcasecmp(opt, "VALUES")) {
datafmt = TENSOR_VALUES;
- tensorAllocMode = TENSORALLOC_CALLOC;
// if we've found the dataformat there are no more dimensions
// check right away if the arity is correct
if (remaining_args != len && enforceArity == 1) {
@@ -699,7 +667,8 @@ int RAI_parseTensorSetArgs(RedisModuleString **argv, int argc, RAI_Tensor **t, i
RedisModuleString *rstr = argv[argpos];
*t = _TensorCreateWithDLDataTypeAndRString(datatype, datasize, dims, ndims, rstr, error);
} else {
- *t = RAI_TensorCreateWithDLDataType(datatype, dims, ndims, tensorAllocMode);
+ bool is_empty = (datafmt == TENSOR_NONE);
+ *t = RAI_TensorCreateWithDLDataType(datatype, dims, ndims, is_empty);
}
if (!(*t)) {
array_free(dims);
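A short sketch of what the new flag means at the AI.TENSORSET level (illustration only, mirroring the parsing branches above; the command layouts are assumptions based on the TENSOR_NONE / TENSOR_VALUES / TENSOR_BLOB cases):

// AI.TENSORSET key FLOAT 2 2                -> no BLOB/VALUES, datafmt stays TENSOR_NONE
//                                              -> is_empty == true  -> zero-initialized buffer
// AI.TENSORSET key FLOAT 2 2 VALUES 1 2 3 4 -> datafmt == TENSOR_VALUES
//                                              -> is_empty == false -> plain alloc, values copied in afterwards
// AI.TENSORSET key FLOAT 2 2 BLOB <bytes>   -> datafmt == TENSOR_BLOB
//                                              -> handled by _TensorCreateWithDLDataTypeAndRString()
bool is_empty = (datafmt == TENSOR_NONE);
*t = RAI_TensorCreateWithDLDataType(datatype, dims, ndims, is_empty);
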
7 changes: 3 additions & 4 deletions src/tensor.h
@@ -65,11 +65,10 @@ RAI_Tensor *RAI_TensorNew(void);
* @param dataType string containing the numeric data type of tensor elements
* @param dims n-dimensional array ( the dimension values are copied )
* @param ndims number of dimensions
- * @param hasdata ( deprecated parameter )
* @return allocated RAI_Tensor on success, or NULL if the allocation
* failed.
*/
- RAI_Tensor *RAI_TensorCreate(const char *dataType, long long *dims, int ndims, int hasdata);
+ RAI_Tensor *RAI_TensorCreate(const char *dataType, long long *dims, int ndims);

/**
* Allocate the memory and initialise the RAI_Tensor. Creates a tensor based on
@@ -81,12 +80,12 @@ RAI_Tensor *RAI_TensorCreate(const char *dataType, long long *dims, int ndims, i
* @param dtype DLDataType
* @param dims n-dimensional array ( the dimension values are copied )
* @param ndims number of dimensions
- * @param tensorAllocMode
+ * @param empty True if creating an empty tensor (data needs to be initialized later)
* @return allocated RAI_Tensor on success, or NULL if the allocation
* failed.
*/
RAI_Tensor *RAI_TensorCreateWithDLDataType(DLDataType dtype, long long *dims, int ndims,
- int tensorAllocMode);
+ bool empty);

/**
* Allocate the memory for a new Tensor and copy data fom a tensor to it.
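A minimal sketch of the two allocation behaviours behind the new flag (illustration only, not part of the header; the DLDataType literal follows the DLPack convention already used in tensor.c):

DLDataType f32 = (DLDataType){.code = kDLFloat, .bits = 32, .lanes = 1};
long long dims[2] = {3, 4};

// empty == true: the data buffer is zero-initialized (RedisModule_Calloc),
// so the tensor is safe to hand out as-is.
RAI_Tensor *zeros = RAI_TensorCreateWithDLDataType(f32, dims, 2, true);

// empty == false: the data buffer is allocated but NOT initialized (RedisModule_Alloc);
// the caller must fill it (memcpy a blob, set values, ...) before reading it back.
RAI_Tensor *filled = RAI_TensorCreateWithDLDataType(f32, dims, 2, false);
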
2 changes: 2 additions & 0 deletions tests/flow/includes.py
@@ -58,6 +58,8 @@ def send_and_disconnect(cmd, red):
con = pool.get_connection(cmd[0])
ret = con.send_command(*cmd)
con.disconnect()
+ # Make sure that Redis will have time to exit cleanly.
+ time.sleep(1)
return ret


37 changes: 37 additions & 0 deletions tests/flow/tests_withGears.py
@@ -311,3 +311,40 @@ async def DAGRun_addOpsFromString(record):
values = con.execute_command('AI.TENSORGET', 'test5_res{1}', 'VALUES')
env.assertEqual(values, [b'4', b'9', b'4', b'9'])


@skip_if_gears_not_loaded
def test_tensor_create_via_gears(env):
script = '''

import redisAI

def TensorCreate_FromValues(record):

tensor = redisAI.createTensorFromValues('DOUBLE', [2,2], [1.0, 2.0, 3.0, 4.0])
redisAI.setTensorInKey('test1_res{1}', tensor)
return "test1_OK"

def TensorCreate_FromBlob(record):
tensor_blob = bytearray([5, 6, 7, 8])
tensor = redisAI.createTensorFromBlob('INT8', [2,2], tensor_blob)
redisAI.setTensorInKey('test2_res{1}', tensor)
return "test2_OK"

GB("CommandReader").map(TensorCreate_FromValues).register(trigger="TensorCreate_FromValues_test1")
GB("CommandReader").map(TensorCreate_FromBlob).register(trigger="TensorCreate_FromBlob_test2")
'''

con = env.getConnection()
ret = con.execute_command('rg.pyexecute', script)
env.assertEqual(ret, b'OK')
ret = con.execute_command('rg.trigger', 'TensorCreate_FromValues_test1')
env.assertEqual(ret[0], b'test1_OK')

values = con.execute_command('AI.TENSORGET', 'test1_res{1}', 'VALUES')
env.assertEqual(values, [b'1', b'2', b'3', b'4'])

ret = con.execute_command('rg.trigger', 'TensorCreate_FromBlob_test2')
env.assertEqual(ret[0], b'test2_OK')

values = con.execute_command('AI.TENSORGET', 'test2_res{1}', 'VALUES')
env.assertEqual(values, [5, 6, 7, 8])