diff --git a/Dockerfile_cli b/Dockerfile_cli
deleted file mode 100644
index 10ecf05b30b953b098a95dd0e60a9241485416f3..0000000000000000000000000000000000000000
--- a/Dockerfile_cli
+++ /dev/null
@@ -1,36 +0,0 @@
-# set base image from docker_hub or anywhere else
-FROM centos:8
-LABEL zhangruo zhangruo@ncti-gba.cn
-
-#set mirror address
-RUN sed -i -e "s/mirrorlist=/#mirrorlist=/g" /etc/yum.repos.d/CentOS-Linux-*.repo
-RUN sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-Linux-*.repo
-
-#which output subdir to use, e.g. debug_lite, release_lite, release, debug
-ENV SRC_SUB debug_lite
-
-#set variable
-ENV MYPATH /usr/local/intarkdb_lite
-ENV INTARK_LIB_PATH $MYPATH/lib
-ENV INTARK_BIN_PATH $MYPATH/bin
-
-#copy source
-COPY ./output/inc $MYPATH/inc
-COPY ./output/lib $INTARK_LIB_PATH
-COPY ./output/$SRC_SUB/lib $INTARK_LIB_PATH
-COPY ./output/$SRC_SUB/bin/InstarDB_CLI $INTARK_BIN_PATH/InstarDB_CLI
-COPY ./output/$SRC_SUB/bin/stress_insert_test $INTARK_BIN_PATH/stress_insert_test
-COPY ./output/$SRC_SUB/bin/stress_prepare_select_test $INTARK_BIN_PATH/stress_prepare_select_test
-COPY ./output/$SRC_SUB/bin/stress_select_test $INTARK_BIN_PATH/stress_select_test
-
-#set env variable
-WORKDIR $INTARK_BIN_PATH
-ENV PATH $PATH:$INTARK_LIB_PATH:$INTARK_BIN_PATH
-ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:$INTARK_LIB_PATH:$MYPATH/inc
-
-#shutdown transparent hugepage
-#RUN echo never > /sys/kernel/mm/transparent_hugepage/enabled
-#RUN echo never > /sys/kernel/mm/transparent_hugepage/defrag
-
-#execute cmd whencontainer running
-CMD $INTARK_BIN_PATH/InstarDB_CLI
diff --git a/Dockerfile_functest b/Dockerfile_functest
deleted file mode 100644
index e5d15d0867a4f14a8806832ccef16289b4eee97a..0000000000000000000000000000000000000000
--- a/Dockerfile_functest
+++ /dev/null
@@ -1,59 +0,0 @@
-# set base image from docker_hub or anywhere else
-FROM centos:8
-LABEL zhangruo zhangruo@ncti-gba.cn
-
-#which output subdir to use, e.g. debug_lite, release_lite, release, debug
-ENV SRC_SUB release_lite
-
-#set variable
-ENV MYPATH /usr/local/intarkdb_lite
-ENV INTARK_LIB_PATH $MYPATH/lib
-ENV INTARK_BIN_PATH $MYPATH/bin
-
-#copy source
-COPY ./output/inc $MYPATH/inc
-COPY ./output/lib $INTARK_LIB_PATH
-COPY ./output/$SRC_SUB/lib $INTARK_LIB_PATH
-
-#set mirror address
-RUN sed -i -e "s/mirrorlist=/#mirrorlist=/g" /etc/yum.repos.d/CentOS-Linux-*.repo
-RUN sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-Linux-*.repo
-#update yum sources
-RUN yum update -y
-
-#set env variable
-WORKDIR $INTARK_BIN_PATH
-ENV PATH $PATH:$INTARK_LIB_PATH:$INTARK_BIN_PATH
-ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:$INTARK_LIB_PATH:$MYPATH/inc
-
-#install python36, git, openssh-client
-RUN yum -y install python36 git
-# RUN yum -y install openssh-client
-
-# copy ssh key into docker container image
-# COPY id_rsa /root/.ssh/id_rsa
-# RUN chmod 600 /root/.ssh/id_rsa
-# # Add Git server to known host list
-# RUN ssh-keyscan codehub-cn-south-1.devcloud.huaweicloud.com >> /root/.ssh/known_hosts
-
-# clone function test framework
-# RUN git clone git@codehub-cn-south-1.devcloud.huaweicloud.com:qrssjkjcbxm_liyuxiang00001/InstarDB_Test_Framework.git $INTARK_BIN_PATH
-
-# or copy function test framework
-ADD InstarDB_Test_Framework.tar.gz $INTARK_BIN_PATH
-RUN rm -f $INTARK_BIN_PATH/InstarDB_Test_Framework/log/*
-RUN rm -f $INTARK_BIN_PATH/InstarDB_Test_Framework/report/*
-
-# set control file
-#RUN sed -i -e "s|CONTROL_FILES = (.*/gstor/data/ctrl1)|CONTROL_FILES = ($INTARK_BIN_PATH/InstarDB_Test_Framework/gstor/data/ctrl1)|g" $INTARK_BIN_PATH/InstarDB_Test_Framework/gstor.ini
-# setup dependency
-RUN pip3 install --user -r $INTARK_BIN_PATH/InstarDB_Test_Framework/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-#set library path
-RUN echo "libpath:${INTARK_LIB_PATH}" > $INTARK_BIN_PATH/InstarDB_Test_Framework/configure/instarDB_config.txt
-
-# shutdown transparent hugepage
-# RUN echo never > /sys/kernel/mm/transparent_hugepage/enabled
-# RUN echo never > /sys/kernel/mm/transparent_hugepage/defrag
-
-# execute cmd whencontainer running
-CMD python3 $INTARK_BIN_PATH/InstarDB_Test_Framework/run/instarDB_run.py
diff --git a/README.md b/README.md
index 17f3700b8b1f7590c0d299094a69f411c358caa1..29b456d3554df0e5b64e1a154de9f8cb42c52a74 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,6 @@
- build:编译构建目录
- dependency: 第三方依赖
- example: c/c++/golang/python接口使用样例
-- test: 测试代码
- tools: 工具项
#### 二、编译指导
@@ -113,7 +112,6 @@ output目录结构如下:
└── lib # release版本库文件保存路径
```
-进入 output//bin/目录, 可以启动客户端测试工具intarkdb_cli,后面可以跟参数指定数据文件位置 eg: ./intarkdb_cli test
进入 output//bin/目录, 可以启动客户端测试工具intarkdb_cli,后面可以跟参数指定数据文件位置 eg: ./intarkdb_cli test
1. 运行单例测试:
diff --git a/VERSION.h b/VERSION.h
index 19725de01f0c3e18364dae2e528722e19f7dda5d..44a92770d9c1660af1714ccad6d74cf54267b92d 100644
--- a/VERSION.h
+++ b/VERSION.h
@@ -1,2 +1,2 @@
-const char* GIT_VERSION = "IntarkDB-V1.4.2";
+const char* GIT_VERSION = "IntarkDB 2.0";
const char* GIT_COMMIT_ID = "6b203bbf";
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index d819831f49262197cae0fd777084b2dff981b5f2..7cce7e8c7b85fca4b43bf02ce3b07c862a2bdca0 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -3,4 +3,5 @@ IF (ENABLE_PG_QUERY)
add_subdirectory(example-cpp)
ENDIF()
add_subdirectory(example-kv)
+add_subdirectory(sqlite3-api-test)
\ No newline at end of file
diff --git a/examples/example-c/CMakeLists.txt b/examples/example-c/CMakeLists.txt
index 0b15698f98ed44cbb6e9bd156891c2e04f1505c4..10f76a16d8942f07f062ce7f9c5bb0b03e7663c2 100644
--- a/examples/example-c/CMakeLists.txt
+++ b/examples/example-c/CMakeLists.txt
@@ -9,7 +9,8 @@ set(CMAKE_CXX_STANDARD 17)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
include_directories(${INTARKDB_INCLUDE_PATH}
- ${INTARKDB_SRC_PATH})
+ ${INTARKDB_SRC_PATH}
+ ${INTARKDB_HOME}/interface/c)
set(INSTARDB_LINK_LIBS intarkdb)
diff --git a/examples/example-c/example_c.c b/examples/example-c/example_c.c
index b7ed46dd32ca28b55ca2477495fdde65d79deca0..6903a3e7847b3fe7d36d471525456a83d1d06f4c 100644
--- a/examples/example-c/example_c.c
+++ b/examples/example-c/example_c.c
@@ -20,7 +20,7 @@
*
* -------------------------------------------------------------------------
*/
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#include
typedef struct st_example_result {
diff --git a/examples/example-c/example_prepared_c.c b/examples/example-c/example_prepared_c.c
index 7a4beb4991b7fa711e16028d36a333bd73c8dcd4..771cbd46c526a1d0ad49c76ff0b735d661533497 100644
--- a/examples/example-c/example_prepared_c.c
+++ b/examples/example-c/example_prepared_c.c
@@ -20,7 +20,7 @@
*
* -------------------------------------------------------------------------
*/
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
void create_example(intarkdb_connection conn, intarkdb_result intarkdb_result);
void prepare_example_insert(intarkdb_connection conn, intarkdb_result intarkdb_result);
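With the example CMakeLists now adding ${INTARKDB_HOME}/interface/c to the include path, example_c.c and example_prepared_c.c include "intarkdb_sql.h" directly instead of the old "interface/c/…" path. For orientation, here is a minimal sketch of how a consumer drives that C API. It is not part of this patch: the function names and status codes are taken from the declarations that appear later in this diff (in the removed Go-binding header), while the database path and the query string are invented; intarkdb_sql.h remains the authoritative reference.

```c
/* Illustrative sketch only, not part of this patch. Prototypes follow the
 * declarations visible elsewhere in this diff; check intarkdb_sql.h for the
 * authoritative signatures. The path and query are placeholders. */
#include <stdio.h>
#include "intarkdb_sql.h"   /* resolved via ${INTARKDB_HOME}/interface/c */

int main(void) {
    intarkdb_database db;
    intarkdb_connection conn;
    intarkdb_result result;

    if (intarkdb_open("./testdb", &db) != SQL_SUCCESS) {
        fprintf(stderr, "open failed\n");
        return 1;
    }
    if (intarkdb_connect(db, &conn) != SQL_SUCCESS) {
        intarkdb_close(&db);
        return 1;
    }
    result = intarkdb_init_result();
    if (result != NULL && intarkdb_query(conn, "SELECT 1", result) == SQL_SUCCESS) {
        printf("rows: %lld\n", (long long)intarkdb_row_count(result));
    }
    intarkdb_destroy_result(result);
    intarkdb_disconnect(&conn);
    intarkdb_close(&db);
    return 0;
}
```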
diff --git a/tools/sqlite3-api-test/CMakeLists.txt b/examples/sqlite3-api-test/CMakeLists.txt
similarity index 94%
rename from tools/sqlite3-api-test/CMakeLists.txt
rename to examples/sqlite3-api-test/CMakeLists.txt
index bff909dd7bd315958238b79a86640dc125870c29..5733b6d3c7625a6fb07aa12630a6c844ec6ce746 100644
--- a/tools/sqlite3-api-test/CMakeLists.txt
+++ b/examples/sqlite3-api-test/CMakeLists.txt
@@ -5,6 +5,7 @@ set(SQL_LIB_PATH ${INTARKDB_LIB_PATH})
## include
include_directories(${GSTOR_SRC_HOME})
include_directories(${INTARKDB_COMPUTE_SQL_INC_PATH})
+include_directories(${INTARKDB_HOME}/interface/c)
## include 3rd
include_directories(${INTARKDB_SECUREC_INC_PATH})
diff --git a/tools/sqlite3-api-test/sqlite3_api_test.c b/examples/sqlite3-api-test/sqlite3_api_test.c
similarity index 82%
rename from tools/sqlite3-api-test/sqlite3_api_test.c
rename to examples/sqlite3-api-test/sqlite3_api_test.c
index 1c4b114509d76a4052aefb25cedc42f591bcf19b..ef3a8640cd70a117db6c569812867803a3598252 100644
--- a/tools/sqlite3-api-test/sqlite3_api_test.c
+++ b/examples/sqlite3-api-test/sqlite3_api_test.c
@@ -8,12 +8,12 @@
#include
#endif
-#include "interface/c/intarkdb_sql.h"
-#include "interface/sqlite3_api_wrapper/include/sqlite3.h"
+#include "intarkdb_sql.h"
+#include "../../interface/sqlite3_api_wrapper/include/sqlite3.h"
int NUM_Threads[20] = {5, 10, 15, 20, 25, 30};
-char result[100] = {'\0'}; // 初始化为全零
+char result[100] = {'\0'};
int NUM_Insert = 10;
int *thread_index[100];
@@ -21,11 +21,8 @@ void *handle_multi[100];
int exec_handle(void *data, int argc, char **argv, char **colname)
{
- /* 计数器*/
int i = *(int *)(data);
*(int *)(data) = i + 1;
-
- /* 取出结果*/
printf("[%s] is [%s], [%s] is [%s]...\n", colname[0], argv[0], colname[1], argv[1]);
return 0;
}
@@ -41,10 +38,7 @@ void test_sqlite3_get_autocommit(sqlite3 *db)
int state = sqlite3_get_autocommit(db);
printf("autocommit state %d\n",state);
}
-// void test_sqlite3_result_null(sqlite3 *db)
-// {
-// sqlite3_result_null();
-// }
+
int test_for_each(sqlite3 *db)
{
sqlite3_stmt *stmt;
@@ -99,7 +93,6 @@ int test_for_expanded_sql(sqlite3 *db)
printf("Failed to prepare statement: %s\n", sqlite3_errmsg(db));
return rc;
}
- // 绑定参数
sqlite3_bind_text(stmt, 1, "name", -1, NULL);
sqlite3_bind_text(stmt, 2, "age", -1, NULL);
sqlite3_bind_text(stmt, 3, "salary", -1, NULL);
@@ -108,34 +101,34 @@ int test_for_expanded_sql(sqlite3 *db)
int ret;
while ( ret = sqlite3_step(stmt)== SQLITE_ROW) {
int cols = sqlite3_data_count(stmt);
- // printf("Current row has %d columns:\n", cols);
+
for (int i = 0; i < sqlite3_column_count(stmt); i++) {
int type = sqlite3_column_type(stmt,i);
const char *name = sqlite3_column_name(stmt, i);
- // printf("Column %s: type:%d ", name,type);
+
switch (type) {
case SQLITE_INTEGER:
- // printf("value:%d\n", sqlite3_column_int(stmt, i));
+
break;
case SQLITE_FLOAT:
- // printf("value:%f\n", sqlite3_column_double(stmt, i));
+
break;
case SQLITE_TEXT:
- // printf("value:%s\n", sqlite3_column_text(stmt, i));
+
break;
case SQLITE_BLOB:
- // printf("BLOB\n");
+
break;
case SQLITE_NULL:
- // printf("NULL\n");
+
break;
default:
- // printf("Unknown type\n");
+
break;
}
}
}
- //测试
+
printf("----------------test_for_expanded_sql----------------------\n");
printf("Original_sql:%s\n",sqlite3_sql(stmt));
printf("Expanded_sql:%s\n",sqlite3_expanded_sql(stmt));
@@ -151,7 +144,7 @@ int test_for_sqlite3_stmt_readonly(sqlite3 *db)
const char *sql = "SELECT name, age, salary FROM employees;";
const char* sql2 = "INSERT INTO employees (name, age, salary) VALUES ('xxx', 25, 1000.0)";
int rc = 0;
- //测试
+
printf("----------------test_for_sqlite3_stmt_readonly----------------------\n");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, 0);
if (rc != SQLITE_OK) {
@@ -184,15 +177,13 @@ int test_for_sqlite3_context_db_handle(sqlite3 *db)
sqlite3 *db_handle;
printf("----------------test_for_sqlite3_context_db_handle----------------------\n");
- // 创建上下文
+
context = sqlite3_create_context(db, 0, 0, NULL);
if (context == NULL) {
fprintf(stderr, "Can't create context: %s\n", sqlite3_errmsg(db));
sqlite3_close(db);
return 1;
}
-
- // 获取上下文关联的数据库句柄
db_handle = sqlite3_context_db_handle(context);
if (db_handle == NULL) {
fprintf(stderr, "Context does not have a database handle\n");
@@ -202,10 +193,7 @@ int test_for_sqlite3_context_db_handle(sqlite3 *db)
}
printf("Database handle: %p\n", db_handle);
-
- // 清理资源
sqlite3_free(context);
-
printf("-----------------------------------------------------------\n");
return 0;
}
@@ -235,19 +223,18 @@ int get_table_info(sqlite3 *db) {
);
if (rc == SQLITE_OK) {
- // 处理查询结果
for (int i = 0; i < (nRow + 1) * nCol; i++) {
if (i % nCol == 0) {
- printf("\n"); // 每行打印一个换行符
+ printf("\n");
}
- printf("%s ", pazResult[i]); // 打印查询结果
+ printf("%s ", pazResult[i]);
}
printf("\n");
- // 释放查询结果占用的内存
+
sqlite3_free_table(pazResult);
} else {
- // 处理错误
+
fprintf(stderr, "SQL error: %s\n", zErrMsg);
sqlite3_free(zErrMsg);
}
@@ -260,30 +247,21 @@ int get_table_info(sqlite3 *db) {
int test_sqlite3_bind_blob(sqlite3 *db)
{
printf("======sqlite3_bind_blob 测试 ======\n");
- // sqlite3 *db;
+
int rc;
sqlite3_stmt *stmt;
- // sqlite3_open("test.db", &db);
-
unsigned char blob_data[] = {0x01, 0x02, 0x03, 0x04, 0x05};
- // std::vector blob_data(5, 0x00);
- // unsigned char blob_data[] = {'1', '2'};
+
char *errorMsg;
const char *sql = "CREATE TABLE IF NOT EXISTS bind_blob(data BLOB)";
rc = sqlite3_exec(db, sql, NULL, NULL, &errorMsg);
if (rc != SQLITE_OK) {
printf("Failed to create table: %s\n", errorMsg);
sqlite3_free(errorMsg);
- // sqlite3_close(db);
- // return rc;
}
- // 准备 SQL 语句
sqlite3_prepare_v2(db, "INSERT INTO bind_blob(data) VALUES (?)", -1, &stmt, NULL);
-
- // 绑定 BLOB 数据
rc = sqlite3_bind_blob(stmt, 1, blob_data, sizeof(blob_data), SQLITE_STATIC);
- // 执行 SQL 语句
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
printf("Execution failed: %s\n", sqlite3_errmsg(db));
@@ -317,9 +295,7 @@ int test_sqlite3_bind_blob(sqlite3 *db)
} else {
printf("Execution failed: %s\n", sqlite3_errmsg(db));
}
- // 关闭语句对象
sqlite3_finalize(stmt);
- // sqlite3_close(db);
}
int test_zerobind()
@@ -334,8 +310,6 @@ int test_zerobind()
if (rc != SQLITE_OK) {
printf("Failed to create table: %s\n", errorMsg);
sqlite3_free(errorMsg);
- // sqlite3_close(db);
- // return rc;
}
// Prepare an INSERT statement with a blob parameter
@@ -348,7 +322,6 @@ int test_zerobind()
return rc;
}
- // Bind a zero value blob to the statement parameter
int paramIndex = 1;
int blobSize = 10; // Size in bytes
rc = sqlite3_bind_zeroblob(stmt, paramIndex, blobSize);
@@ -358,8 +331,6 @@ int test_zerobind()
sqlite3_close(db);
return rc;
}
-
- // Execute the prepared statement
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
printf("Execution failed: %s\n", sqlite3_errmsg(db));
@@ -369,7 +340,6 @@ int test_zerobind()
}
const char *sql2 = "SELECT data FROM zerobind";
- // sqlite3_stmt *stmt;
rc = sqlite3_prepare_v2(db, sql2, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
printf("Failed to prepare statement: %s\n", sqlite3_errmsg(db));
@@ -377,14 +347,11 @@ int test_zerobind()
return rc;
}
- // Fetch and print the results
rc = sqlite3_step(stmt);
if (rc == SQLITE_ROW) {
- // Get the blob data
const void *data = sqlite3_column_blob(stmt, 0);
int dataSize = sqlite3_column_bytes(stmt, 0);
if(dataSize > 0){
- // Print the blob data
printf("sqlite3_bind_zeroblob data: ");
for (int i = 0; i < dataSize; i++) {
printf("%02X ", ((unsigned char*)data)[i]);
@@ -396,7 +363,6 @@ int test_zerobind()
} else {
printf("Execution failed: %s\n", sqlite3_errmsg(db));
}
- // Finalize the statement and close the database connection
sqlite3_finalize(stmt);
sqlite3_close_v2(db);
@@ -433,11 +399,8 @@ int insert_data(sqlite3 *db)
{
int rc;
rc = insert_employee(db, "John Doe", 30, 5000.00);
-
rc = insert_employee(db, "Mal Col", 35, 6200.10);
rc = insert_employee(db, "Merry Csk", 40, 4500.00);
- // 获取受影响的行数
-
return rc;
}
@@ -463,7 +426,6 @@ int test2()
exit(0);
}
printf("open database success\n");
- /********************第二步,创建数据库表*************************/
char *sql_create_table;
char buf[120];
@@ -549,26 +511,21 @@ int bind_test(sqlite3 *db)
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg);
- // sqlite3_close(db);
return 1;
}
- // 插入一个带有 NULL 值的行
sql = "INSERT INTO persons (name, age, data) VALUES (?, ?, ?)";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg);
- // sqlite3_close(db);
return 1;
}
- // 绑定 NULL 值
rc = sqlite3_bind_text(stmt, 1, "John Doe", -1, SQLITE_STATIC);
if (rc != SQLITE_OK) {
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
sqlite3_finalize(stmt);
- // sqlite3_close(db);
return 1;
}
@@ -577,10 +534,8 @@ int bind_test(sqlite3 *db)
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
sqlite3_finalize(stmt);
- // sqlite3_close(db);
return 1;
}
- // 生成一些测试数据,测试数据太大不做打印
size_t data_size = 100 * 1024 * 1024; // 100 MB
unsigned char *data = (unsigned char *)malloc(data_size);
memset(data, 0xA5, data_size);
@@ -595,26 +550,21 @@ int bind_test(sqlite3 *db)
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
sqlite3_finalize(stmt);
- // sqlite3_close(db);
return 1;
}
- // 清除绑定的参数值
rc = sqlite3_clear_bindings(stmt);
if (rc != SQLITE_OK) {
- // 处理错误
sqlite3_finalize(stmt);
sqlite3_close(db);
return 1;
}
- // 重置 SQL 语句
+
rc = sqlite3_reset(stmt);
- // 重新绑定值
rc = sqlite3_bind_text(stmt, 1, "Mal Col", -1, SQLITE_STATIC);
if (rc != SQLITE_OK) {
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
sqlite3_finalize(stmt);
- // sqlite3_close(db);
return 1;
}
@@ -623,7 +573,6 @@ int bind_test(sqlite3 *db)
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
sqlite3_finalize(stmt);
- // sqlite3_close(db);
return 1;
}
unsigned char blob_data[] = {0x01, 0x02, 0x03, 0x04, 0x05};
@@ -638,16 +587,13 @@ int bind_test(sqlite3 *db)
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
sqlite3_finalize(stmt);
- // sqlite3_close(db);
return 1;
}
- // 查询并打印插入的行
sql = "SELECT * FROM persons";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
printf("%d\n",__LINE__);
fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
- // sqlite3_close(db);
return 1;
}
@@ -671,75 +617,15 @@ int test_sqlite3_errstr(sqlite3 *db)
{
int rc;
char *err_msg = NULL;
- // 尝试创建一个没名称的表,这将产生一个错误
rc = sqlite3_exec(db, "CREATE TABLE (id INTEGER PRIMARY KEY, name TEXT)", NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
fprintf(stderr, "ERR测试:SQL error: %s\n", err_msg);
- // fprintf(stderr, "ERR测试:Error code: %d, %s\n", rc, sqlite3_errstr(rc));
- // printf("ERR测试:Error code: %d\n", sqlite3_errcode(db));
sqlite3_free(err_msg);
return 1;
}
return 0;
}
-// int test_value_int() {
-// sqlite3 *db;
-// char *err_msg = NULL;
-// int rc;
-
-// // 打开数据库连接
-// rc = sqlite3_open("t.db", &db);
-// if (rc != SQLITE_OK) {
-// fprintf(stderr, "Cannot open database: %s\n", sqlite3_errmsg(db));
-// return 1;
-// }
-
-// // 创建测试表并插入数据
-// rc = sqlite3_exec(db, "CREATE TABLE test (id INTEGER, value TEXT)", NULL, NULL, &err_msg);
-// if (rc != SQLITE_OK) {
-// fprintf(stderr, "Cannot create table: %s\n", err_msg);
-// sqlite3_free(err_msg);
-// sqlite3_close(db);
-// return 1;
-// }
-
-// rc = sqlite3_exec(db, "INSERT INTO test (id, value) VALUES (1, '123'), (2, '3.14'), (3, 'hello')", NULL, NULL, &err_msg);
-// if (rc != SQLITE_OK) {
-// fprintf(stderr, "Cannot insert data: %s\n", err_msg);
-// sqlite3_free(err_msg);
-// sqlite3_close(db);
-// return 1;
-// }
-
-// // 使用 sqlite3_value_int() 处理数据
-// sqlite3_stmt *stmt;
-// rc = sqlite3_prepare_v2(db, "SELECT id, value FROM test", -1, &stmt, NULL);
-// if (rc != SQLITE_OK) {
-// fprintf(stderr, "Cannot prepare statement: %s\n", sqlite3_errmsg(db));
-// sqlite3_close(db);
-// return 1;
-// }
-
-// while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
-// int id = sqlite3_column_int(stmt, 0);
-// const char *value = (const char *)sqlite3_column_text(stmt, 1);
-// int int_value = 0;
-
-// // 手动处理 value 列的值
-// int_value = sqlite3_value_int((sqlite3_value *)sqlite3_column_value(stmt, 1));
-
-// printf("ID: %d, Value: %s, Int Value: %d\n", id, value, int_value);
-// }
-
-// if (rc != SQLITE_DONE) {
-// fprintf(stderr, "Error executing statement: %s\n", sqlite3_errmsg(db));
-// }
-
-// sqlite3_finalize(stmt);
-// sqlite3_close(db);
-// return 0;
-// }
-//忽略大小写,判断字符串是否相等
+
int test_sqlite3_str()
{
@@ -859,26 +745,26 @@ int main(int argc, char *argv[])
} else {
fprintf(stdout, "Table created successfully\n");
}
- last_insert_rowid(db);
+ // last_insert_rowid(db);
// //插入数据
insert_data(db);
//测试获取table信息
get_table_info(db);
test_for_each(db);
- test_for_expanded_sql(db);
- test_for_sqlite3_stmt_readonly(db);
- test_for_sqlite3_context_db_handle(db);
- test_sqlite3_get_autocommit(db);
+ // test_for_expanded_sql(db);
+ // test_for_sqlite3_stmt_readonly(db);
+ // test_for_sqlite3_context_db_handle(db);
+ // test_sqlite3_get_autocommit(db);
- test_sqlite3_bind_blob(db);
+ // test_sqlite3_bind_blob(db);
- test_zerobind();
+ // test_zerobind();
- test2();
- bind_test(db);
- test_sqlite3_errstr(db);
- test_sqlite3_str();
+ // test2();
+ // bind_test(db);
+ // test_sqlite3_errstr(db);
+ // test_sqlite3_str();
// 关闭数据库连接
sqlite3_close(db);
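Most of what this file loses are commented-out prints and non-English comments; the surviving tests still exercise the sqlite3 compatibility wrapper through the usual prepare/bind/step/column/finalize cycle. A condensed sketch of that cycle follows (table, predicate, and bound value are illustrative; whether each entry point is fully supported by the wrapper is exactly what this test program probes). The sketch parenthesizes the assignment in the step loop: in test_for_expanded_sql above, "while ( ret = sqlite3_step(stmt)== SQLITE_ROW)" assigns the comparison result to ret, because == binds tighter than =.

```c
/* Condensed version of the pattern used throughout sqlite3_api_test.c.
 * Standard SQLite3 C API calls; wrapper coverage of these entry points is
 * what the test program checks. */
#include <stdio.h>
#include "sqlite3.h"   /* or the wrapper header path used in the test */

static int query_employees(sqlite3 *db) {
    sqlite3_stmt *stmt = NULL;
    const char *sql = "SELECT name, age, salary FROM employees WHERE name = ?";
    int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
        return rc;
    }
    sqlite3_bind_text(stmt, 1, "John Doe", -1, SQLITE_STATIC);   /* bind the parameter */
    while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {            /* note the parentheses */
        printf("%s age=%d salary=%f\n",
               (const char *)sqlite3_column_text(stmt, 0),
               sqlite3_column_int(stmt, 1),
               sqlite3_column_double(stmt, 2));
    }
    sqlite3_finalize(stmt);                                      /* always release the statement */
    return rc == SQLITE_DONE ? SQLITE_OK : rc;
}
```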
diff --git a/interface/CMakeLists.txt b/interface/CMakeLists.txt
index e606c2ef7dd307ae3b2a051812855096441dbf41..5e686b6d13b34e429cbf7461bdd25b67498edd17 100644
--- a/interface/CMakeLists.txt
+++ b/interface/CMakeLists.txt
@@ -1,3 +1,5 @@
MESSAGE(STATUS "INSTARDB-INTERFACE CMAKE BUILD BEGIN")
-#add_subdirectory(jdbc)
+add_subdirectory(c)
+add_subdirectory(jdbc)
+add_subdirectory(sqlite3_api_wrapper)
diff --git a/src/interface/c/CMakeLists.txt b/interface/c/CMakeLists.txt
similarity index 100%
rename from src/interface/c/CMakeLists.txt
rename to interface/c/CMakeLists.txt
diff --git a/src/interface/c/intarkdb_sql.h b/interface/c/intarkdb_sql.h
similarity index 100%
rename from src/interface/c/intarkdb_sql.h
rename to interface/c/intarkdb_sql.h
diff --git a/interface/jdbc/CMakeLists.txt b/interface/jdbc/CMakeLists.txt
index a31cfb45fae2a15f574c5978d1459bfc726fb4d1..d0d37d4be26be2003fee8d6515b979a148f8269a 100644
--- a/interface/jdbc/CMakeLists.txt
+++ b/interface/jdbc/CMakeLists.txt
@@ -17,6 +17,7 @@ include_directories($ENV{JAVA_HOME}/include/win32)
else()
include_directories($ENV{JAVA_HOME}/include/linux)
endif(WIN32)
+include_directories(${INTARKDB_HOME}/interface/c)
set(INTARKDB_LINK_LIBS
fmt::fmt
@@ -49,7 +50,6 @@ else()
$
$
$
- $
$)
endif()
diff --git a/interface/jdbc/intarkdb-jdbc-1.0.0.1.jar b/interface/jdbc/intarkdb-jdbc-1.0.0.1.jar
new file mode 100755
index 0000000000000000000000000000000000000000..4f9179fa9961a2ee7b92cb93caba0fe0aed7737e
Binary files /dev/null and b/interface/jdbc/intarkdb-jdbc-1.0.0.1.jar differ
diff --git a/interface/jdbc/org_intarkdb_core_IntarkdbNative.c b/interface/jdbc/org_intarkdb_core_IntarkdbNative.c
index bccc6724238f005dd42faef71a50fbc3a1d96504..f0500fc9acd980f3615d70b53abd611963debe34 100644
--- a/interface/jdbc/org_intarkdb_core_IntarkdbNative.c
+++ b/interface/jdbc/org_intarkdb_core_IntarkdbNative.c
@@ -25,7 +25,7 @@
#include
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#include "network/server/srv_interface.h"
/* Header for class org_intarkdb_core_IntarkdbNative */
diff --git a/src/interface/rust/.keep b/interface/rust/.keep
similarity index 100%
rename from src/interface/rust/.keep
rename to interface/rust/.keep
diff --git a/src/interface/rust/ncti-orm/.idea/.gitignore b/interface/rust/ncti-orm/.idea/.gitignore
similarity index 100%
rename from src/interface/rust/ncti-orm/.idea/.gitignore
rename to interface/rust/ncti-orm/.idea/.gitignore
diff --git a/src/interface/rust/ncti-orm/.idea/modules.xml b/interface/rust/ncti-orm/.idea/modules.xml
similarity index 100%
rename from src/interface/rust/ncti-orm/.idea/modules.xml
rename to interface/rust/ncti-orm/.idea/modules.xml
diff --git a/src/interface/rust/ncti-orm/.idea/ncti-orm.iml b/interface/rust/ncti-orm/.idea/ncti-orm.iml
similarity index 100%
rename from src/interface/rust/ncti-orm/.idea/ncti-orm.iml
rename to interface/rust/ncti-orm/.idea/ncti-orm.iml
diff --git a/src/interface/rust/ncti-orm/.idea/vcs.xml b/interface/rust/ncti-orm/.idea/vcs.xml
similarity index 100%
rename from src/interface/rust/ncti-orm/.idea/vcs.xml
rename to interface/rust/ncti-orm/.idea/vcs.xml
diff --git a/src/interface/rust/ncti-orm/Cargo.toml b/interface/rust/ncti-orm/Cargo.toml
similarity index 100%
rename from src/interface/rust/ncti-orm/Cargo.toml
rename to interface/rust/ncti-orm/Cargo.toml
diff --git a/src/interface/rust/ncti-orm/README.md b/interface/rust/ncti-orm/README.md
similarity index 100%
rename from src/interface/rust/ncti-orm/README.md
rename to interface/rust/ncti-orm/README.md
diff --git a/src/interface/rust/ncti-orm/codegen/Cargo.toml b/interface/rust/ncti-orm/codegen/Cargo.toml
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/Cargo.toml
rename to interface/rust/ncti-orm/codegen/Cargo.toml
diff --git a/src/interface/rust/ncti-orm/codegen/src/lib.rs b/interface/rust/ncti-orm/codegen/src/lib.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/lib.rs
rename to interface/rust/ncti-orm/codegen/src/lib.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/main.rs b/interface/rust/ncti-orm/codegen/src/main.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/main.rs
rename to interface/rust/ncti-orm/codegen/src/main.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/utils/db.rs b/interface/rust/ncti-orm/codegen/src/utils/db.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/utils/db.rs
rename to interface/rust/ncti-orm/codegen/src/utils/db.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/utils/error.rs b/interface/rust/ncti-orm/codegen/src/utils/error.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/utils/error.rs
rename to interface/rust/ncti-orm/codegen/src/utils/error.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/utils/mod.rs b/interface/rust/ncti-orm/codegen/src/utils/mod.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/utils/mod.rs
rename to interface/rust/ncti-orm/codegen/src/utils/mod.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/utils/model.rs b/interface/rust/ncti-orm/codegen/src/utils/model.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/utils/model.rs
rename to interface/rust/ncti-orm/codegen/src/utils/model.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/utils/result.rs b/interface/rust/ncti-orm/codegen/src/utils/result.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/utils/result.rs
rename to interface/rust/ncti-orm/codegen/src/utils/result.rs
diff --git a/src/interface/rust/ncti-orm/codegen/src/utils/string_util.rs b/interface/rust/ncti-orm/codegen/src/utils/string_util.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/codegen/src/utils/string_util.rs
rename to interface/rust/ncti-orm/codegen/src/utils/string_util.rs
diff --git a/src/interface/rust/ncti-orm/interface/Cargo.toml b/interface/rust/ncti-orm/interface/Cargo.toml
similarity index 100%
rename from src/interface/rust/ncti-orm/interface/Cargo.toml
rename to interface/rust/ncti-orm/interface/Cargo.toml
diff --git a/src/interface/rust/ncti-orm/interface/README.md b/interface/rust/ncti-orm/interface/README.md
similarity index 100%
rename from src/interface/rust/ncti-orm/interface/README.md
rename to interface/rust/ncti-orm/interface/README.md
diff --git a/src/interface/rust/ncti-orm/interface/build.rs b/interface/rust/ncti-orm/interface/build.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/interface/build.rs
rename to interface/rust/ncti-orm/interface/build.rs
diff --git a/src/interface/rust/ncti-orm/interface/src/data.rs b/interface/rust/ncti-orm/interface/src/data.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/interface/src/data.rs
rename to interface/rust/ncti-orm/interface/src/data.rs
diff --git a/src/interface/rust/ncti-orm/interface/src/func.rs b/interface/rust/ncti-orm/interface/src/func.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/interface/src/func.rs
rename to interface/rust/ncti-orm/interface/src/func.rs
diff --git a/src/interface/rust/ncti-orm/interface/src/lib.rs b/interface/rust/ncti-orm/interface/src/lib.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/interface/src/lib.rs
rename to interface/rust/ncti-orm/interface/src/lib.rs
diff --git a/src/interface/rust/ncti-orm/macro_driver/Cargo.toml b/interface/rust/ncti-orm/macro_driver/Cargo.toml
similarity index 100%
rename from src/interface/rust/ncti-orm/macro_driver/Cargo.toml
rename to interface/rust/ncti-orm/macro_driver/Cargo.toml
diff --git a/src/interface/rust/ncti-orm/macro_driver/src/lib.rs b/interface/rust/ncti-orm/macro_driver/src/lib.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/macro_driver/src/lib.rs
rename to interface/rust/ncti-orm/macro_driver/src/lib.rs
diff --git a/src/interface/rust/ncti-orm/macro_driver/src/main.rs b/interface/rust/ncti-orm/macro_driver/src/main.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/macro_driver/src/main.rs
rename to interface/rust/ncti-orm/macro_driver/src/main.rs
diff --git a/src/interface/rust/ncti-orm/ncti_orm.iml b/interface/rust/ncti-orm/ncti_orm.iml
similarity index 100%
rename from src/interface/rust/ncti-orm/ncti_orm.iml
rename to interface/rust/ncti-orm/ncti_orm.iml
diff --git a/src/interface/rust/ncti-orm/rbs/Cargo.toml b/interface/rust/ncti-orm/rbs/Cargo.toml
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/Cargo.toml
rename to interface/rust/ncti-orm/rbs/Cargo.toml
diff --git a/src/interface/rust/ncti-orm/rbs/src/index.rs b/interface/rust/ncti-orm/rbs/src/index.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/index.rs
rename to interface/rust/ncti-orm/rbs/src/index.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/lib.rs b/interface/rust/ncti-orm/rbs/src/lib.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/lib.rs
rename to interface/rust/ncti-orm/rbs/src/lib.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/main.rs b/interface/rust/ncti-orm/rbs/src/main.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/main.rs
rename to interface/rust/ncti-orm/rbs/src/main.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/value/ext/de.rs b/interface/rust/ncti-orm/rbs/src/value/ext/de.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/value/ext/de.rs
rename to interface/rust/ncti-orm/rbs/src/value/ext/de.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/value/ext/mod.rs b/interface/rust/ncti-orm/rbs/src/value/ext/mod.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/value/ext/mod.rs
rename to interface/rust/ncti-orm/rbs/src/value/ext/mod.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/value/ext/se.rs b/interface/rust/ncti-orm/rbs/src/value/ext/se.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/value/ext/se.rs
rename to interface/rust/ncti-orm/rbs/src/value/ext/se.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/value/map.rs b/interface/rust/ncti-orm/rbs/src/value/map.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/value/map.rs
rename to interface/rust/ncti-orm/rbs/src/value/map.rs
diff --git a/src/interface/rust/ncti-orm/rbs/src/value/mod.rs b/interface/rust/ncti-orm/rbs/src/value/mod.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/rbs/src/value/mod.rs
rename to interface/rust/ncti-orm/rbs/src/value/mod.rs
diff --git a/src/interface/rust/ncti-orm/src/crud.rs b/interface/rust/ncti-orm/src/crud.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/src/crud.rs
rename to interface/rust/ncti-orm/src/crud.rs
diff --git a/src/interface/rust/ncti-orm/src/db.rs b/interface/rust/ncti-orm/src/db.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/src/db.rs
rename to interface/rust/ncti-orm/src/db.rs
diff --git a/src/interface/rust/ncti-orm/src/lib.rs b/interface/rust/ncti-orm/src/lib.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/src/lib.rs
rename to interface/rust/ncti-orm/src/lib.rs
diff --git a/src/interface/rust/ncti-orm/src/main.rs b/interface/rust/ncti-orm/src/main.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/src/main.rs
rename to interface/rust/ncti-orm/src/main.rs
diff --git a/src/interface/rust/ncti-orm/src/trails.rs b/interface/rust/ncti-orm/src/trails.rs
similarity index 100%
rename from src/interface/rust/ncti-orm/src/trails.rs
rename to interface/rust/ncti-orm/src/trails.rs
diff --git a/src/interface/sqlite3_api_wrapper/CMakeLists.txt b/interface/sqlite3_api_wrapper/CMakeLists.txt
similarity index 94%
rename from src/interface/sqlite3_api_wrapper/CMakeLists.txt
rename to interface/sqlite3_api_wrapper/CMakeLists.txt
index 55401b7e87b9adfd55f0f8d7c4140b475a6aa77a..119f5d4bafb9bb3f2cf953935a421a320d425b55 100644
--- a/src/interface/sqlite3_api_wrapper/CMakeLists.txt
+++ b/interface/sqlite3_api_wrapper/CMakeLists.txt
@@ -8,6 +8,7 @@ include_directories(${INTARKDB_SRC_PATH})
include_directories(${INTARKDB_GSTOR_INC_PATH})
include_directories(${INTARKDB_SECUREC_INC_PATH})
include_directories(${INTARKDB_COMPUTE_SQL_INC_PATH})
+include_directories(${INTARKDB_HOME}/interface/c)
link_directories(${INTARKDB_THRID_LIB_PATH})
diff --git a/src/interface/sqlite3_api_wrapper/include/sqlite3.h b/interface/sqlite3_api_wrapper/include/sqlite3.h
similarity index 100%
rename from src/interface/sqlite3_api_wrapper/include/sqlite3.h
rename to interface/sqlite3_api_wrapper/include/sqlite3.h
diff --git a/src/interface/sqlite3_api_wrapper/printf.c b/interface/sqlite3_api_wrapper/printf.c
similarity index 100%
rename from src/interface/sqlite3_api_wrapper/printf.c
rename to interface/sqlite3_api_wrapper/printf.c
diff --git a/src/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.c b/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.c
similarity index 99%
rename from src/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.c
rename to interface/sqlite3_api_wrapper/sqlite3_api_wrapper.c
index af773c446bb1749878bf5a00dadd6e5d6a1796e7..a8eb94aeb78a4983772633be96832cd7ce45bf27 100644
--- a/src/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.c
+++ b/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.c
@@ -21,8 +21,8 @@
* -------------------------------------------------------------------------
*/
#include "include/sqlite3.h"
-#include "interface/c/intarkdb_sql.h"
-#include "storage/gstor/zekernel/common/cm_defs.h"
+#include "../../src/interface/c/intarkdb_sql.h"
+#include "../../src/storage/gstor/zekernel/common/cm_defs.h"
#include
#include
diff --git a/src/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.cpp b/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.cpp
similarity index 99%
rename from src/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.cpp
rename to interface/sqlite3_api_wrapper/sqlite3_api_wrapper.cpp
index 6f0da8cbdc0320209894c6c8a9a95f3a23505e98..995a800997aa4cc1688e6ec3303ffbab8536bddf 100644
--- a/src/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.cpp
+++ b/interface/sqlite3_api_wrapper/sqlite3_api_wrapper.cpp
@@ -22,7 +22,7 @@
*/
#include "include/intarkdb.h"
#include "include/sqlite3.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#include "storage/gstor/zekernel/common/cm_defs.h"
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 5e06c348dd856ee53cdc660f146f5e728540f811..8b10c08a2fdb3eb73a5b0c73a7ae9184ac5b2434 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -2,7 +2,6 @@ MESSAGE(STATUS "INTARKDB CMAKE BUILD BEGIN")
add_subdirectory(storage)
add_subdirectory(compute)
-add_subdirectory(interface)
if (NOT ANDROID AND NOT MSVC)
add_subdirectory(network)
endif()
diff --git a/src/compute/kv/CMakeLists.txt b/src/compute/kv/CMakeLists.txt
index 8fa067d93bcb10dd3f20f51e367f1c3e74c4d9a9..7842c2c9e6e1e6a73c8c8c35e3354fa8f8dbd993 100644
--- a/src/compute/kv/CMakeLists.txt
+++ b/src/compute/kv/CMakeLists.txt
@@ -34,6 +34,7 @@ include_directories(${INTARKDB_SRC_PATH})
include_directories(${INTARKDB_ZEKERNEL_COMMON_INC_PATH})
include_directories(${INTARKDB_UTF8PROC_INC_PATH})
include_directories(${INTARKDB_CJSON_PATH})
+include_directories(${INTARKDB_HOME}/interface/c)
# kv_object
add_library(kv_object OBJECT ${KV_source})
diff --git a/src/compute/kv/intarkdb_kv-c.cpp b/src/compute/kv/intarkdb_kv-c.cpp
index 2ae46b7c8673953b4bd2e411edca6957c5da1374..df6f13f1f631cde786e729906aa0eb5e824d4cf6 100644
--- a/src/compute/kv/intarkdb_kv-c.cpp
+++ b/src/compute/kv/intarkdb_kv-c.cpp
@@ -26,7 +26,7 @@
#include "intarkdb_kv.hpp"
#include "compute/kv/kv_connection.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
struct DatabaseWrapper_kv {
std::shared_ptr instance;
diff --git a/src/compute/sql/CMakeLists.txt b/src/compute/sql/CMakeLists.txt
index 71b31f0f0cc291a5c1379a01d89032ee106e485d..03376aadef940ac463fafd3c5461fdc582585440 100644
--- a/src/compute/sql/CMakeLists.txt
+++ b/src/compute/sql/CMakeLists.txt
@@ -65,6 +65,7 @@ include_directories(${INTARKDB_CJSON_PATH})
include_directories(${INTARKDB_GMSSL_INC_PATH})
include_directories(${INTARKDB_ZLIB_INC_PATH})
include_directories(${INTARKDB_COMPUTE_TS_INC_PATH})
+include_directories(${INTARKDB_HOME}/interface/c)
# sql_object
add_library(sql_object OBJECT ${binder_source} ${catalog_source} ${main_source} ${common_source} ${execution_source} ${planner_source} ${data_source} ${type_source} ${function_source})
diff --git a/src/compute/sql/binder/bind_create.cpp b/src/compute/sql/binder/bind_create.cpp
index 45357111377eb5f261f7a5b1485df5ddce3dabe7..ab94042e0f88c1d9a7996ba7e983bd59682cde54 100644
--- a/src/compute/sql/binder/bind_create.cpp
+++ b/src/compute/sql/binder/bind_create.cpp
@@ -143,6 +143,7 @@ auto Binder::BindMultiColConstraint(const duckdb_libpgquery::PGConstraint &const
throw intarkdb::Exception(ExceptionType::NOT_IMPLEMENTED, "not support check constraint yet!");
}
case duckdb_libpgquery::PG_CONSTR_FOREIGN: {
+ throw intarkdb::Exception(ExceptionType::NOT_IMPLEMENTED, "not support references constraint yet!");
auto table_ref = BindRangeVar(*NullCheckPtrCast(constraint.pktable), false);
auto base_table = static_cast(table_ref.get());
auto pk_user = base_table->GetSchema();
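The added throw makes the binder reject table-level FOREIGN KEY constraints up front with a NOT_IMPLEMENTED error rather than attempting to bind the referenced table. Below is a hypothetical sketch of the kind of DDL that would now fail when issued through the C API; the table and column names are invented, and how the error is surfaced depends on intarkdb_query's result handling.

```c
/* Hypothetical illustration (table/column names invented): with this patch,
 * a table-level REFERENCES constraint is rejected by the binder with
 * "not support references constraint yet!" instead of being bound. */
#include <stdio.h>
#include "intarkdb_sql.h"

static void try_foreign_key(intarkdb_connection conn, intarkdb_result result) {
    const char *ddl =
        "CREATE TABLE orders ("
        "  id INTEGER PRIMARY KEY,"
        "  customer_id INTEGER,"
        "  FOREIGN KEY (customer_id) REFERENCES customers(id))";
    if (intarkdb_query(conn, ddl, result) != SQL_SUCCESS) {
        printf("foreign key DDL rejected, as expected after this change\n");
    }
}
```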
diff --git a/src/compute/sql/main/capi/intarkdb_sql-c.cpp b/src/compute/sql/main/capi/intarkdb_sql-c.cpp
index e5df65c205dd63b375c1c9abee93c00c082fa569..d63abc74731e0f73f26504d6a450b787af4af9df 100644
--- a/src/compute/sql/main/capi/intarkdb_sql-c.cpp
+++ b/src/compute/sql/main/capi/intarkdb_sql-c.cpp
@@ -25,7 +25,7 @@
#include "main/connection.h"
#include "type/type_str.h"
#include "common/exception.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
const int32_t DATE_FMT_YYYYMMDDHHMISS_LEN = 19;
diff --git a/src/compute/sql/test/c_api_test.cpp b/src/compute/sql/test/c_api_test.cpp
index 6a3966bd3da4c42879a4db297672c63faf63f445..3210384dbfeede9a53127c3836752196f94d49e3 100644
--- a/src/compute/sql/test/c_api_test.cpp
+++ b/src/compute/sql/test/c_api_test.cpp
@@ -31,7 +31,7 @@
#include "main/connection.h"
#include "main/database.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
class CApiTest : public ::testing::Test {
protected:
diff --git a/src/include/intarkdb.h b/src/include/intarkdb.h
index 7394aa38345c03542d191463bad274d790b2445e..55ab6ed8acd7fec31241d7e32481f225f4aacb5d 100644
--- a/src/include/intarkdb.h
+++ b/src/include/intarkdb.h
@@ -24,7 +24,6 @@
#ifndef __C_API_INTARKDB_H__
#define __C_API_INTARKDB_H__
-#include "interface/c/intarkdb_sql.h"
#include "compute/kv/intarkdb_kv.h"
#include "compute/sql/include/common/winapi.h"
#include "../VERSION.h"
diff --git a/src/interface/CMakeLists.txt b/src/interface/CMakeLists.txt
deleted file mode 100644
index 773f4d8b43a0a0fc73c1a0faee14b5b5c54a8156..0000000000000000000000000000000000000000
--- a/src/interface/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-MESSAGE(STATUS "INSTARDB-INTERFACE CMAKE BUILD BEGIN")
-
-add_subdirectory(c)
-add_subdirectory(sqlite3_api_wrapper)
-#add_subdirectory(jdbc)
diff --git a/src/interface/go/api/.gitignore b/src/interface/go/api/.gitignore
deleted file mode 100644
index 2fb9c39fef4f0ca5aaeb2c9596466f29b2026460..0000000000000000000000000000000000000000
--- a/src/interface/go/api/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-go.sum
-intarkdb/
diff --git a/src/interface/go/api/go.mod b/src/interface/go/api/go.mod
deleted file mode 100644
index fbfbaa0001c324ec47fcd2e9e819ebd725578cd6..0000000000000000000000000000000000000000
--- a/src/interface/go/api/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module api
-
-go 1.18
diff --git a/src/interface/go/api/intarkdb_interface/include/intarkdb.h b/src/interface/go/api/intarkdb_interface/include/intarkdb.h
deleted file mode 100644
index 4c818ff4d5e3354ac3655accddf61c1e449386e6..0000000000000000000000000000000000000000
--- a/src/interface/go/api/intarkdb_interface/include/intarkdb.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb.h
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/api/intarkdb_interface/include/intarkdb.h
-*
-* -------------------------------------------------------------------------
- */
-
-#pragma once
-#include
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-#ifdef WIN32
-#define EXP_SQL_API __declspec(dllexport)
-#define EXPORT_API __declspec(dllexport)
-#else
-#define EXP_SQL_API __attribute__((visibility("default")))
-#define EXPORT_API __attribute__((visibility("default")))
-#endif
-
-// sql
-typedef __int64_t int64_t;
-
-typedef struct st_api_text {
- char *str;
- int64_t len;
- int64_t data_type;
-} api_text_t;
-
-typedef struct st_result_column {
- char *data;
- int64_t data_len;
-} result_column;
-
-typedef struct st_result_row {
- int64_t column_count; //列数
- result_column* row_column_list; //行包含的列列表
- struct st_result_row* next;
-} result_row;
-
-typedef struct st_intarkdb_res_def {
- int64_t row_count; //行数
- bool is_select;
- void* res_row; //行结果集 //这里实际是 RecordBatch*
-
- int64_t column_count; //列数
- api_text_t* column_names; //列名
- char* msg; //执行结果信息
-
- char* value_ptr; // for free column value
- int64_t row_idx; // for next
-} intarkdb_res_def;
-
-typedef struct st_intarkdb_database {
- void* db;
-} *intarkdb_database;
-
-typedef struct st_intarkdb_connection {
- void* conn;
-} *intarkdb_connection;
-
-typedef enum en_status_def {
- SQL_ERROR = -1,
- SQL_SUCCESS = 0,
- SQL_TIMEDOUT = 1,
-} intarkdb_state_t;
-
-typedef struct st_intarkdb_res_def* intarkdb_result;
-
-EXP_SQL_API intarkdb_state_t intarkdb_open(const char *path, intarkdb_database *db);
-
-EXP_SQL_API void intarkdb_close(intarkdb_database *db);
-
-EXP_SQL_API intarkdb_state_t intarkdb_connect(intarkdb_database database, intarkdb_connection *conn);
-
-EXP_SQL_API void intarkdb_disconnect(intarkdb_connection *conn);
-
-EXP_SQL_API intarkdb_state_t intarkdb_query(intarkdb_connection connection, const char *query, intarkdb_result result);
-
-EXP_SQL_API intarkdb_result intarkdb_init_result();
-
-EXP_SQL_API int64_t intarkdb_row_count(intarkdb_result result);
-
-EXP_SQL_API int64_t intarkdb_column_count(intarkdb_result result);
-
-EXP_SQL_API const char * intarkdb_column_name(intarkdb_result result, int64_t col);
-
-EXP_SQL_API char * intarkdb_value_varchar(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API void intarkdb_free_row(intarkdb_result result);
-
-EXP_SQL_API void intarkdb_destroy_result(intarkdb_result result);
-
-// kv
-/* This is the reply object returned by redisCommand() */
-typedef __SIZE_TYPE__ size_t;
-typedef struct intarkdbReply_t {
- int type; /* return type */
- size_t len; /* Length of string */
- char *str; /* err or value*/
-} intarkdbReply;
-
-typedef enum en_status {
- GS_ERROR = -1,
- GS_SUCCESS = 0,
- GS_TIMEDOUT = 1,
- GS_IGNORE_OBJECT_EXISTS = 2,
- GS_FULL_CONN = 3, // 链接数已满
-} status_t;
-
-EXPORT_API status_t intarkdb_startup_db(int dbtype, char *path);
-EXPORT_API void intarkdb_shutdown_db(void);
-
-EXPORT_API status_t intarkdb_kv_begin(void *handle);
-EXPORT_API status_t intarkdb_kv_commit(void *handle);
-EXPORT_API status_t intarkdb_kv_rollback(void *handle);
-
-EXPORT_API status_t alloc_kv_handle(void **handle);
-EXPORT_API void free_kv_handle(void *handle);
-
-EXPORT_API status_t create_or_open_kv_table(void *handle, const char *table_name);
-
-EXPORT_API void intarkdb_freeReplyObject(intarkdbReply *reply);
-
-EXPORT_API void *intarkdb_command_set(void *handle, const char *key, const char *val);
-EXPORT_API void *intarkdb_command_get(void *handle, const char *key);
-EXPORT_API void *intarkdb_command_del(void *handle, const char *key, int prefix, int *count);
-#ifdef __cplusplus
-}
-#endif
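Besides the SQL result accessors, the header removed above also declared the KV command surface (intarkdb_startup_db, alloc_kv_handle, create_or_open_kv_table, intarkdb_command_set/get/del, intarkdb_freeReplyObject). The following minimal C sketch mirrors what the deleted Go wrapper below did with those calls; the database path, table name, and key/value are invented, and the GS_* codes come from the status_t enum above.

```c
/* Sketch of the KV flow declared in the removed header; error handling is
 * reduced to the essentials. Path, table name, and key/value are placeholders. */
#include <stdio.h>

static void kv_demo(void) {
    void *handle = NULL;
    intarkdbReply *reply = NULL;

    if (intarkdb_startup_db(0 /* dbtype: intarkdb */, "./kvdb") != GS_SUCCESS) {
        return;
    }
    if (alloc_kv_handle(&handle) == GS_SUCCESS &&
        create_or_open_kv_table(handle, "SYS_KV") == GS_SUCCESS) {
        intarkdb_kv_begin(handle);
        reply = (intarkdbReply *)intarkdb_command_set(handle, "greeting", "hello");
        if (reply != NULL) {
            if (reply->type == GS_SUCCESS) {
                intarkdb_kv_commit(handle);   /* commit only on success, as the Go code did */
            }
            intarkdb_freeReplyObject(reply);
        }
        reply = (intarkdbReply *)intarkdb_command_get(handle, "greeting");
        if (reply != NULL) {
            printf("greeting = %s\n", reply->str);
            intarkdb_freeReplyObject(reply);
        }
    }
    if (handle != NULL) {
        free_kv_handle(handle);
    }
    intarkdb_shutdown_db();
}
```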
diff --git a/src/interface/go/api/intarkdb_interface/intarkdb_interface.go b/src/interface/go/api/intarkdb_interface/intarkdb_interface.go
deleted file mode 100644
index 82d810eb8d5e267bca316a007bc976d66ec76dda..0000000000000000000000000000000000000000
--- a/src/interface/go/api/intarkdb_interface/intarkdb_interface.go
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb_interface.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/api/intarkdb_interface/intarkdb_interface.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb_interface
-
-/*
-// 头文件的位置,相对于源文件是当前目录,所以是 .,头文件在多个目录时写多个 #cgo CFLAGS: ...
-#cgo CFLAGS: -I./include
-// 从哪里加载动态库,位置与文件名,-ladd 加载 libadd.so 文件
-// #cgo LDFLAGS: -L${SRCDIR}/lib -lintarkdb -Wl,-rpath=${SRCDIR}/lib
-#cgo LDFLAGS: -L${SRCDIR}/../../../../../output/release/lib -lintarkdb -Wl,-rpath=${SRCDIR}/../../../../../output/release/lib
-
-#include
-#include
-#include
-#include
-#include "include/intarkdb.h"
-*/
-import "C"
-import (
- "fmt"
- "unsafe"
-)
-
-type DBStatus int
-
-const (
- dbError DBStatus = -1
- dbSuccess DBStatus = 0
- dbTimeout DBStatus = 1
- ignoreObjectExists DBStatus = 2
- fullConn DBStatus = 3
-)
-
-var dbStatusTag = map[DBStatus]string{
- dbError: "error",
- dbSuccess: "success",
- dbTimeout: "timeout",
- ignoreObjectExists: "ignore object exists",
- fullConn: "full conn",
-}
-
-func StatusMessage(dbStatus DBStatus) string {
- return dbStatusTag[dbStatus]
-}
-
-type DBType int
-
-const (
- Intarkdb DBType = 0
- CEIL DBType = 1
-)
-
-const (
- SQL string = "sql"
- KV string = "kv"
- Quit string = "quit"
- OK string = "ok"
- Failed string = "failed"
-)
-
-type IntarkdbSQL struct {
- db C.intarkdb_database
- connection C.intarkdb_connection
- result C.intarkdb_result
-}
-
-type SQLInterface interface {
- IntarkdbOpen(path string) (err error)
- IntarkdbClose()
- IntarkdbConnect() (err error)
- IntarkdbDisconnect()
- IntarkdbInitResult() (err error)
- IntarkdbQuery(query string) (err error)
- IntarkdbRowCount() uint64
- IntarkdbColumnCount() uint64
- IntarkdbColumnName(col uint8) string
- IntarkdbValueVarchar(row, col uint8) string
- IntarkdbFreeRow()
- IntarkdbDestroyResult()
-}
-
-func (g *IntarkdbSQL) IntarkdbOpen(path string) (err error) {
- cPath := C.CString(path)
- defer C.free(unsafe.Pointer(cPath))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_open(cPath, &g.db))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("DB open %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbClose() {
- C.intarkdb_close(&g.db)
- fmt.Println("DB close")
-}
-
-func (g *IntarkdbSQL) IntarkdbConnect() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_connect(g.db, &g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("connection %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbDisconnect() {
- C.intarkdb_disconnect(&g.connection)
- fmt.Println("connection close")
-}
-
-func (g *IntarkdbSQL) IntarkdbInitResult() (err error) {
- g.result = C.intarkdb_init_result()
- if g.result == nil {
- err = fmt.Errorf("intarkdb init result fail")
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbQuery(query string) (err error) {
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_query(g.connection, cQuery, g.result))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("intarkdb query %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbRowCount() int64 {
- return int64(C.intarkdb_row_count(g.result))
-}
-
-func (g *IntarkdbSQL) IntarkdbColumnCount() int64 {
- return int64(C.intarkdb_column_count(g.result))
-}
-
-func (g *IntarkdbSQL) IntarkdbColumnName(col int64) string {
- return C.GoString(C.intarkdb_column_name(g.result, C.long(col)))
-}
-
-func (g *IntarkdbSQL) IntarkdbValueVarchar(row, col int64) string {
- return C.GoString(C.intarkdb_value_varchar(g.result, C.long(row), C.long(col)))
-}
-
-func (g *IntarkdbSQL) IntarkdbFreeRow() {
- C.intarkdb_free_row(g.result)
- fmt.Println("result free success")
-}
-
-func (g *IntarkdbSQL) IntarkdbDestroyResult() {
- C.intarkdb_destroy_result(g.result)
- fmt.Println("result destroy success")
-}
-
-type IntarkdbKV struct {
- handle unsafe.Pointer
-}
-
-type KVInterface interface {
- OpenDB(dbtype DBType, path string) (err error)
- CloseDB()
- AllocKVHandle() (err error)
- FreeKVHandle()
- CreateOrOpenKVTable(name string) (err error)
- Free(reply *C.intarkdbReply)
- Set(key, value string) (err error)
- Get(key string) (value string, err error)
- Del(key string, prefix int32) (err error)
- TransactionBegin() (err error)
- TransactionCommit() (err error)
- TransactionRollback() (err error)
-}
-
-func (g *IntarkdbKV) OpenDB(dbtype DBType, path string) (err error) {
- cPath := C.CString(path)
- defer C.free(unsafe.Pointer(cPath))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_startup_db((C.int)(dbtype), cPath))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("open db %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) CloseDB() {
- C.intarkdb_shutdown_db()
- fmt.Println("close db success!")
-}
-
-func (g *IntarkdbKV) AllocKVHandle() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.alloc_kv_handle(&(g.handle)))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("alloc kv handle %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) FreeKVHandle() {
- C.free_kv_handle(g.handle)
- fmt.Println("alloc kv handle free success")
-}
-
-func (g *IntarkdbKV) CreateOrOpenKVTable(name string) (err error) {
- cName := C.CString(name)
- defer C.free(unsafe.Pointer(cName))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.create_or_open_kv_table(g.handle, cName))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("create or open kv table %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) Set(key, value string) (err error) {
- cKey := C.CString(key)
- cValue := C.CString(value)
- defer C.free(unsafe.Pointer(cKey))
- defer C.free(unsafe.Pointer(cValue))
- g.TransactionBegin()
- ptr := (*C.intarkdbReply)(C.intarkdb_command_set(g.handle, cKey, cValue))
- defer g.free(ptr)
-
- result := C.GoString(ptr.str)
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- if result == OK {
- fmt.Println("set key success")
- g.TransactionCommit()
- } else {
- dbStatus = dbError
- }
- }
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("set key %s", dbStatusTag[dbStatus])
-
- }
- return
-}
-
-func (g *IntarkdbKV) Get(key string) (value string, err error) {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
- g.TransactionBegin()
- ptr := (*C.intarkdbReply)(C.intarkdb_command_get(g.handle, cKey))
- defer g.free(ptr)
-
- value = C.GoString(ptr.str)
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- if value != "" {
- fmt.Println("get key success")
- g.TransactionCommit()
- } else {
- dbStatus = dbError
- }
- }
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("get key %s", dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) Del(key string, prefix int32) (err error) {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
- var count int32 = -1
- cCount := (*C.int)(unsafe.Pointer(&count))
-
- g.TransactionBegin()
- ptr := (*C.intarkdbReply)(C.intarkdb_command_del(g.handle, cKey, C.int(prefix), cCount))
- defer g.free(ptr)
-
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- if count > 0 {
- fmt.Println("del key success")
- g.TransactionCommit()
- } else {
- dbStatus = dbError
- }
- }
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("del key %s", dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) free(reply *C.intarkdbReply) {
- C.intarkdb_freeReplyObject(reply)
-}
-
-func (g *IntarkdbKV) TransactionBegin() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_kv_begin(g.handle))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("transaction begin %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) TransactionCommit() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_kv_commit(g.handle))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("transaction commit %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) TransactionRollback() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_kv_rollback(g.handle))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("transaction rollback %s", dbStatusTag[dbStatus])
- }
- return
-}
diff --git a/src/interface/go/api/main.go b/src/interface/go/api/main.go
deleted file mode 100644
index d5a355245e0752e18d19c088e16d639080ce595a..0000000000000000000000000000000000000000
--- a/src/interface/go/api/main.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* main.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/api/main.go
-*
-* -------------------------------------------------------------------------
- */
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "strings"
-
- "api/intarkdb_interface"
-)
-
-var path string
-var operatorType string
-
-func init() {
- flag.StringVar(&path, "path", ".", "DB path")
- flag.StringVar(&operatorType, "type", "sql", "DB type : sql, kv")
-}
-
-func main() {
- flag.Parse()
- fmt.Println("[go] DB path:", path)
-
- switch operatorType {
- case intarkdb_interface.SQL:
- sql(path)
- case intarkdb_interface.KV:
- kv(path)
- default:
- fmt.Println("operatorType error, operatorType = ", operatorType)
- }
-}
-
-func kv(path string) {
- var intarkdb = intarkdb_interface.IntarkdbKV{}
- err := intarkdb.OpenDB(intarkdb_interface.Intarkdb, path)
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdb.CloseDB()
-
- err = intarkdb.AllocKVHandle()
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdb.FreeKVHandle()
-
- intarkdb.CreateOrOpenKVTable("SYS_KV")
-
- fmt.Println("example : set key value")
- fmt.Println("example : get key")
- fmt.Println("example : del key")
- fmt.Println("example : quit")
-
- scanner := bufio.NewScanner(os.Stdin)
- for {
- fmt.Print(">> ")
- scanner.Scan()
- query := scanner.Text()
- query = strings.TrimLeft(query, " ")
- query = strings.TrimRight(query, " ")
-
- if query == intarkdb_interface.Quit {
- break
- }
- querys := strings.Split(query, " ")
- if len(querys) > 1 {
- temp := querys[0]
- switch temp {
- case "set":
- if len(querys) == 3 {
- intarkdb.Set(querys[1], querys[2])
- }
- case "get":
- if len(querys) == 2 {
- value, err := intarkdb.Get(querys[1])
- if err == nil && value != "" {
- fmt.Println("get key =", querys[1], ", value =", value)
- }
- }
- case "del":
- if len(querys) == 2 {
- intarkdb.Del(querys[1], 0)
- }
- }
- }
- }
-}
-
-func sql(path string) {
- var intarkdb = intarkdb_interface.IntarkdbSQL{}
-
- err := intarkdb.IntarkdbOpen(path)
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdb.IntarkdbClose()
-
- err = intarkdb.IntarkdbConnect()
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdb.IntarkdbDisconnect()
-
- err = intarkdb.IntarkdbInitResult()
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdb.IntarkdbDestroyResult()
-
- scanner := bufio.NewScanner(os.Stdin)
- for {
- fmt.Print(">> ")
- scanner.Scan()
- query := scanner.Text()
- query = strings.TrimLeft(query, " ")
- query = strings.TrimRight(query, " ")
-
- if query == intarkdb_interface.Quit {
- break
- }
-
- if !strings.HasSuffix(query, ";") {
- fmt.Println("sql needs ; end")
- continue
- }
- // if !(strings.HasPrefix(query, "CREATE") || strings.HasPrefix(query, "INSERT") || strings.HasPrefix(query, "SELECT") ||
- // strings.HasPrefix(query, "DELETE") || strings.HasPrefix(query, "UPDATE")) {
- // fmt.Println("sql supports CREATE,INSERT,DELETE,UPDATE,SELECT only")
- // continue
- // }
-
- querys := strings.Split(query, ";")
- for index := range querys {
- if querys[index] == "" {
- continue
- }
- err = intarkdb.IntarkdbQuery(querys[index])
- if err != nil {
- fmt.Println(err.Error())
- break
- }
- rowCount := intarkdb.IntarkdbRowCount()
- for row := int64(0); row < rowCount; row++ {
- columnCount := intarkdb.IntarkdbColumnCount()
- if row == 0 {
- fmt.Print(" ")
- for col := int64(0); col < columnCount; col++ {
- colName := intarkdb.IntarkdbColumnName(col)
- fmt.Print(colName, " ")
- }
- fmt.Println()
- }
- fmt.Print(row, " ")
- for col := int64(0); col < columnCount; col++ {
- colValue := intarkdb.IntarkdbValueVarchar(row, col)
- fmt.Print(colValue, " ")
- }
- fmt.Println()
- }
- intarkdb.IntarkdbFreeRow()
- }
- }
-}
diff --git a/src/interface/go/api/readme.md b/src/interface/go/api/readme.md
deleted file mode 100644
index 271cf93d530640e9da4ae9fcf8c16023c2e7f88d..0000000000000000000000000000000000000000
--- a/src/interface/go/api/readme.md
+++ /dev/null
@@ -1,56 +0,0 @@
-#####
-##Simple Go SQL client test program
-#####
-
-##
-##Dependencies
-Required shared libraries
-gstor_sql.so
-gstor_dev_c.so
-gstor.so
-storage.so
-
-Required header file
-gstor_interface/include/gstor.h
-
-##
-##Running
-Run go run main.go in the current directory, or run it with a directory: go run main.go -path database_dir (database_dir is the gstor database directory)
-Running without a directory creates a gstor database directory under the current directory
-
-##
-##Currently supported features
-1. Create table
-2. Insert, delete, update, select
-3. Commit transaction
-
-##
-##Current limitations (the SQL support is incomplete and unstable; operating outside the constraints below may crash the client)
-1. Every table must be created with a primary key
-2. Inserts must specify every column
-3. Queries can only filter by primary key or use no condition at all, and must select all columns
-4. Updates must filter by primary key and set columns in declaration order (every column before the one being changed must be listed; later columns may be omitted)
-5. Deletes must filter by primary key
-6. Transactions must be committed explicitly; auto-commit is not yet supported
-7. Complex SQL statements are not supported; only single-table operations
-8. Only equality conditions and AND conditions are supported
-
-##
-##Test cases
-##Create table - with primary key
-CREATE TABLE AAAA_TEST_TABLE(LOGID INTEGER, ACCTID INTEGER, CHGAMT VARCHAR(20), BAK1 VARCHAR(20), BAK2 VARCHAR(20), PRIMARY KEY (LOGID,ACCTID));
-##Insert - all columns specified
-INSERT INTO AAAA_TEST_TABLE(LOGID,ACCTID,CHGAMT,BAK1,BAK2) VALUES (1,11,'AAA','bak1','bak2'), (2,22,'BBB','bak1','bak2');
-##Delete - by primary key
-DELETE FROM AAAA_TEST_TABLE WHERE LOGID=1 and ACCTID=11;
-##Update - by primary key, columns set in order
-UPDATE AAAA_TEST_TABLE set LOGID=2,ACCTID=22,CHGAMT='CCC' WHERE LOGID=2 and ACCTID=22;
-##Query 1 - by primary key
-SELECT LOGID,ACCTID,CHGAMT,BAK1,BAK2 FROM AAAA_TEST_TABLE where LOGID=1 and ACCTID=11;
-##Query 2 - full table scan
-SELECT LOGID,ACCTID,CHGAMT,BAK1,BAK2 FROM AAAA_TEST_TABLE;
-
-######
-SQL statements entered in the client end with an ASCII semicolon
-Type quit to exit the program
-######
\ No newline at end of file
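For reference, a minimal non-interactive sketch of the flow this readme describes, using only the api/intarkdb_interface wrappers that main.go above relies on. The explicit COMMIT statement is an assumption: the readme requires an explicit commit (constraint 6) but does not show its syntax.

```go
package main

import (
	"fmt"

	"api/intarkdb_interface"
)

func main() {
	var db intarkdb_interface.IntarkdbSQL

	if err := db.IntarkdbOpen("."); err != nil {
		fmt.Println(err)
		return
	}
	defer db.IntarkdbClose()

	if err := db.IntarkdbConnect(); err != nil {
		fmt.Println(err)
		return
	}
	defer db.IntarkdbDisconnect()

	if err := db.IntarkdbInitResult(); err != nil {
		fmt.Println(err)
		return
	}
	defer db.IntarkdbDestroyResult()

	stmts := []string{
		"CREATE TABLE AAAA_TEST_TABLE(LOGID INTEGER, ACCTID INTEGER, CHGAMT VARCHAR(20), BAK1 VARCHAR(20), BAK2 VARCHAR(20), PRIMARY KEY (LOGID,ACCTID))",
		"INSERT INTO AAAA_TEST_TABLE(LOGID,ACCTID,CHGAMT,BAK1,BAK2) VALUES (1,11,'AAA','bak1','bak2'), (2,22,'BBB','bak1','bak2')",
		"COMMIT", // assumed syntax; the readme only states that an explicit commit is required
		"SELECT LOGID,ACCTID,CHGAMT,BAK1,BAK2 FROM AAAA_TEST_TABLE",
	}
	for _, stmt := range stmts {
		if err := db.IntarkdbQuery(stmt); err != nil {
			fmt.Println(err)
			return
		}
		// print whatever rows the statement returned, column values as strings
		for row := int64(0); row < db.IntarkdbRowCount(); row++ {
			for col := int64(0); col < db.IntarkdbColumnCount(); col++ {
				fmt.Print(db.IntarkdbValueVarchar(row, col), " ")
			}
			fmt.Println()
		}
		db.IntarkdbFreeRow()
	}
}
```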
diff --git a/src/interface/go/apiv2/.gitignore b/src/interface/go/apiv2/.gitignore
deleted file mode 100644
index 2fb9c39fef4f0ca5aaeb2c9596466f29b2026460..0000000000000000000000000000000000000000
--- a/src/interface/go/apiv2/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-go.sum
-intarkdb/
diff --git a/src/interface/go/apiv2/go.mod b/src/interface/go/apiv2/go.mod
deleted file mode 100644
index b27e13561d80b3e5ead9438b72e139a8ecccd69c..0000000000000000000000000000000000000000
--- a/src/interface/go/apiv2/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module apiv2
-
-go 1.18
diff --git a/src/interface/go/apiv2/intarkdb_interface/include/intarkdb.h b/src/interface/go/apiv2/intarkdb_interface/include/intarkdb.h
deleted file mode 100644
index a0aaff72521810610a4ad5be0e4f096759371666..0000000000000000000000000000000000000000
--- a/src/interface/go/apiv2/intarkdb_interface/include/intarkdb.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) GBA-NCTI-ISDC. 2022-2024.
- *
- * openGauss embedded is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * -------------------------------------------------------------------------
- *
- * intarkdb.h
- *
- * IDENTIFICATION
- * openGauss-embedded/src/interface/go/apiv2/intarkdb_interface/include/intarkdb.h
- *
- * -------------------------------------------------------------------------
- */
-
-#pragma once
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef WIN32
-#define EXP_SQL_API __declspec(dllexport)
-#define EXPORT_API __declspec(dllexport)
-#else
-#define EXP_SQL_API __attribute__((visibility("default")))
-#define EXPORT_API __attribute__((visibility("default")))
-#endif
-
-// sql
-typedef __int64_t int64_t;
-
-typedef struct st_api_text {
- char *str;
- int64_t len;
- int64_t data_type;
-} api_text_t;
-
-typedef struct st_result_column {
- char *data;
- int64_t data_len;
-} result_column;
-
-typedef struct st_result_row {
- int64_t column_count; // number of columns
- result_column *row_column_list; // columns contained in this row
- struct st_result_row *next;
-} result_row;
-
-typedef struct st_intarkdb_res_def {
- int64_t row_count; // number of rows
- bool is_select;
- void *res_row; // row result set (actually a RecordBatch*)
-
- int64_t column_count; // number of columns
- api_text_t *column_names; // column names
- char *msg; // execution result message
-
- char *value_ptr; // for free column value
- int64_t row_idx; // for next
-} intarkdb_res_def;
-
-typedef struct st_intarkdb_database {
- void *db;
-} *intarkdb_database;
-
-typedef struct st_intarkdb_connection {
- void *conn;
-} *intarkdb_connection;
-
-typedef enum en_status_def {
- SQL_ERROR = -1,
- SQL_SUCCESS = 0,
- SQL_TIMEDOUT = 1,
-} intarkdb_state_t;
-
-typedef struct st_intarkdb_res_def *intarkdb_result;
-
-EXP_SQL_API intarkdb_state_t intarkdb_open(const char *path, intarkdb_database *db);
-
-EXP_SQL_API void intarkdb_close(intarkdb_database *db);
-
-EXP_SQL_API intarkdb_state_t intarkdb_connect(intarkdb_database database, intarkdb_connection *conn);
-
-EXP_SQL_API void intarkdb_disconnect(intarkdb_connection *conn);
-
-EXP_SQL_API intarkdb_state_t intarkdb_query(intarkdb_connection connection, const char *query, intarkdb_result result);
-
-EXP_SQL_API intarkdb_result intarkdb_init_result();
-
-EXP_SQL_API int64_t intarkdb_row_count(intarkdb_result result);
-
-EXP_SQL_API int64_t intarkdb_column_count(intarkdb_result result);
-
-EXP_SQL_API const char *intarkdb_column_name(intarkdb_result result, int64_t col);
-
-EXP_SQL_API char *intarkdb_value_varchar(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API void intarkdb_free_row(intarkdb_result result);
-
-EXP_SQL_API void intarkdb_destroy_result(intarkdb_result result);
-
-EXP_SQL_API const char * intarkdb_result_msg(intarkdb_result result);
-
-// kv
-/* This is the reply object returned by redisCommand() */
-typedef __SIZE_TYPE__ size_t;
-
-typedef struct st_intarkdb_connection_kv {
- void *conn;
-} *intarkdb_connection_kv;
-
-/* This is the reply object */
-typedef struct KvReply_t {
- int type; /* return type */
- size_t len; /* Length of string */
- char *str; /* err or value*/
-} KvReply;
-
-typedef enum en_status_kv {
- KV_ERROR = -1,
- KV_SUCCESS = 0,
-} intarkdb_state_kv;
-
-EXP_SQL_API int intarkdb_connect_kv(intarkdb_database database, intarkdb_connection_kv *kvconn);
-
-EXP_SQL_API void intarkdb_disconnect_kv(intarkdb_connection_kv *kvconn);
-
-EXP_SQL_API int intarkdb_open_table_kv(intarkdb_connection_kv kvconn, const char *table_name);
-
-EXP_SQL_API void *intarkdb_set(intarkdb_connection_kv kvconn, const char *key, const char *val);
-
-EXP_SQL_API void *intarkdb_get(intarkdb_connection_kv kvconn, const char *key);
-
-EXP_SQL_API void *intarkdb_del(intarkdb_connection_kv kvconn, const char *key);
-
-EXP_SQL_API intarkdb_state_kv intarkdb_begin(intarkdb_connection_kv kvconn);
-
-EXP_SQL_API intarkdb_state_kv intarkdb_commit(intarkdb_connection_kv kvconn);
-
-EXP_SQL_API intarkdb_state_kv intarkdb_rollback(intarkdb_connection_kv kvconn);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/interface/go/apiv2/intarkdb_interface/intarkdb_interface.go b/src/interface/go/apiv2/intarkdb_interface/intarkdb_interface.go
deleted file mode 100644
index 51794edb1d88b02d78b1e82ef0f1986cc4fba62b..0000000000000000000000000000000000000000
--- a/src/interface/go/apiv2/intarkdb_interface/intarkdb_interface.go
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb_interface.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/apiv2/intarkdb_interface/intarkdb_interface.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb_interface
-
-/*
-// Location of the header files; relative to this source file it is the current directory, hence ".". If headers live in multiple directories, add one #cgo CFLAGS: ... line per directory
-#cgo CFLAGS: -I./include
-// Where the shared library is loaded from (directory and file name); e.g. -ladd loads libadd.so
-// #cgo LDFLAGS: -L${SRCDIR}/lib -lintarkdb -Wl,-rpath=${SRCDIR}/lib
-#cgo LDFLAGS: -L${SRCDIR}/../../../../../output/release/lib -lintarkdb -Wl,-rpath=${SRCDIR}/../../../../../output/release/lib
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdbool.h>
-#include "include/intarkdb.h"
-*/
-import "C"
-import (
- "fmt"
- "unsafe"
-)
-
-type DBStatus int
-
-const (
- dbError DBStatus = -1
- dbSuccess DBStatus = 0
- dbTimeout DBStatus = 1
- ignoreObjectExists DBStatus = 2
- fullConn DBStatus = 3
- notExist DBStatus = 10
-)
-
-var dbStatusTag = map[DBStatus]string{
- dbError: "error",
- dbSuccess: "success",
- dbTimeout: "timeout",
- ignoreObjectExists: "ignore object exists",
- fullConn: "full conn",
- notExist: "not exist",
-}
-
-func StatusMessage(dbStatus DBStatus) string {
- return dbStatusTag[dbStatus]
-}
-
-const (
- SQL string = "sql"
- KV string = "kv"
- Quit string = "quit"
- OK string = "ok"
- Failed string = "failed"
-)
-
-type Intarkdb struct {
- db C.intarkdb_database
-}
-
-type IntarkdbInterface interface {
- IntarkdbOpen(path string) (err error)
- IntarkdbClose()
-}
-
-func (g *Intarkdb) IntarkdbOpen(path string) (err error) {
- cPath := C.CString(path)
- defer C.free(unsafe.Pointer(cPath))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_open(cPath, &g.db))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("DB open %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *Intarkdb) IntarkdbClose() {
- C.intarkdb_close(&g.db)
- fmt.Println("DB close")
-}
-
-type IntarkdbSQL struct {
- DB Intarkdb
- connection C.intarkdb_connection
- result C.intarkdb_result
-}
-
-type SQLInterface interface {
- IntarkdbConnect() (err error)
- IntarkdbDisconnect()
- IntarkdbInitResult() (err error)
- IntarkdbQuery(query string) (err error)
- IntarkdbRowCount() uint64
- IntarkdbColumnCount() uint64
- IntarkdbColumnName(col uint8) string
- IntarkdbValueVarchar(row, col uint8) string
- IntarkdbDestroyResult()
-}
-
-func (g *IntarkdbSQL) IntarkdbConnect() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_connect(g.DB.db, &g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("sql connection %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbDisconnect() {
- C.intarkdb_disconnect(&g.connection)
- fmt.Println("sql connection close")
-}
-
-func (g *IntarkdbSQL) IntarkdbInitResult() (err error) {
- g.result = C.intarkdb_init_result()
- if g.result == nil {
- err = fmt.Errorf("intarkdb init result fail")
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbQuery(query string) (err error) {
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_query(g.connection, cQuery, g.result))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("intarkdb query err : %s", C.GoString(C.intarkdb_result_msg(g.result)))
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbRowCount() int64 {
- return int64(C.intarkdb_row_count(g.result))
-}
-
-func (g *IntarkdbSQL) IntarkdbColumnCount() int64 {
- return int64(C.intarkdb_column_count(g.result))
-}
-
-func (g *IntarkdbSQL) IntarkdbColumnName(col int64) string {
- return C.GoString(C.intarkdb_column_name(g.result, C.long(col)))
-}
-
-func (g *IntarkdbSQL) IntarkdbValueVarchar(row, col int64) string {
- return C.GoString(C.intarkdb_value_varchar(g.result, C.long(row), C.long(col)))
-}
-
-func (g *IntarkdbSQL) IntarkdbDestroyResult() {
- C.intarkdb_destroy_result(g.result)
- fmt.Println("result destroy success")
-}
-
-type IntarkdbKV struct {
- DB Intarkdb
- connection C.intarkdb_connection_kv
-}
-
-type KVInterface interface {
- Connect() (err error)
- Disconnect()
- OpenTable(name string) (err error)
- Set(key, value string) (err error)
- Get(key string) (value string, err error)
- Del(key string) (err error)
- TransactionBegin() (err error)
- TransactionCommit() (err error)
- TransactionRollback() (err error)
-}
-
-func (g *IntarkdbKV) Connect() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_connect_kv(g.DB.db, &g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv connection %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) Disconnect() {
- C.intarkdb_disconnect_kv(&g.connection)
- fmt.Println("kv connection close")
-}
-
-// Uses the default system table; a new table can be created via OpenTable
-func (g *IntarkdbKV) OpenTable(name string) (err error) {
- cName := C.CString(name)
- defer C.free(unsafe.Pointer(cName))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_open_table_kv(g.connection, cName))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("open kv table %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) Set(key, value string) (err error) {
- cKey := C.CString(key)
- cValue := C.CString(value)
- defer C.free(unsafe.Pointer(cKey))
- defer C.free(unsafe.Pointer(cValue))
-
- ptr := (*C.KvReply)(C.intarkdb_set(g.connection, cKey, cValue))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- fmt.Println("set key success")
- } else {
- err = fmt.Errorf("set key %s %s", key, dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) Get(key string) (value string, err error) {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
-
- ptr := (*C.KvReply)(C.intarkdb_get(g.connection, cKey))
-
- value = C.GoString(ptr.str)
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess && value != "" {
- fmt.Println("get key success")
- } else {
- if value == "" {
- dbStatus = notExist
- }
- err = fmt.Errorf("get key %s %s", key, dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) Del(key string) (err error) {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
-
- ptr := (*C.KvReply)(C.intarkdb_del(g.connection, cKey))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- fmt.Println("del key success")
- } else {
- err = fmt.Errorf("del key %s %s", key, dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) TransactionBegin() (err error){
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_begin(g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv TransactionBegin %s", dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) TransactionCommit() (err error){
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_commit(g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv TransactionCommit %s", dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) TransactionRollback() (err error){
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_rollback(g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv TransactionRollback %s", dbStatusTag[dbStatus])
- }
-
- return
-}
diff --git a/src/interface/go/apiv2/main.go b/src/interface/go/apiv2/main.go
deleted file mode 100644
index 60f856539ff5641542c128621b5ddd73477ae925..0000000000000000000000000000000000000000
--- a/src/interface/go/apiv2/main.go
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* main.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/apiv2/main.go
-*
-* -------------------------------------------------------------------------
- */
-
-package main
-
-import (
- "flag"
- "fmt"
-
- "apiv2/intarkdb_interface"
-)
-
-var path string
-
-func init() {
- flag.StringVar(&path, "path", ".", "DB path")
-}
-
-func main() {
- flag.Parse()
- fmt.Println("[go] DB path:", path)
-
- var intarkdb intarkdb_interface.Intarkdb
- err := intarkdb.IntarkdbOpen(path)
- if err != nil {
- fmt.Println(err)
- return
- }
-
- done := make(chan bool, 2)
-
- go kv(intarkdb, done)
- go sql(intarkdb, done)
-
- <-done
- <-done
-
- intarkdb.IntarkdbClose()
-}
-
-func kv(intarkdb intarkdb_interface.Intarkdb, done chan<- bool) {
- defer func() {
- done <- true
- }()
-
- var intarkdbKV intarkdb_interface.IntarkdbKV
- intarkdbKV.DB = intarkdb
- var err error
-
- err = intarkdbKV.Connect()
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdbKV.Disconnect()
-
- // Calling OpenTable creates/opens the given table; otherwise the default system table is used
- err = intarkdbKV.OpenTable("t_test")
- if err != nil {
- fmt.Println(err.Error())
- return
- }
-
- key1 := "one"
- value1 := "111"
-
- err = intarkdbKV.Set(key1, value1)
- if err != nil {
- fmt.Println(err.Error())
- return
- }
-
- var str string
- str, err = intarkdbKV.Get(key1)
- if err != nil {
- fmt.Println(err.Error())
- return
- } else {
- fmt.Println("get ", key1, " value ", str)
- }
-
- err = intarkdbKV.Del(key1)
- if err != nil {
- fmt.Println(err.Error())
- return
- }
-
- intarkdbKV.TransactionBegin()
- intarkdbKV.Set("begin1", "begin1")
- intarkdbKV.Set("begin2", "begin2")
- err = intarkdbKV.TransactionCommit()
- if err != nil {
- fmt.Println("commit err")
- intarkdbKV.TransactionRollback()
- }
-
- intarkdbKV.TransactionBegin()
- intarkdbKV.Set("begin3", "begin3")
- intarkdbKV.Set("begin4", "begin4")
- intarkdbKV.TransactionRollback()
-}
-
-func sql(intarkdb intarkdb_interface.Intarkdb, done chan<- bool) {
- defer func() {
- done <- true
- }()
-
- var intarkdbSQL intarkdb_interface.IntarkdbSQL
- intarkdbSQL.DB = intarkdb
-
- err := intarkdbSQL.IntarkdbConnect()
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdbSQL.IntarkdbDisconnect()
-
- err = intarkdbSQL.IntarkdbInitResult()
- if err != nil {
- fmt.Println(err.Error())
- return
- }
- defer intarkdbSQL.IntarkdbDestroyResult()
-
- create := "create table sql_test(id integer, name varchar(20));"
- insert := "insert into sql_test values(1,'one'),(2,'two');"
- query := "select * from sql_test;"
-
- err = intarkdbSQL.IntarkdbQuery(create)
- if err != nil {
- fmt.Println(err.Error())
- return
- }
-
- err = intarkdbSQL.IntarkdbQuery(insert)
- if err != nil {
- fmt.Println(err.Error())
- return
- }
-
- err = intarkdbSQL.IntarkdbQuery(query)
- if err != nil {
- fmt.Println(err.Error())
- return
- } else {
- rowCount := intarkdbSQL.IntarkdbRowCount()
- for row := int64(0); row < rowCount; row++ {
- columnCount := intarkdbSQL.IntarkdbColumnCount()
- if row == 0 {
- fmt.Print(" ")
- for col := int64(0); col < columnCount; col++ {
- colName := intarkdbSQL.IntarkdbColumnName(col)
- fmt.Print(colName, " ")
- }
- fmt.Println()
- }
- fmt.Print(row, " ")
- for col := int64(0); col < columnCount; col++ {
- colValue := intarkdbSQL.IntarkdbValueVarchar(row, col)
- fmt.Print(colValue, " ")
- }
- fmt.Println()
- }
- }
-
-}
diff --git a/src/interface/go/apiv2/readme.md b/src/interface/go/apiv2/readme.md
deleted file mode 100644
index a7a27fa7620faa6a1a9d5d804cf67cda211fe7cb..0000000000000000000000000000000000000000
--- a/src/interface/go/apiv2/readme.md
+++ /dev/null
@@ -1,18 +0,0 @@
-#####
-##intarkdb Go interface
-#####
-
-##
-##Dependencies
-Required header file
-intarkdb_interface/include/intarkdb.h
-
-##
-##Running
-Run go run main.go in the current directory, or run it with a directory: go run main.go -path database_dir (database_dir is the intarkdb database directory)
-Running without a directory creates an intarkdb database directory under the current directory
-
-######
-SQL statements entered in the client end with an ASCII semicolon
-Type quit to exit the program
-######
\ No newline at end of file
diff --git a/src/interface/go/gorm/.gitignore b/src/interface/go/gorm/.gitignore
deleted file mode 100644
index b54ee55cb31ae532cdd9a4c6107f975c5fda8791..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-go.sum
-intarkdb/
-mydatabase.db
diff --git a/src/interface/go/gorm/go.mod b/src/interface/go/gorm/go.mod
deleted file mode 100644
index 9f701b8308bd8f8cdac456d15fff66b4a550987a..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/go.mod
+++ /dev/null
@@ -1,14 +0,0 @@
-module go-api
-
-go 1.18
-
-require (
- gorm.io/driver/sqlite v1.5.4
- gorm.io/gorm v1.25.5
-)
-
-require (
- github.com/jinzhu/inflection v1.0.0 // indirect
- github.com/jinzhu/now v1.1.5 // indirect
- github.com/mattn/go-sqlite3 v1.14.17 // indirect
-)
diff --git a/src/interface/go/gorm/main.go b/src/interface/go/gorm/main.go
deleted file mode 100644
index 27f3af2f7caec713421153900cfe144af4a641f6..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/main.go
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* main.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/gorm/main.go
-*
-* -------------------------------------------------------------------------
- */
-
-package main
-
-import (
- "fmt"
- "go-api/orm-driver/intarkdb"
- "log"
- "os"
- "time"
-
- "gorm.io/driver/sqlite"
- "gorm.io/gorm"
- "gorm.io/gorm/logger"
-)
-
-type Left struct {
- RightCode int32
- Name string
-}
-
-type Right struct {
- Code int32
- Content string
-}
-
-type LeftRight struct {
- Left
- Right
-}
-
-func runSqlite() {
- db, _ := gorm.Open(sqlite.Open("mydatabase.db"), &gorm.Config{})
-
- setLog(db)
-
- initTableAndData(db)
-
- joinTable(db)
-
- limitOffset(db)
-
- orderBy(db)
-
- groupByAndFunc(db)
-
- operator(db)
-}
-
-func runIntarkDB() {
- db, err := gorm.Open(intarkdb.Open("."))
- if err != nil {
- panic("failed to connect database")
- }
-
- setLog(db)
-
- initTableAndData(db)
-
- joinTable(db)
-
- limitOffset(db)
-
- orderBy(db)
-
- groupByAndFunc(db)
-
- operator(db)
-
- tagAndType(db)
-
- compoundIndex(db)
-}
-
-func setLog(db *gorm.DB) {
- newLogger := logger.New(
- log.New(os.Stdout, "\r\n", log.LstdFlags),
- logger.Config{
- LogLevel: logger.Info, // set the log level to Info so that SQL statements are printed
- },
- )
- db.Logger = newLogger
-}
-
-func initTableAndData(db *gorm.DB) {
- db.Migrator().DropTable("lefts")
- db.Migrator().DropTable("rights")
-
- db.AutoMigrate(&Left{})
- db.AutoMigrate(&Right{})
-
- left := Left{RightCode: 1, Name: "l1"}
- right := Right{Code: 1, Content: "r1"}
-
- db.Create(&left)
-
- left.RightCode = 2
- db.Create(&left)
- left.RightCode = 3
- db.Create(&left)
-
- left.RightCode = 4
- left.Name = "l2"
- db.Create(&left)
-
- left.RightCode = 5
- db.Create(&left)
- left.RightCode = 6
- db.Create(&left)
-
- left.RightCode = 7
- left.Name = "l3"
- db.Create(&left)
-
- left.RightCode = 8
- db.Create(&left)
- left.RightCode = 9
- db.Create(&left)
-
- db.Create(&right)
- right.Code = 2
- right.Content = "r2"
- db.Create(&right)
-}
-
-// table joins (inner join, left join)
-// row counting with Count
-func joinTable(db *gorm.DB) {
- var count int64
- var leftRights []LeftRight
- // left join
- db.Table("lefts").Select("*").Joins("LEFT JOIN rights ON lefts.right_code = rights.code").Find(&leftRights).Count(&count)
- fmt.Printf("count = %d, LeftRight len = %d\n", count, len(leftRights))
- for _, v := range leftRights {
- fmt.Println(v)
- }
-
- db.Table("lefts").Select("*").Joins("LEFT JOIN rights ON lefts.right_code = ?", 1).Find(&leftRights).Count(&count)
- fmt.Printf("count = %d, LeftRight len = %d\n", count, len(leftRights))
- for _, v := range leftRights {
- fmt.Println(v)
- }
-
- db.Table("lefts").Select("*").Joins("LEFT JOIN rights ON lefts.name = ?", "l1").Find(&leftRights).Count(&count)
- fmt.Printf("count = %d, LeftRight len = %d\n", count, len(leftRights))
- for _, v := range leftRights {
- fmt.Println(v)
- }
-
- // inner join
- db.Model(&Left{}).Select("*").InnerJoins("INNER JOIN rights ON lefts.right_code = rights.code").Find(&leftRights).Count(&count)
- fmt.Printf("count = %d, LeftRight len = %d\n", count, len(leftRights))
- for _, v := range leftRights {
- fmt.Println(v)
- }
-
- db.Model(&Left{}).Select("*").InnerJoins("INNER JOIN rights ON rights.code = ?", 1).Find(&leftRights).Count(&count)
- fmt.Printf("count = %d, LeftRight len = %d\n", count, len(leftRights))
- for _, v := range leftRights {
- fmt.Println(v)
- }
-
- db.Model(&Left{}).Select("*").InnerJoins("INNER JOIN rights ON rights.content = ?", "r1").Find(&leftRights).Count(&count)
- fmt.Printf("count = %d, LeftRight len = %d\n", count, len(leftRights))
- for _, v := range leftRights {
- fmt.Println(v)
- }
-}
-
-// pagination (limit, offset)
-func limitOffset(db *gorm.DB) {
- var lefts []Left
- db.Table("lefts").Select("*").Limit(2).Offset(1).Find(&lefts)
- for _, v := range lefts {
- fmt.Println(v)
- }
-}
-
-// sorting (order by)
-func orderBy(db *gorm.DB) {
- var lefts []Left
- db.Table("lefts").Select("*").Order("right_code desc").Order("name").Find(&lefts)
- for _, v := range lefts {
- fmt.Println(v)
- }
-
- db.Table("lefts").Select("*").Order("right_code asc").Order("name").Find(&lefts)
- for _, v := range lefts {
- fmt.Println(v)
- }
-}
-
-type Result struct {
- Name string
- Total int64
- Maximum int64
- Minimum int64
- Average float64
-}
-
-// grouping (group by)
-// built-in aggregate functions: sum, avg, max, min
-func groupByAndFunc(db *gorm.DB) {
- var results []Result
- db.Table("lefts").Select("name, sum(right_code) as total, max(right_code) as maximum, min(right_code) as minimum, avg(right_code) as average").Group("name").Find(&results)
- for _, v := range results {
- fmt.Printf("name = %s, sum = %d, max = %d, min = %d, avg = %f\n", v.Name, v.Total, v.Maximum, v.Minimum, v.Average)
- }
-}
-
-// operators: =, <>, >, <, >=, <=, in, not in, and, or, ( )
-func operator(db *gorm.DB) {
- var lefts []Left
- rightCodes := []uint{1, 4, 7}
- names := []string{"l2", "l3"}
- db.Table("lefts").Select("*").Where("right_code in (?) and name not in (?)", rightCodes, names).Find(&lefts)
- for _, v := range lefts {
- fmt.Println(v)
- }
-
- db.Table("lefts").Select("*").Where("(right_code > ? and right_code < ?) or (right_code >= ? and right_code <= ?)", 1, 3, 4, 6).Find(&lefts)
- for _, v := range lefts {
- fmt.Println(v)
- }
-
- db.Table("lefts").Select("*").Where("right_code <> ? and name = ? ", 1, "l1").Find(&lefts)
- for _, v := range lefts {
- fmt.Println(v)
- }
-}
-
-type TagAndType struct {
- ID int32 `gorm:"primaryKey;autoIncrement"`
- BigIntField int64 `gorm:"column:big_int_field;default:100;unique"`
- UintField uint32 `gorm:"column:uint_field"`
- BigUintField uint64 `gorm:"column:big_uint_field"`
- CharField string `gorm:"column:char_field;type:char(20);not null"`
- VarcharField string `gorm:"column:varchar_field;type:varchar(255);default:null"`
- TextField string `gorm:"column:text_field;type:text"`
- CreateTimestamp time.Time
- Gender bool
- Price float64
-}
-
-func tagAndType(db *gorm.DB) {
- db.Migrator().DropTable("tag_and_types")
- db.AutoMigrate(&TagAndType{})
- data := TagAndType{BigIntField: -9223372036854775808,
- CharField: "test1", Price: 10.22, Gender: true, CreateTimestamp: time.Now().Local()}
- db.Create(&data)
-
- where := "id = ?"
- // query a record
- var result TagAndType
- tx := db.First(&result, where, 1)
- if tx.Error != nil {
- panic("Failed to retrieve record: " + tx.Error.Error())
- }
- fmt.Printf("Retrieved : %+v\n", result)
-
- // update a record
- tx = db.Model(&TagAndType{}).Where(where, 1).Update("Price", 280.22)
- if tx.Error != nil {
- panic("Failed to update record: " + tx.Error.Error())
- }
-
- data.VarcharField = "default:null"
- data.BigIntField = -2000000000
- db.Create(&data)
- data.TextField = "text"
- data.BigIntField = -3000000000
- db.Create(&data)
-
- // delete a record
- tx = db.Delete(&TagAndType{}, where, 3)
- if tx.Error != nil {
- panic("Failed to delete record: " + tx.Error.Error())
- }
-
- // query all rows
- var results []TagAndType
- tx = db.Model(&TagAndType{}).Select("*").Find(&results)
- if tx.Error != nil {
- panic("Failed to select * record: " + tx.Error.Error())
- }
- for i, v := range results {
- fmt.Printf("Retrieved %d : %+v\n", i, v)
- }
-}
-
-type CompoundIndex struct {
- OneIndex int32
- TwoIndex string
-}
-
-func compoundIndex(db *gorm.DB) {
- db.Migrator().DropTable("compound_indices")
- db.AutoMigrate(&CompoundIndex{})
- tx := db.Exec("create index one_two on compound_indices (one_index, two_index);")
- if tx.Error != nil {
- panic("Failed to create compoundIndex: " + tx.Error.Error())
- }
-}
-
-func demonstration() {
- db, err := gorm.Open(intarkdb.Open("."))
- if err != nil {
- panic("failed to connect database")
- }
-
- db.Logger = logger.New(
- log.New(os.Stdout, "\r\n", log.LstdFlags),
- logger.Config{
- LogLevel: logger.Info, // set the log level to Info so that SQL statements are printed
- },
- )
-
- // db.Migrator().DropTable("compounds")
-
- // db.AutoMigrate(&Compound{})
-
- db.Create(&Compound{ID: 3, TwoIndex: "123"})
- db.Create(&Compound{ID: 4, TwoIndex: "1234567890000"})
-
- var results []Compound
- tx := db.Model(&Compound{}).Select("*").Find(&results)
- if tx.Error != nil {
- panic("Failed to select * record: " + tx.Error.Error())
- }
- for i, v := range results {
- fmt.Printf("Retrieved %d : %+v\n", i, v)
- }
-}
-
-type Compound struct {
- ID int `gorm:"primaryKey"`
- TwoIndex string
-}
-
-func main() {
- // runSqlite()
-
- runIntarkDB()
- // demonstration()
-}
diff --git a/src/interface/go/gorm/orm-driver/intarkdb/intarkdb.go b/src/interface/go/gorm/orm-driver/intarkdb/intarkdb.go
deleted file mode 100644
index 33b3efed64cf4a32aef487d0a518c773b7651bda..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/orm-driver/intarkdb/intarkdb.go
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-* Copyright (c) 2013-NOW Jinzhu
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/gorm/orm-driver/intarkdb/intarkdb.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb
-
-import (
- "database/sql"
- "strconv"
- "strings"
-
- _ "go-api/sql-driver/intarkdb"
-
- "gorm.io/gorm"
- "gorm.io/gorm/callbacks"
- "gorm.io/gorm/clause"
- "gorm.io/gorm/logger"
- "gorm.io/gorm/migrator"
- "gorm.io/gorm/schema"
-)
-
-const DriverName = "intarkdb"
-
-type Dialector struct {
- DriverName string
- DSN string
- Conn gorm.ConnPool
-}
-
-func Open(dsn string) gorm.Dialector {
- return &Dialector{DSN: dsn}
-}
-
-func (dialector Dialector) Name() string {
- return "intarkdb"
-}
-
-func (dialector Dialector) Initialize(db *gorm.DB) (err error) {
- if dialector.DriverName == "" {
- dialector.DriverName = DriverName
- }
-
- if dialector.Conn != nil {
- db.ConnPool = dialector.Conn
- } else {
- conn, err := sql.Open(dialector.DriverName, dialector.DSN)
- if err != nil {
- return err
- }
- db.ConnPool = conn
- }
-
- callbacks.RegisterDefaultCallbacks(db, &callbacks.Config{
- LastInsertIDReversed: true,
- })
-
- for k, v := range dialector.ClauseBuilders() {
- db.ClauseBuilders[k] = v
- }
-
- return
-}
-
-func (dialector Dialector) ClauseBuilders() map[string]clause.ClauseBuilder {
- return map[string]clause.ClauseBuilder{
- "INSERT": func(c clause.Clause, builder clause.Builder) {
- if insert, ok := c.Expression.(clause.Insert); ok {
- if stmt, ok := builder.(*gorm.Statement); ok {
- stmt.WriteString("INSERT ")
- if insert.Modifier != "" {
- stmt.WriteString(insert.Modifier)
- stmt.WriteByte(' ')
- }
-
- stmt.WriteString("INTO ")
- if insert.Table.Name == "" {
- stmt.WriteQuoted(stmt.Table)
- } else {
- stmt.WriteQuoted(insert.Table)
- }
- return
- }
- }
-
- c.Build(builder)
- },
- "LIMIT": func(c clause.Clause, builder clause.Builder) {
- if limit, ok := c.Expression.(clause.Limit); ok {
- var lmt = -1
- if limit.Limit != nil && *limit.Limit >= 0 {
- lmt = *limit.Limit
- }
- if lmt >= 0 || limit.Offset > 0 {
- builder.WriteString("LIMIT ")
- builder.WriteString(strconv.Itoa(lmt))
- }
- if limit.Offset > 0 {
- builder.WriteString(" OFFSET ")
- builder.WriteString(strconv.Itoa(limit.Offset))
- }
- }
- },
- // "FOR": func(c clause.Clause, builder clause.Builder) {
- // if _, ok := c.Expression.(clause.Locking); ok {
- // // instardb does not support row-level locking.
- // return
- // }
- // c.Build(builder)
- // },
- }
-}
-
-// returns the default value expression for a field
-func (dialector Dialector) DefaultValueOf(field *schema.Field) clause.Expression {
- if field.AutoIncrement {
- return clause.Expr{SQL: "NULL"}
- }
-
- // doesn't work, will raise error
- return clause.Expr{SQL: "DEFAULT"}
-}
-
-// creates the Migrator
-func (dialector Dialector) Migrator(db *gorm.DB) gorm.Migrator {
- return Migrator{migrator.Migrator{Config: migrator.Config{
- DB: db,
- Dialector: dialector,
- // CreateIndexAfterCreateTable: true,
- }}}
-}
-
-// writes the placeholder for bound parameters
-func (dialector Dialector) BindVarTo(writer clause.Writer, stmt *gorm.Statement, v interface{}) {
- writer.WriteByte('?')
-}
-
-// quotes special identifiers according to this database's conventions
-func (dialector Dialector) QuoteTo(writer clause.Writer, str string) {
- // writer.WriteByte('`')
- if strings.Contains(str, ".") {
- for idx, str := range strings.Split(str, ".") {
- if idx > 0 {
- writer.WriteString(".")
- }
- writer.WriteString(str)
- // writer.WriteByte('`')
- }
- } else {
- writer.WriteString(str)
- // writer.WriteByte('`')
- }
-}
-
-// SQL explainer: returns the final SQL statement that will be executed
-func (dialector Dialector) Explain(sql string, vars ...interface{}) string {
- return logger.ExplainSQL(sql, nil, `"`, vars...)
-}
-
-func (dialector Dialector) DataTypeOf(field *schema.Field) string {
- switch field.DataType {
- case schema.Bool:
- return "boolean"
- case schema.Int:
- var result string
- if field.Size <= 32 {
- result = "integer"
- } else {
- result = "bigint"
- }
-
- if field.AutoIncrement {
- result = result + " autoincrement"
- }
- return result
- case schema.Uint:
- var result string
- if field.Size <= 32 {
- result = "uint32"
- } else {
- result = "ubigint"
- }
-
- if field.AutoIncrement {
- result = result + " autoincrement"
- }
- return result
- case schema.Float:
- return "real"
- case schema.String:
- return "varchar"
- case schema.Time:
- return "timestamp"
- case schema.Bytes:
- return "blob"
- }
-
- return string(field.DataType)
-}
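As an illustration of the DataTypeOf mapping above, a hypothetical model (field names invented for the example) annotated with the column type string this dialector emits for each Go field type:

```go
package example

import "time"

// Hypothetical model; each comment shows the string DataTypeOf returns for that field.
type Mapped struct {
	ID    int32     `gorm:"primaryKey;autoIncrement"` // schema.Int, Size <= 32, AutoIncrement -> "integer autoincrement"
	Big   int64     // schema.Int,  Size > 32  -> "bigint"
	U32   uint32    // schema.Uint, Size <= 32 -> "uint32"
	U64   uint64    // schema.Uint, Size > 32  -> "ubigint"
	Price float64   // schema.Float            -> "real"
	Name  string    // schema.String           -> "varchar"
	When  time.Time // schema.Time             -> "timestamp"
	Data  []byte    // schema.Bytes            -> "blob"
	Flag  bool      // schema.Bool             -> "boolean"
}
```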
diff --git a/src/interface/go/gorm/orm-driver/intarkdb/migrator.go b/src/interface/go/gorm/orm-driver/intarkdb/migrator.go
deleted file mode 100644
index 4257f609832df3de232634ebe6c452397c134b5e..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/orm-driver/intarkdb/migrator.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-* Copyright (c) 2013-NOW Jinzhu
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* migrator.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/gorm/orm-driver/intarkdb/migrator.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb
-
-import (
- "errors"
-
- "gorm.io/gorm"
- "gorm.io/gorm/logger"
- "gorm.io/gorm/migrator"
-)
-
-type Migrator struct {
- migrator.Migrator
-}
-
-type printSQLLogger struct {
- logger.Interface
-}
-
-func (m Migrator) CurrentDatabase() (name string) {
- return "intarkdb"
-}
-
-func (m Migrator) HasTable(value interface{}) bool {
- var count int
- m.Migrator.RunWithValue(value, func(stmt *gorm.Statement) error {
- return m.DB.Raw("SELECT count(*) FROM 'SYS_TABLES' WHERE NAME=?", stmt.Table).Row().Scan(&count)
- })
- return count > 0
-}
-
-// AutoMigrate auto migrate values
-func (m Migrator) AutoMigrate(values ...interface{}) error {
- for _, value := range m.ReorderModels(values, true) {
- queryTx := m.DB.Session(&gorm.Session{})
- execTx := queryTx
- if m.DB.DryRun {
- queryTx.DryRun = false
- execTx = m.DB.Session(&gorm.Session{Logger: &printSQLLogger{Interface: m.DB.Logger}})
- }
- if !queryTx.Migrator().HasTable(value) {
- if err := execTx.Migrator().CreateTable(value); err != nil {
- return err
- }
- } else {
- if err := m.RunWithValue(value, func(stmt *gorm.Statement) error {
- return errors.New("table already exists")
- }); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// TODO
-// func (m Migrator) HasIndex(value interface{}, name string) bool {
-// var count int
-// m.RunWithValue(value, func(stmt *gorm.Statement) error {
-// if idx := stmt.Schema.LookIndex(name); idx != nil {
-// name = idx.Name
-// }
-
-// if name != "" {
-// m.DB.Raw(
-// "SELECT count(*) FROM 'SYS_INDEXES' WHERE type = ? AND tbl_name = ? AND name = ?", "index", stmt.Table, name,
-// ).Row().Scan(&count)
-// }
-// return nil
-// })
-// return count > 0
-// }
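A usage note on the AutoMigrate above: it checks SYS_TABLES through HasTable and only creates missing tables; when a table already exists it returns an error rather than altering the schema, so re-running a migration is expected to fail. A minimal sketch, reusing the Left model and open call from gorm/main.go above:

```go
package main

import (
	"fmt"

	"go-api/orm-driver/intarkdb"
	"gorm.io/gorm"
)

type Left struct {
	RightCode int32
	Name      string
}

func main() {
	db, err := gorm.Open(intarkdb.Open("."))
	if err != nil {
		panic("failed to connect database")
	}

	// First run: "lefts" is absent from SYS_TABLES, so the table is created.
	if err := db.AutoMigrate(&Left{}); err != nil {
		fmt.Println("migrate:", err)
	}

	// Second run: HasTable finds "lefts" and AutoMigrate returns
	// "table already exists"; this Migrator never alters an existing schema.
	if err := db.AutoMigrate(&Left{}); err != nil {
		fmt.Println("migrate:", err)
	}
}
```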
diff --git a/src/interface/go/gorm/sql-driver/intarkdb/include/intarkdb.h b/src/interface/go/gorm/sql-driver/intarkdb/include/intarkdb.h
deleted file mode 100644
index 10c6278e32f656de50e519a29da8891227f2331a..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/sql-driver/intarkdb/include/intarkdb.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-* Copyright (c) 2022 Huawei Technologies Co.,Ltd.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb.h
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/gorm/sql-driver/intarkdb/include/intarkdb.h
-*
-* -------------------------------------------------------------------------
-*/
-
-#pragma once
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-#ifdef WIN32
-#define EXP_SQL_API __declspec(dllexport)
-#else
-#define EXP_SQL_API __attribute__((visibility("default")))
-#endif
-
-/*--------------------------------------------------------------------*/
-// Type define
-/*--------------------------------------------------------------------*/
-
-typedef __int8_t int8_t;
-typedef __int16_t int16_t;
-typedef __int32_t int32_t;
-typedef __int64_t int64_t;
-typedef __uint8_t uint8_t;
-typedef __uint16_t uint16_t;
-typedef __uint32_t uint32_t;
-typedef __uint64_t uint64_t;
-typedef __SIZE_TYPE__ size_t;
-
-typedef enum en_gs_type {
- GS_TYPE_UNKNOWN = -1,
- GS_TYPE_BASE = 20000,
- GS_TYPE_INTEGER = GS_TYPE_BASE + 1, /* native 32 bits integer */
- GS_TYPE_BIGINT = GS_TYPE_BASE + 2, /* native 64 bits integer */
- GS_TYPE_REAL = GS_TYPE_BASE + 3, /* 8-byte native double */
- GS_TYPE_NUMBER = GS_TYPE_BASE + 4, /* number */
- GS_TYPE_DECIMAL = GS_TYPE_BASE + 5, /* decimal, internal used */
- GS_TYPE_DATE = GS_TYPE_BASE + 6, /* datetime */
- GS_TYPE_TIMESTAMP = GS_TYPE_BASE + 7, /* timestamp */
- GS_TYPE_CHAR = GS_TYPE_BASE + 8, /* char(n) */
- GS_TYPE_VARCHAR = GS_TYPE_BASE + 9, /* varchar, varchar2 */
- GS_TYPE_STRING = GS_TYPE_BASE + 10, /* native char * */
- GS_TYPE_BINARY = GS_TYPE_BASE + 11, /* binary */
- GS_TYPE_VARBINARY = GS_TYPE_BASE + 12, /* varbinary */
- GS_TYPE_CLOB = GS_TYPE_BASE + 13, /* clob */
- GS_TYPE_BLOB = GS_TYPE_BASE + 14, /* blob */
- GS_TYPE_CURSOR = GS_TYPE_BASE + 15, /* resultset, for stored procedure */
- GS_TYPE_COLUMN = GS_TYPE_BASE + 16, /* column type, internal used */
- GS_TYPE_BOOLEAN = GS_TYPE_BASE + 17,
-
- /* timestamp with time zone; this type is fake and is abandoned now,
- * you can treat it as GS_TYPE_TIMESTAMP just for compatibility */
- GS_TYPE_TIMESTAMP_TZ_FAKE = GS_TYPE_BASE + 18,
- GS_TYPE_TIMESTAMP_LTZ = GS_TYPE_BASE + 19, /* timestamp with local time zone */
- GS_TYPE_INTERVAL = GS_TYPE_BASE + 20, /* interval of Postgre style, no use */
- GS_TYPE_INTERVAL_YM = GS_TYPE_BASE + 21, /* interval YEAR TO MONTH */
- GS_TYPE_INTERVAL_DS = GS_TYPE_BASE + 22, /* interval DAY TO SECOND */
- GS_TYPE_RAW = GS_TYPE_BASE + 23, /* raw */
- GS_TYPE_IMAGE = GS_TYPE_BASE + 24, /* image, equals to longblob */
- GS_TYPE_UINT32 = GS_TYPE_BASE + 25, /* unsigned integer */
- GS_TYPE_UINT64 = GS_TYPE_BASE + 26, /* unsigned bigint */
- GS_TYPE_SMALLINT = GS_TYPE_BASE + 27, /* 16-bit integer */
- GS_TYPE_USMALLINT = GS_TYPE_BASE + 28, /* unsigned 16-bit integer */
- GS_TYPE_TINYINT = GS_TYPE_BASE + 29, /* 8-bit integer */
- GS_TYPE_UTINYINT = GS_TYPE_BASE + 30, /* unsigned 8-bit integer */
- GS_TYPE_FLOAT = GS_TYPE_BASE + 31, /* 4-byte float */
- // !!! when adding a new member, make sure it does not exceed the limit of g_type_maps in sql_oper_func.c
- /* the real tz type; GS_TYPE_TIMESTAMP_TZ_FAKE will not be used, it will be the same as GS_TYPE_TIMESTAMP */
- GS_TYPE_TIMESTAMP_TZ = GS_TYPE_BASE + 32, /* timestamp with time zone */
- GS_TYPE_ARRAY = GS_TYPE_BASE + 33, /* array */
- /* com */
- /* caution: SCALAR types must be defined above */
- GS_TYPE_OPERAND_CEIL = GS_TYPE_BASE + 40, // ceil of operand type
-
- /* The datatype can't be used in the datatype calculation system. Only used for
- * decl in/out param in pl/sql */
- GS_TYPE_RECORD = GS_TYPE_BASE + 41,
- GS_TYPE_COLLECTION = GS_TYPE_BASE + 42,
- GS_TYPE_OBJECT = GS_TYPE_BASE + 43,
- // new data type
- GS_TYPE_HUGEINT = GS_TYPE_BASE + 44,
- /* The datatypes below GS_TYPE__DO_NOT_USE can be used as database DATATYPEs.
- * To some extent, GS_TYPE__DO_NOT_USE represents the maximal number
- * of DATATYPEs that Zenith supports. A newly added datatype
- * must come before GS_TYPE__DO_NOT_USE, and the type_id must be consecutive
- */
- GS_TYPE__DO_NOT_USE = GS_TYPE_BASE + 45,
-
- /* The following datatypes are functional datatypes, which can help
- * to implement some features when needed. Note that they can not be
- * used as database DATATYPE */
- /* to present a datatype node, for example cast(para1, typenode),
- * the second argument is an expr_node storing the information of
- * a datatype, such as length, precision, scale, etc.. */
- GS_TYPE_FUNC_BASE = GS_TYPE_BASE + 200,
- GS_TYPE_TYPMODE = GS_TYPE_FUNC_BASE + 1,
-
- /* This datatype only be used in winsort aggr */
- GS_TYPE_VM_ROWID = GS_TYPE_FUNC_BASE + 2,
- GS_TYPE_ITVL_UNIT = GS_TYPE_FUNC_BASE + 3,
- GS_TYPE_UNINITIALIZED = GS_TYPE_FUNC_BASE + 4,
-
- /* The following datatypes are used for binding native date or timestamp type values */
- GS_TYPE_NATIVE_DATE = GS_TYPE_FUNC_BASE + 5, // native datetime, internal used
- GS_TYPE_NATIVE_TIMESTAMP = GS_TYPE_FUNC_BASE + 6, // native timestamp, internal used
- GS_TYPE_LOGIC_TRUE = GS_TYPE_FUNC_BASE + 7, // native true, internal used
-
-} gs_type_t;
-
-typedef struct st_api_text {
- char *str;
- int64_t len;
- int64_t data_type;
-} api_text_t;
-
-typedef struct st_result_column {
- char *data;
- int64_t data_len;
-} result_column;
-
-typedef struct st_result_row {
- int64_t column_count; // number of columns
- result_column* row_column_list; // columns contained in this row
- struct st_result_row* next;
-} result_row;
-
-typedef struct st_intarkdb_res_def {
- int64_t row_count; // number of rows
- bool is_select;
- void* res_row; // row result set (actually a RecordBatch*)
-
- int64_t column_count; // number of columns
- api_text_t* column_names; // column names
- char* msg; // execution result message
-
- char* value_ptr; // for free column value
- int64_t row_idx; // for next
-} intarkdb_res_def;
-
-typedef struct st_intarkdb_database {
- void* db;
-} *intarkdb_database;
-
-typedef struct st_intarkdb_connection {
- void* conn;
-} *intarkdb_connection;
-
-typedef enum en_status_def {
- SQL_ERROR = -1,
- SQL_SUCCESS = 0,
- SQL_TIMEDOUT = 1,
-} intarkdb_state_t;
-
-typedef struct st_intarkdb_res_def* intarkdb_result;
-
-typedef struct st_intarkdb_prepared_statement {
- void* prep_stmt;
-} *intarkdb_prepared_statement;
-
-EXP_SQL_API intarkdb_state_t intarkdb_open(const char *path, intarkdb_database *db);
-
-EXP_SQL_API void intarkdb_close(intarkdb_database *db);
-
-EXP_SQL_API intarkdb_state_t intarkdb_connect(intarkdb_database database, intarkdb_connection *conn);
-
-EXP_SQL_API void intarkdb_disconnect(intarkdb_connection *conn);
-
-EXP_SQL_API intarkdb_state_t intarkdb_query(intarkdb_connection connection, const char *query, intarkdb_result result);
-
-EXP_SQL_API intarkdb_result intarkdb_init_result();
-
-EXP_SQL_API int32_t intarkdb_result_effect_row(intarkdb_result result);
-
-EXP_SQL_API int64_t intarkdb_row_count(intarkdb_result result);
-
-EXP_SQL_API int64_t intarkdb_column_count(intarkdb_result result);
-
-EXP_SQL_API const char * intarkdb_column_name(intarkdb_result result, int64_t col);
-
-EXP_SQL_API int32_t intarkdb_column_type(intarkdb_result result, int64_t col);
-
-EXP_SQL_API void intarkdb_column_typename(intarkdb_result result, int64_t col, char *type_name, size_t max_len);
-
-EXP_SQL_API char * intarkdb_value_varchar(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API void intarkdb_free_row(intarkdb_result result);
-
-EXP_SQL_API void intarkdb_destroy_result(intarkdb_result result);
-
-EXP_SQL_API const char * intarkdb_result_msg(intarkdb_result result);
-
-// ----------------------------------------result value-----------------------------------------------
-EXP_SQL_API bool intarkdb_next_row(intarkdb_result result);
-
-EXP_SQL_API char *intarkdb_column_value(intarkdb_result result, int64_t col);
-
-EXP_SQL_API bool intarkdb_value_boolean(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API int32_t intarkdb_value_int32(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API int64_t intarkdb_value_int64(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API uint32_t intarkdb_value_uint32(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API uint64_t intarkdb_value_uint64(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API double intarkdb_value_double(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API char *intarkdb_value_date(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API char *intarkdb_value_timestamp(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API int64_t intarkdb_value_timestamp_ms(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API int64_t intarkdb_value_timestamp_us(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API void *intarkdb_value_blob(intarkdb_result result, int64_t row, int64_t col, int32_t *val_len);
-
-// ----------------------------------------prepare----------------------------------------------------
-EXP_SQL_API intarkdb_state_t intarkdb_prepare(intarkdb_connection conn, const char *query, intarkdb_prepared_statement *out);
-
-EXP_SQL_API int64_t intarkdb_prepare_nparam(intarkdb_prepared_statement prepared_statement);
-
-EXP_SQL_API char *intarkdb_prepare_errmsg(intarkdb_prepared_statement prepared_statement);
-
-EXP_SQL_API intarkdb_state_t intarkdb_execute_prepared(intarkdb_prepared_statement prepared_statement, intarkdb_result result);
-
-EXP_SQL_API void intarkdb_destroy_prepare(intarkdb_prepared_statement *prepared_statement);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_boolean(intarkdb_prepared_statement prepared_statement, uint32_t idx, bool val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_int8(intarkdb_prepared_statement prepared_statement, uint32_t idx, int8_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_int16(intarkdb_prepared_statement prepared_statement, uint32_t idx, int16_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_int32(intarkdb_prepared_statement prepared_statement, uint32_t idx, int32_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_int64(intarkdb_prepared_statement prepared_statement, uint32_t idx, int64_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_uint8(intarkdb_prepared_statement prepared_statement, uint32_t idx, uint8_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_uint16(intarkdb_prepared_statement prepared_statement, uint32_t idx, uint16_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_uint32(intarkdb_prepared_statement prepared_statement, uint32_t idx, uint32_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_uint64(intarkdb_prepared_statement prepared_statement, uint32_t idx, uint64_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_float(intarkdb_prepared_statement prepared_statement, uint32_t idx, float val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_double(intarkdb_prepared_statement prepared_statement, uint32_t idx, double val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_date(intarkdb_prepared_statement prepared_statement, uint32_t idx, const char *val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_timestamp_ms(intarkdb_prepared_statement prepared_statement, uint32_t idx, int64_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_timestamp_us(intarkdb_prepared_statement prepared_statement, uint32_t idx, int64_t val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_varchar(intarkdb_prepared_statement prepared_statement, uint32_t idx, const char *val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_decimal(intarkdb_prepared_statement prepared_statement, uint32_t idx, const char *val);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_null(intarkdb_prepared_statement prepared_statement, uint32_t idx);
-
-EXP_SQL_API intarkdb_state_t intarkdb_bind_blob(intarkdb_prepared_statement prepared_statement, uint32_t idx, const void *data, uint32_t len);
-
-EXP_SQL_API char *intarkdb_expanded_sql(intarkdb_prepared_statement prepared_statement);
-
-#ifdef __cplusplus
-}
-#endif
\ No newline at end of file
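The go-api/sql-driver/intarkdb package deleted below registers itself with database/sql under the driver name "intarkdb" and binds '?' placeholders through intarkdb_prepare / intarkdb_bind_*. A usage sketch, assuming the statement and rows types that follow complete the standard database/sql prepared-statement path (the table name and values are made up for the example):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "go-api/sql-driver/intarkdb" // registers the "intarkdb" driver
)

func main() {
	// The DSN is the database path, as in the gorm examples above.
	db, err := sql.Open("intarkdb", ".")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec("create table demo(id integer, name varchar(20), primary key(id));"); err != nil {
		fmt.Println(err)
	}
	// Arguments are bound via the intarkdb_bind_* functions declared in the header above.
	if _, err := db.Exec("insert into demo values(?, ?);", 1, "one"); err != nil {
		fmt.Println(err)
	}

	rows, err := db.Query("select id, name from demo;")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		var id, name string
		if err := rows.Scan(&id, &name); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(id, name)
	}
}
```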
diff --git a/src/interface/go/gorm/sql-driver/intarkdb/intarkdb.go b/src/interface/go/gorm/sql-driver/intarkdb/intarkdb.go
deleted file mode 100644
index 123bd168308d1e24002f57c1f6a64b503f2352ed..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/sql-driver/intarkdb/intarkdb.go
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/gorm/sql-driver/intarkdb/intarkdb.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb
-
-/*
-// Location of the header files; relative to this source file it is the current directory, hence ".". If headers live in multiple directories, add one #cgo CFLAGS: ... line per directory
-#cgo CFLAGS: -I./include
-// Where the shared library is loaded from (directory and file name); e.g. -ladd loads libadd.so
-// Absolute paths are supported
-// #cgo LDFLAGS: -L${SRCDIR}/lib -lintarkdb -Wl,-rpath=${SRCDIR}/lib
-#cgo LDFLAGS: -L${SRCDIR}/../../../../../../output/release/lib -lintarkdb -Wl,-rpath=${SRCDIR}/../../../../../../output/release/lib
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdbool.h>
-#include "include/intarkdb.h"
-*/
-import "C"
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "errors"
- "fmt"
- "io"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
- "unsafe"
-)
-
-func init() {
- if driverName != "" {
- sql.Register(driverName, &IntarkdbDriver{})
- }
-}
-
-var driverName = "intarkdb"
-
-type DBStatus int
-
-const (
- dbError DBStatus = -1
- dbSuccess DBStatus = 0
- dbTimeout DBStatus = 1
-)
-
-var dbStatusTag = map[DBStatus]string{
- dbError: "error",
- dbSuccess: "success",
- dbTimeout: "timeout",
-}
-
-var IntarkdbTimestampFormats = []string{
- "2006-01-02 15:04:05.999999999-07:00",
- "2006-01-02T15:04:05.999999999-07:00",
- "2006-01-02 15:04:05.999999999",
- "2006-01-02T15:04:05.999999999",
- "2006-01-02 15:04:05.999999",
- "2006-01-02T15:04:05",
- "2006-01-02 15:04",
- "2006-01-02T15:04",
- "2006-01-02",
-}
-
-type IntarkdbDriver struct {
-}
-
-var (
- db C.intarkdb_database
- mutex sync.Mutex
- once sync.Once
-)
-
-type IntarkdbConn struct {
- mu sync.Mutex
- conn C.intarkdb_connection
- closed bool
- closeDB bool
-}
-
-// IntarkdbTx implements driver.Tx.
-type IntarkdbTx struct {
- c *IntarkdbConn
-}
-
-// IntarkdbStmt implements driver.Stmt.
-type IntarkdbStmt struct {
- mu sync.Mutex
- c *IntarkdbConn
- s C.intarkdb_prepared_statement
- // t string
- closed bool
- // cls bool
-}
-
-// IntarkdbResult implements sql.Result.
-type IntarkdbResult struct {
- // id int64
- changes int64
-}
-
-// IntarkdbRows implements driver.Rows.
-type IntarkdbRows struct {
- s *IntarkdbStmt
- colCount int64
- cols []string
- // decltype []string
- // cls bool
- closed bool
- ctx context.Context // no better alternative to pass context into Next() method
- result C.intarkdb_result
- // colType []
-}
-
-func (tx *IntarkdbTx) Commit() error {
- query := "COMMIT"
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
-
- dbStatus := (DBStatus)(C.intarkdb_query(tx.c.conn, cQuery, nil))
- if dbStatus != dbSuccess {
- tx.Rollback()
- return fmt.Errorf("intarkdb commit %s", dbStatusTag[dbStatus])
- }
-
- return nil
-}
-
-func (tx *IntarkdbTx) Rollback() error {
- query := "ROLLBACK"
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
-
- dbStatus := (DBStatus)(C.intarkdb_query(tx.c.conn, cQuery, nil))
- if dbStatus != dbSuccess {
- return fmt.Errorf("intarkdb rollback %s", dbStatusTag[dbStatus])
- }
-
- return nil
-}
-
-func (d *IntarkdbDriver) Open(dsn string) (driver.Conn, error) {
- pos := strings.IndexRune(dsn, '?')
- var closeDB string
- if pos >= 1 {
- params, err := url.ParseQuery(dsn[pos+1:])
- if err != nil {
- return nil, err
- }
-
- if val := params.Get("closeDB"); val != "" {
- closeDB = val
- }
- }
-
- cPath := C.CString(dsn)
- defer C.free(unsafe.Pointer(cPath))
-
- var dbStatus DBStatus
- once.Do(func() {
- dbStatus = (DBStatus)(C.intarkdb_open(cPath, &db))
- })
-
- if dbStatus != dbSuccess {
- return nil, fmt.Errorf("intarkdb open %s", dbStatusTag[dbStatus])
- }
-
- var connection C.intarkdb_connection
- dbStatus = (DBStatus)(C.intarkdb_connect(db, &connection))
- if dbStatus != dbSuccess {
- return nil, fmt.Errorf("intarkdb connection %s", dbStatusTag[dbStatus])
- }
-
- conn := &IntarkdbConn{conn: connection}
- if closeDB == "true" {
- conn.closeDB = true
- }
-
- return conn, nil
-}
-
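-// exampleOpen is a minimal usage sketch and is not called anywhere in this
-// package. It assumes the driver has been built against the intarkdb shared
-// library and that "./data" is a writable database directory; the
-// closeDB=true query parameter asks Close() to also close the shared database
-// handle when the connection is closed.
-func exampleOpen() {
- db, err := sql.Open("intarkdb", "./data?closeDB=true")
- if err != nil {
- panic(err)
- }
- defer db.Close()
-
- // Ping is implemented in intarkdb_go18.go and only checks the connection handle.
- if err := db.Ping(); err != nil {
- panic(err)
- }
- fmt.Println("connected")
-}
-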
-func (c *IntarkdbConn) Prepare(query string) (driver.Stmt, error) {
- return c.prepare(context.Background(), query)
-}
-
-func (c *IntarkdbConn) prepare(ctx context.Context, query string) (driver.Stmt, error) {
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
- var s C.intarkdb_prepared_statement
-
- dbStatus := (DBStatus)(C.intarkdb_prepare(c.conn, cQuery, &s))
- if dbStatus != dbSuccess {
- return nil, fmt.Errorf("intarkdb prepare %s", C.GoString(C.intarkdb_prepare_errmsg(s)))
- }
-
- ss := &IntarkdbStmt{c: c, s: s}
-
- return ss, nil
-}
-
-// func (c *IntarkdbConn) Query(query string, args []driver.Value) (driver.Rows, error) {
-// return nil,nil
-// // return c.query(context.Background(), query, args)
-// }
-
-func (c *IntarkdbConn) Close() error {
- C.intarkdb_disconnect(&(c.conn))
- // c.mu.Lock()
- c.closed = true
- // c.mu.Unlock()
- if c.closeDB {
- // mutex.Lock()
- // defer mutex.Unlock()
- C.intarkdb_close(&db)
- }
- return nil
-}
-
-func (c *IntarkdbConn) Begin() (driver.Tx, error) {
- return c.begin(context.Background())
-}
-
-func (c *IntarkdbConn) begin(ctx context.Context) (driver.Tx, error) {
- query := "Begin"
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
-
- dbStatus := (DBStatus)(C.intarkdb_query(c.conn, cQuery, nil))
- if dbStatus != dbSuccess {
- return nil, fmt.Errorf("intarkdb begin %s", dbStatusTag[dbStatus])
- }
- return &IntarkdbTx{c}, nil
-}
-
-func (c *IntarkdbConn) dbConnOpen() bool {
- if c == nil {
- return false
- }
- // c.mu.Lock()
- // defer c.mu.Unlock()
- return !c.closed
-}
-
-func (s *IntarkdbStmt) NumInput() int {
- return int(C.intarkdb_prepare_nparam(s.s))
-}
-
-// Close the statement.
-func (s *IntarkdbStmt) Close() error {
- // s.mu.Lock()
- // defer s.mu.Unlock()
- if s.closed {
- return nil
- }
- s.closed = true
- if !s.c.dbConnOpen() {
- return errors.New("intarkdb statement with already closed database connection")
- }
- C.intarkdb_destroy_prepare(&s.s)
- return nil
-}
-
-func (s *IntarkdbStmt) bind(args []driver.Value) error {
- if len(args) != s.NumInput() {
- return fmt.Errorf("the number of args does not match")
- }
-
- var dbStatus DBStatus
- for i, arg := range args {
- cI := C.__uint32_t(i + 1)
- switch v := arg.(type) {
- case nil:
- dbStatus = (DBStatus)(C.intarkdb_bind_null(s.s, cI))
- case string:
- cQuery := C.CString(v)
- defer C.free(unsafe.Pointer(cQuery))
- dbStatus = (DBStatus)(C.intarkdb_bind_varchar(s.s, cI, cQuery))
- case int32:
- dbStatus = (DBStatus)(C.intarkdb_bind_int32(s.s, cI, C.int(v)))
- case int64:
- dbStatus = (DBStatus)(C.intarkdb_bind_int64(s.s, cI, C.long(v)))
- case uint32:
- dbStatus = (DBStatus)(C.intarkdb_bind_uint32(s.s, cI, C.__uint32_t(v)))
- case uint64:
- dbStatus = (DBStatus)(C.intarkdb_bind_uint64(s.s, cI, C.__uint64_t(v)))
- case bool:
- dbStatus = (DBStatus)(C.intarkdb_bind_boolean(s.s, cI, C.bool(v)))
- case float64:
- dbStatus = (DBStatus)(C.intarkdb_bind_double(s.s, cI, C.double(v)))
- case []byte:
- if v == nil {
- dbStatus = (DBStatus)(C.intarkdb_bind_null(s.s, cI))
- } else {
- return fmt.Errorf("not support byte")
- }
- case time.Time:
- b := []byte(v.Format(IntarkdbTimestampFormats[4]))
- dbStatus = (DBStatus)(C.intarkdb_bind_date(s.s, cI, (*C.char)(unsafe.Pointer(&b[0]))))
- default:
- dbStatus = dbError
- }
- if dbStatus != dbSuccess {
- return fmt.Errorf("bind arg err %s", dbStatusTag[dbStatus])
- }
- }
-
- return nil
-}
-
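-// bindExample is an illustrative sketch, not part of this driver: the table
-// name, column names and the "?" placeholder syntax are assumptions. It shows
-// how the type switch in bind above is reached through database/sql.
-func bindExample(db *sql.DB) error {
- stmt, err := db.Prepare("INSERT INTO kv (k, v, ts) VALUES (?, ?, ?)")
- if err != nil {
- return err
- }
- defer stmt.Close()
- // string -> intarkdb_bind_varchar, int64 -> intarkdb_bind_int64,
- // time.Time -> intarkdb_bind_date using IntarkdbTimestampFormats[4].
- _, err = stmt.Exec("answer", int64(42), time.Now())
- return err
-}
-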
-func (s *IntarkdbStmt) Exec(args []driver.Value) (driver.Result, error) {
- if err := s.bind(args); err != nil {
- return nil, err
- }
-
- result := C.intarkdb_init_result()
- dbStatus := (DBStatus)(C.intarkdb_execute_prepared(s.s, result))
-
- if dbStatus != dbSuccess {
- return nil, fmt.Errorf("intarkdb exec %s", C.GoString(C.intarkdb_result_msg(result)))
- }
-
- count := int64(C.intarkdb_row_count(result))
- C.intarkdb_destroy_result(result)
-
- return &IntarkdbResult{changes: count}, nil
-}
-
-func (s *IntarkdbStmt) Query(args []driver.Value) (driver.Rows, error) {
- if err := s.bind(args); err != nil {
- return nil, err
- }
-
- result := C.intarkdb_init_result()
- dbStatus := (DBStatus)(C.intarkdb_execute_prepared(s.s, result))
- if dbStatus != dbSuccess {
- return nil, fmt.Errorf("intarkdb exec %s", C.GoString(C.intarkdb_result_msg(result)))
- }
-
- colCount := int64(C.intarkdb_column_count(result))
-
- return &IntarkdbRows{result: result, colCount: colCount}, nil
-}
-
-func (rc *IntarkdbRows) Next(dest []driver.Value) error {
- if !bool(C.intarkdb_next_row(rc.result)) {
- return io.EOF
- }
- for i := range dest {
- value := C.GoString(C.intarkdb_column_value(rc.result, C.long(i)))
- ctype := C.intarkdb_column_type(rc.result, C.long(i))
- switch ctype {
- case C.GS_TYPE_VARCHAR:
- dest[i] = value
- case C.GS_TYPE_REAL, C.GS_TYPE_DECIMAL:
- dest[i], _ = strconv.ParseFloat(value, 64)
- case C.GS_TYPE_BOOLEAN:
- dest[i], _ = strconv.ParseBool(value)
- case C.GS_TYPE_INTEGER:
- dest[i], _ = strconv.ParseInt(value, 0, 32)
- case C.GS_TYPE_BIGINT:
- dest[i], _ = strconv.ParseInt(value, 0, 64)
- case C.GS_TYPE_UINT32:
- dest[i], _ = strconv.ParseUint(value, 0, 32)
- case C.GS_TYPE_UINT64:
- dest[i], _ = strconv.ParseUint(value, 0, 64)
- case C.GS_TYPE_TIMESTAMP:
- if timeVal, err := time.ParseInLocation(IntarkdbTimestampFormats[4], value, time.Local); err == nil {
- dest[i] = timeVal
- } else {
- // The column is a time value, so return the zero time on parse failure.
- dest[i] = time.Time{}
- }
- }
- // dest[i] = C.GoString(C.intarkdb_column_value(rc.result, C.long(i)))
- }
-
- return nil
-}
-
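-// rowsExample is an illustrative sketch (again with an assumed kv table and
-// columns) of how the type switch in Next surfaces column values through
-// database/sql scanning.
-func rowsExample(db *sql.DB) error {
- rows, err := db.Query("SELECT k, v, ts FROM kv")
- if err != nil {
- return err
- }
- defer rows.Close()
- for rows.Next() {
- var k string
- var v int64
- var ts time.Time
- // VARCHAR -> string, BIGINT -> int64, TIMESTAMP -> time.Time (zero time on parse failure).
- if err := rows.Scan(&k, &v, &ts); err != nil {
- return err
- }
- fmt.Println(k, v, ts)
- }
- return rows.Err()
-}
-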
-func (rc *IntarkdbRows) declTypes() []string {
- return nil
-}
-
-func (rc *IntarkdbRows) Close() error {
- // rc.s.mu.Lock()
- // if rc.s.closed || rc.closed {
- if rc.closed {
- // rc.s.mu.Unlock()
- return nil
- }
- rc.closed = true
- C.intarkdb_destroy_result(rc.result)
- // rc.s.mu.Unlock()
- return nil
-}
-
-func (rc *IntarkdbRows) Columns() []string {
- // rc.s.mu.Lock()
- // defer rc.s.mu.Unlock()
- rc.cols = make([]string, rc.colCount)
- for i := 0; i < int(rc.colCount); i++ {
- rc.cols[i] = C.GoString(C.intarkdb_column_name(rc.result, C.long(i)))
- }
- return rc.cols
-}
-
-func (r *IntarkdbResult) LastInsertId() (int64, error) {
- return 0, nil
-}
-
-// RowsAffected returns how many rows were affected.
-func (r *IntarkdbResult) RowsAffected() (int64, error) {
- return r.changes, nil
-}
diff --git a/src/interface/go/gorm/sql-driver/intarkdb/intarkdb_go18.go b/src/interface/go/gorm/sql-driver/intarkdb/intarkdb_go18.go
deleted file mode 100644
index d6fc6ad1961d36bbabde5ab38fcab1abba9bdd9b..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/sql-driver/intarkdb/intarkdb_go18.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-* Copyright (c) 2014 Yasuhiro Matsumoto
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb_go18.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/gorm/sql-driver/intarkdb/intarkdb_go18.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb
-
-import (
- "database/sql/driver"
-
- "context"
-)
-
-// Ping implements Pinger.
-func (c *IntarkdbConn) Ping(ctx context.Context) error {
- if c.conn == nil {
- // must be ErrBadConn for sql to close the database
- return driver.ErrBadConn
- }
- return nil
-}
-
-// QueryContext implement QueryerContext.
-// func (c *IntarkdbConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
-// return c.query(ctx, query, args)
-// }
-
-// // ExecContext implement ExecerContext.
-// func (c *IntarkdbConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
-// return c.exec(ctx, query, args)
-// }
-
-// PrepareContext implements ConnPrepareContext.
-func (c *IntarkdbConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- return c.prepare(ctx, query)
-}
-
-// BeginTx implements ConnBeginTx.
-func (c *IntarkdbConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
- return c.begin(ctx)
-}
-
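-// Illustrative note: with PrepareContext and BeginTx in place, callers can use
-// the context-aware database/sql entry points, for example (table name and
-// placeholder syntax are assumptions):
-//
-// tx, err := db.BeginTx(ctx, nil)
-// stmt, err := tx.PrepareContext(ctx, "DELETE FROM kv WHERE k = ?")
-//
-// The ctx argument is currently accepted but not used for cancellation.
-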
-// QueryContext implement QueryerContext.
-// func (s *IntarkdbStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
-// return s.query(ctx, args)
-// }
-
-// // ExecContext implement ExecerContext.
-// func (s *IntarkdbStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
-// return s.exec(ctx, args)
-// }
diff --git a/src/interface/go/gorm/tests/count_test.go b/src/interface/go/gorm/tests/count_test.go
deleted file mode 100644
index 13391118e38ccee539b564b75152c46058cbfab4..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/count_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package tests_test
-
-import (
- "fmt"
- "regexp"
- "testing"
-
- "gorm.io/gorm"
-)
-
-func TestCountWithGroup(t *testing.T) {
- DB.Create([]Company{
- {Name: "company_count_group_a"},
- {Name: "company_count_group_a"},
- {Name: "company_count_group_a"},
- {Name: "company_count_group_b"},
- {Name: "company_count_group_c"},
- })
-
- var count1 int64
- if err := DB.Model(&Company{}).Where("name = ?", "company_count_group_a").Group("name").Count(&count1).Error; err != nil {
- t.Errorf(fmt.Sprintf("Count should work, but got err %v", err))
- }
- if count1 != 1 {
- t.Errorf("Count with group should be 1, but got count: %v", count1)
- }
-
- var count2 int64
- if err := DB.Debug().Model(&Company{}).Where("name in ?", []string{"company_count_group_b", "company_count_group_c"}).Group("name").Count(&count2).Error; err != nil {
- t.Errorf(fmt.Sprintf("Count should work, but got err %v", err))
- }
- if count2 != 2 {
- t.Errorf("Count with group should be 2, but got count: %v", count2)
- }
-}
-
-func TestCount(t *testing.T) {
- var (
- user1 = *GetUser("count-1", Config{})
- user2 = *GetUser("count-2", Config{})
- user3 = *GetUser("count-3", Config{})
- users []User
- count, count1, count2 int64
- )
-
- DB.Create(&user1)
- DB.Create(&user2)
- DB.Create(&user3)
-
- if err := DB.Where("name = ?", user1.Name).Or("name = ?", user3.Name).Find(&users).Count(&count).Error; err != nil {
- t.Errorf(fmt.Sprintf("Count should work, but got err %v", err))
- }
-
- if count != int64(len(users)) {
- t.Errorf("Count() method should get correct value, expect: %v, got %v", count, len(users))
- }
-
- if err := DB.Model(&User{}).Where("name = ?", user1.Name).Or("name = ?", user3.Name).Count(&count).Find(&users).Error; err != nil {
- t.Errorf(fmt.Sprintf("Count should work, but got err %v", err))
- }
-
- if count != int64(len(users)) {
- t.Errorf("Count() method should get correct value, expect: %v, got %v", count, len(users))
- }
-
- DB.Model(&User{}).Where("name = ?", user1.Name).Count(&count1).Or("name in ?", []string{user2.Name, user3.Name}).Count(&count2)
- if count1 != 1 || count2 != 3 {
- t.Errorf("multiple count in chain should works")
- }
-
- tx := DB.Model(&User{}).Where("name = ?", user1.Name).Session(&gorm.Session{})
- tx.Count(&count1)
- tx.Or("name in ?", []string{user2.Name, user3.Name}).Count(&count2)
- if count1 != 1 || count2 != 3 {
- t.Errorf("count after new session should works")
- }
-
- var count3 int64
- if err := DB.Model(&User{}).Where("name in ?", []string{user2.Name, user2.Name, user3.Name}).Group("id").Count(&count3).Error; err != nil {
- t.Errorf("Error happened when count with group, but got %v", err)
- }
-
- if count3 != 2 {
- t.Errorf("Should get correct count for count with group, but got %v", count3)
- }
-
- dryDB := DB.Session(&gorm.Session{DryRun: true})
- result := dryDB.Table("users").Select("name").Count(&count) // SELECT COUNT(name) FROM users; the column name is not wrapped in single quotes
- if !regexp.MustCompile(`SELECT COUNT\(name\) FROM users`).MatchString(result.Statement.SQL.String()) {
- t.Fatalf("Build count with select, but got %v", result.Statement.SQL.String())
- }
-
- result = dryDB.Table("users").Distinct("name").Count(&count)
- if !regexp.MustCompile(`SELECT COUNT\(DISTINCT\(name\)\) FROM users`).MatchString(result.Statement.SQL.String()) {
- t.Fatalf("Build count with select, but got %v", result.Statement.SQL.String())
- }
-
- var count4 int64
- if err := DB.Table("users").Joins("LEFT JOIN companies on companies.name = users.name").Where("users.name = ?", user1.Name).Count(&count4).Error; err != nil || count4 != 1 {
- t.Errorf("count with join, got error: %v, count %v", err, count4)
- }
-
- var count5 int64
- if err := DB.Table("users").Where("users.name = ?", user1.Name).Order("name").Count(&count5).Error; err != nil || count5 != 1 {
- t.Errorf("count with join, got error: %v, count %v", err, count)
- }
-
- // not supported
- // var count6 int64
- // if err := DB.Model(&User{}).Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).Select(
- // "(CASE WHEN name=? THEN ? ELSE ? END) as name", "count-1", "main", "other",
- // ).Count(&count6).Find(&users).Error; err != nil || count6 != 3 {
- // t.Fatalf(fmt.Sprintf("Count should work, but got err %v", err))
- // }
-
- // expects := []User{{Name: "main"}, {Name: "other"}, {Name: "other"}}
- // sort.SliceStable(users, func(i, j int) bool {
- // return strings.Compare(users[i].Name, users[j].Name) < 0
- // })
-
- // AssertEqual(t, users, expects)
-
- // var count7 int64
- // if err := DB.Model(&User{}).Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).Select(
- // "(CASE WHEN name=? THEN ? ELSE ? END) as name, age", "count-1", "main", "other",
- // ).Count(&count7).Find(&users).Error; err != nil || count7 != 3 {
- // t.Fatalf(fmt.Sprintf("Count should work, but got err %v", err))
- // }
-
- // expects = []User{{Name: "main", Age: 18}, {Name: "other", Age: 18}, {Name: "other", Age: 18}}
- // sort.SliceStable(users, func(i, j int) bool {
- // return strings.Compare(users[i].Name, users[j].Name) < 0
- // })
-
- // AssertEqual(t, users, expects)
-
- // var count8 int64
- // if err := DB.Model(&User{}).Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).Select(
- // "(CASE WHEN age=18 THEN 1 ELSE 2 END) as age", "name",
- // ).Count(&count8).Find(&users).Error; err != nil || count8 != 3 {
- // t.Fatalf("Count should work, but got err %v", err)
- // }
-
- // expects = []User{{Name: "count-1", Age: 1}, {Name: "count-2", Age: 1}, {Name: "count-3", Age: 1}}
- // sort.SliceStable(users, func(i, j int) bool {
- // return strings.Compare(users[i].Name, users[j].Name) < 0
- // })
-
- // AssertEqual(t, users, expects)
-
- var count9 int64
- if err := DB.Scopes(func(tx *gorm.DB) *gorm.DB {
- return tx.Table("users")
- }).Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).Count(&count9).Find(&users).Error; err != nil || count9 != 3 {
- t.Fatalf("Count should work, but got err %v", err)
- }
-
- var count10 int64
- if err := DB.Model(&User{}).Select("*").Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).Count(&count10).Error; err != nil || count10 != 3 {
- t.Fatalf("Count should be 3, but got count: %v err %v", count10, err)
- }
-
- var count11 int64
- sameUsers := make([]*User, 0)
- for i := 0; i < 3; i++ {
- sameUsers = append(sameUsers, GetUser("count-4", Config{}))
- }
- DB.Create(sameUsers)
-
- if err := DB.Model(&User{}).Where("name = ?", "count-4").Group("name").Count(&count11).Error; err != nil || count11 != 1 {
- t.Fatalf("Count should be 1, but got count: %v err %v", count11, err)
- }
-
- // var count12 int64
- // if err := DB.Table("users").
- // Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).
- // Preload("Toys", func(db *gorm.DB) *gorm.DB {
- // return db.Table("toys").Select("name")
- // }).Count(&count12).Error; err == nil {
- // t.Errorf("error should raise when using preload without schema")
- // }
-
- // var count13 int64
- // if err := DB.Model(User{}).
- // Where("name in ?", []string{user1.Name, user2.Name, user3.Name}).
- // Preload("Toys", func(db *gorm.DB) *gorm.DB {
- // return db.Table("toys").Select("name")
- // }).Count(&count13).Error; err != nil {
- // t.Errorf("no error should raise when using count with preload, but got %v", err)
- // }
-}
diff --git a/src/interface/go/gorm/tests/create_test.go b/src/interface/go/gorm/tests/create_test.go
deleted file mode 100644
index 75458c501c813997c8fef68e74e078eed154404a..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/create_test.go
+++ /dev/null
@@ -1,322 +0,0 @@
-package tests_test
-
-import (
- "errors"
- "testing"
-
- "gorm.io/gorm"
- "gorm.io/gorm/clause"
-)
-
-func TestCreate(t *testing.T) {
- user := *GetUser("create", Config{})
-
- if results := DB.Create(&user); results.Error != nil {
- t.Fatalf("errors happened when create: %v", results.Error)
- } else if results.RowsAffected != 1 {
- t.Fatalf("rows affected expects: %v, got %v", 1, results.RowsAffected)
- }
-
- if user.ID == 0 {
- t.Errorf("user's primary key should has value after create, got : %v", user.ID)
- }
-
- var newUser User
- if err := DB.Where("id = ?", user.ID).First(&newUser).Error; err != nil {
- t.Fatalf("errors happened when query: %v", err)
- } else {
- CheckUser(t, newUser, user)
- }
-}
-
-func TestCreateInBatches(t *testing.T) {
- users := []User{
- *GetUser("create_in_batches_1", Config{}),
- *GetUser("create_in_batches_2", Config{}),
- *GetUser("create_in_batches_3", Config{}),
- *GetUser("create_in_batches_4", Config{}),
- *GetUser("create_in_batches_5", Config{}),
- *GetUser("create_in_batches_6", Config{}),
- }
-
- result := DB.CreateInBatches(&users, 2)
- if result.RowsAffected != int64(len(users)) {
- t.Errorf("affected rows should be %v, but got %v", len(users), result.RowsAffected)
- }
-
- for _, user := range users {
- if user.ID == 0 {
- t.Fatalf("failed to fill user's ID, got %v", user.ID)
- } else {
- var newUser User
- if err := DB.Where("id = ?", user.ID).Preload(clause.Associations).First(&newUser).Error; err != nil {
- t.Fatalf("errors happened when query: %v", err)
- } else {
- CheckUser(t, newUser, user)
- }
- }
- }
-}
-
-func TestCreateInBatchesWithDefaultSize(t *testing.T) {
- users := []User{
- *GetUser("create_with_default_batch_size_1", Config{}),
- *GetUser("create_with_default_batch_sizs_2", Config{}),
- *GetUser("create_with_default_batch_sizs_3", Config{}),
- *GetUser("create_with_default_batch_sizs_4", Config{}),
- *GetUser("create_with_default_batch_sizs_5", Config{}),
- *GetUser("create_with_default_batch_sizs_6", Config{}),
- }
-
- result := DB.Session(&gorm.Session{CreateBatchSize: 2}).Create(&users)
- if result.RowsAffected != int64(len(users)) {
- t.Errorf("affected rows should be %v, but got %v", len(users), result.RowsAffected)
- }
-
- for _, user := range users {
- if user.ID == 0 {
- t.Fatalf("failed to fill user's ID, got %v", user.ID)
- } else {
- var newUser User
- if err := DB.Where("id = ?", user.ID).Preload(clause.Associations).First(&newUser).Error; err != nil {
- t.Fatalf("errors happened when query: %v", err)
- } else {
- CheckUser(t, newUser, user)
- }
- }
- }
-}
-
-func TestCreateFromMap(t *testing.T) {
- if err := DB.Model(&User{}).Create(map[string]interface{}{"Name": "create_from_map", "Age": 18}).Error; err != nil {
- t.Fatalf("failed to create data from map, got error: %v", err)
- }
-
- var result User
- if err := DB.Where("name = ?", "create_from_map").First(&result).Error; err != nil || result.Age != 18 {
- t.Fatalf("failed to create from map, got error %v", err)
- }
-
- if err := DB.Model(&User{}).Create(map[string]interface{}{"name": "create_from_map_1", "age": 18}).Error; err != nil {
- t.Fatalf("failed to create data from map, got error: %v", err)
- }
-
- var result1 User
- if err := DB.Where("name = ?", "create_from_map_1").First(&result1).Error; err != nil || result1.Age != 18 {
- t.Fatalf("failed to create from map, got error %v", err)
- }
-
- datas := []map[string]interface{}{
- {"Name": "create_from_map_2", "Age": 19},
- {"name": "create_from_map_3", "Age": 20},
- }
-
- if err := DB.Model(&User{}).Create(&datas).Error; err != nil {
- t.Fatalf("failed to create data from slice of map, got error: %v", err)
- }
-
- var result2 User
- if err := DB.Where("name = ?", "create_from_map_2").First(&result2).Error; err != nil || result2.Age != 19 {
- t.Fatalf("failed to query data after create from slice of map, got error %v", err)
- }
-
- var result3 User
- if err := DB.Where("name = ?", "create_from_map_3").First(&result3).Error; err != nil || result3.Age != 20 {
- t.Fatalf("failed to query data after create from slice of map, got error %v", err)
- }
-
- syncID()
-}
-
-// not supported:
-// 1. uint64: cannot create an index on a column with datatype uint64
-// 2. INSERT INTO empty_structs DEFAULT VALUES: the DEFAULT VALUES clause is not supported
-// func TestCreateEmptyStruct(t *testing.T) {
-// type EmptyStruct struct {
-// ID uint
-// }
-// DB.Migrator().DropTable(&EmptyStruct{})
-
-// if err := DB.AutoMigrate(&EmptyStruct{}); err != nil {
-// t.Errorf("no error should happen when auto migrate, but got %v", err)
-// }
-
-// if err := DB.Create(&EmptyStruct{}).Error; err != nil {
-// t.Errorf("No error should happen when creating user, but got %v", err)
-// }
-// }
-
-func TestCreateEmptySlice(t *testing.T) {
- data := []User{}
- if err := DB.Create(&data).Error; err != gorm.ErrEmptySlice {
- t.Errorf("no data should be created, got %v", err)
- }
-
- sliceMap := []map[string]interface{}{}
- if err := DB.Model(&User{}).Create(&sliceMap).Error; err != gorm.ErrEmptySlice {
- t.Errorf("no data should be created, got %v", err)
- }
-}
-
-func TestCreateInvalidSlice(t *testing.T) {
- users := []*User{
- GetUser("invalid_slice_1", Config{}),
- GetUser("invalid_slice_2", Config{}),
- nil,
- }
-
- if err := DB.Create(&users).Error; !errors.Is(err, gorm.ErrInvalidData) {
- t.Errorf("should returns error invalid data when creating from slice that contains invalid data")
- }
-}
-
-func TestCreateWithNoGORMPrimaryKey(t *testing.T) {
- type JoinTable struct {
- UserID uint32
- FriendID uint32
- }
-
- DB.Migrator().DropTable(&JoinTable{})
- if err := DB.AutoMigrate(&JoinTable{}); err != nil {
- t.Errorf("no error should happen when auto migrate, but got %v", err)
- }
-
- jt := JoinTable{UserID: 1, FriendID: 2}
- err := DB.Create(&jt).Error
- if err != nil {
- t.Errorf("No error should happen when create a record without a GORM primary key. But in the database this primary key exists and is the union of 2 or more fields\n But got: %s", err)
- }
-}
-
-func TestSelectWithCreate(t *testing.T) {
- user := *GetUser("select_create", Config{})
- DB.Select("ID", "Name", "Age", "Birthday").Create(&user)
-
- var user2 User
- DB.First(&user2, user.ID)
-
- CheckUser(t, user2, user)
-}
-
-func TestFirstOrCreateNotExistsTable(t *testing.T) {
- company := Company{Name: "first_or_create_if_not_exists_table"}
- if err := DB.Table("not_exists").FirstOrCreate(&company).Error; err == nil {
- t.Errorf("not exists table, but err is nil")
- }
-}
-
-func TestFirstOrCreateWithPrimaryKey(t *testing.T) {
- company := Company{ID: 100, Name: "company100_with_primarykey"}
- DB.FirstOrCreate(&company)
-
- if company.ID != 100 {
- t.Errorf("invalid primary key after creating, got %v", company.ID)
- }
-
- companies := []Company{
- {ID: 101, Name: "company101_with_primarykey"},
- {ID: 102, Name: "company102_with_primarykey"},
- }
- DB.Create(&companies)
-
- if companies[0].ID != 101 || companies[1].ID != 102 {
- t.Errorf("invalid primary key after creating, got %v, %v", companies[0].ID, companies[1].ID)
- }
-}
-
-func TestCreateNilPointer(t *testing.T) {
- var user *User
-
- err := DB.Create(user).Error
- if err == nil || err != gorm.ErrInvalidValue {
- t.Fatalf("it is not ErrInvalidValue")
- }
-}
-
-func TestFirstOrCreateRowsAffected(t *testing.T) {
- user := User{Name: "TestFirstOrCreateRowsAffected"}
-
- res := DB.FirstOrCreate(&user, "name = ?", user.Name)
- if res.Error != nil || res.RowsAffected != 1 {
- t.Fatalf("first or create rows affect err:%v rows:%d", res.Error, res.RowsAffected)
- }
-
- res = DB.FirstOrCreate(&user, "name = ?", user.Name)
- if res.Error != nil || res.RowsAffected != 0 {
- t.Fatalf("first or create rows affect err:%v rows:%d", res.Error, res.RowsAffected)
- }
-
- syncID()
-}
-
-func TestCreateWithAutoIncrementCompositeKey(t *testing.T) {
- type CompositeKeyProduct struct {
- ProductID int `gorm:"primaryKey;autoIncrement:true;"` // primary key
- LanguageCode int `gorm:"primaryKey;"` // primary key
- Code string
- Name string
- }
-
- if err := DB.Migrator().DropTable(&CompositeKeyProduct{}); err != nil {
- t.Fatalf("failed to migrate, got error %v", err)
- }
- if err := DB.AutoMigrate(&CompositeKeyProduct{}); err != nil {
- t.Fatalf("failed to migrate, got error %v", err)
- }
-
- prod := &CompositeKeyProduct{
- LanguageCode: 56,
- Code: "Code56",
- Name: "ProductName56",
- }
- if err := DB.Create(&prod).Error; err != nil {
- t.Fatalf("failed to create, got error %v", err)
- }
-
- newProd := &CompositeKeyProduct{}
- if err := DB.First(&newProd).Error; err != nil {
- t.Fatalf("errors happened when query: %v", err)
- } else {
- prod.ProductID = newProd.ProductID
- AssertObjEqual(t, newProd, prod, "ProductID", "LanguageCode", "Code", "Name")
- }
-}
-
-// not supported:
-// 1. default string values: gorm quotes them with double quotes by default, while we currently only support single quotes
-// 2. the OnConflict clause is not supported yet
-// func TestCreateOnConfilctWithDefalutNull(t *testing.T) {
-// type OnConfilctUser struct {
-// ID string
-// Name string `gorm:"default:null"`
-// Email string
-// Mobile string `gorm:"default:'133xxxx'"`
-// }
-
-// err := DB.Migrator().DropTable(&OnConfilctUser{})
-// AssertEqual(t, err, nil)
-// err = DB.AutoMigrate(&OnConfilctUser{})
-// AssertEqual(t, err, nil)
-
-// u := OnConfilctUser{
-// ID: "on-confilct-user-id",
-// Name: "on-confilct-user-name",
-// Email: "on-confilct-user-email",
-// Mobile: "on-confilct-user-mobile",
-// }
-// err = DB.Create(&u).Error
-// AssertEqual(t, err, nil)
-
-// u.Name = "on-confilct-user-name-2"
-// u.Email = "on-confilct-user-email-2"
-// u.Mobile = ""
-// err = DB.Clauses(clause.OnConflict{UpdateAll: true}).Create(&u).Error
-// AssertEqual(t, err, nil)
-
-// var u2 OnConfilctUser
-// err = DB.Where("id = ?", u.ID).First(&u2).Error
-// AssertEqual(t, err, nil)
-// AssertEqual(t, u2.Name, "on-confilct-user-name-2")
-// AssertEqual(t, u2.Email, "on-confilct-user-email-2")
-// AssertEqual(t, u2.Mobile, "133xxxx")
-// }
diff --git a/src/interface/go/gorm/tests/delete_test.go b/src/interface/go/gorm/tests/delete_test.go
deleted file mode 100644
index b19e84f7dcc53e0c200b7ef9a2261cc0e2ac242d..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/delete_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package tests_test
-
-import (
- "errors"
- "testing"
-
- "gorm.io/gorm"
- "gorm.io/gorm/clause"
-)
-
-func TestDelete(t *testing.T) {
- users := []User{*GetUser("delete", Config{}), *GetUser("delete", Config{}), *GetUser("delete", Config{})}
-
- if err := DB.Create(&users).Error; err != nil {
- t.Errorf("errors happened when create: %v", err)
- }
-
- for _, user := range users {
- if user.ID == 0 {
- t.Fatalf("user's primary key should has value after create, got : %v", user.ID)
- }
- }
-
- if res := DB.Delete(&users[1]); res.Error != nil || res.RowsAffected != 1 {
- t.Errorf("errors happened when delete: %v, affected: %v", res.Error, res.RowsAffected)
- }
-
- var result User
- if err := DB.Where("id = ?", users[1].ID).First(&result).Error; err == nil || !errors.Is(err, gorm.ErrRecordNotFound) {
- t.Errorf("should returns record not found error, but got %v", err)
- }
-
- for _, user := range []User{users[0], users[2]} {
- result = User{}
- if err := DB.Where("id = ?", user.ID).First(&result).Error; err != nil {
- t.Errorf("no error should returns when query %v, but got %v", user.ID, err)
- }
- }
-
- for _, user := range []User{users[0], users[2]} {
- result = User{}
- if err := DB.Where("id = ?", user.ID).First(&result).Error; err != nil {
- t.Errorf("no error should returns when query %v, but got %v", user.ID, err)
- }
- }
-
- if err := DB.Delete(&users[0]).Error; err != nil {
- t.Errorf("errors happened when delete: %v", err)
- }
-
- if err := DB.Delete(&User{}).Error; err != gorm.ErrMissingWhereClause {
- t.Errorf("errors happened when delete: %v", err)
- }
-
- if err := DB.Where("id = ?", users[0].ID).First(&result).Error; err == nil || !errors.Is(err, gorm.ErrRecordNotFound) {
- t.Errorf("should returns record not found error, but got %v", err)
- }
-}
-
-func TestDeleteWithTable(t *testing.T) {
- type UserWithDelete struct {
- ID int
- Name string
- }
-
- DB.Table("deleted_users").Migrator().DropTable(UserWithDelete{})
- DB.Table("deleted_users").AutoMigrate(UserWithDelete{})
-
- user := UserWithDelete{Name: "delete1"}
- DB.Table("deleted_users").Create(&user)
-
- var result UserWithDelete
- if err := DB.Table("deleted_users").First(&result).Error; err != nil {
- t.Errorf("failed to find deleted user, got error %v", err)
- }
-
- user.ID = result.ID
- AssertEqual(t, result, user)
-
- if err := DB.Table("deleted_users").Delete(&result).Error; err != nil {
- t.Errorf("failed to delete user, got error %v", err)
- }
-
- var result2 UserWithDelete
- if err := DB.Table("deleted_users").First(&result2, user.ID).Error; !errors.Is(err, gorm.ErrRecordNotFound) {
- t.Errorf("should raise record not found error, but got error %v", err)
- }
-}
-
-func TestInlineCondDelete(t *testing.T) {
- user1 := *GetUser("inline_delete_1", Config{})
- user2 := *GetUser("inline_delete_2", Config{})
- DB.Save(&user1).Save(&user2)
-
- if DB.Delete(&User{}, user1.ID).Error != nil {
- t.Errorf("No error should happen when delete a record")
- } else if err := DB.Where("name = ?", user1.Name).First(&User{}).Error; !errors.Is(err, gorm.ErrRecordNotFound) {
- t.Errorf("User can't be found after delete")
- }
-
- if err := DB.Delete(&User{}, "name = ?", user2.Name).Error; err != nil {
- t.Errorf("No error should happen when delete a record, err=%s", err)
- } else if err := DB.Where("name = ?", user2.Name).First(&User{}).Error; !errors.Is(err, gorm.ErrRecordNotFound) {
- t.Errorf("User can't be found after delete")
- }
-}
-
-func TestBlockGlobalDelete(t *testing.T) {
- if err := DB.Delete(&User{}).Error; err == nil || !errors.Is(err, gorm.ErrMissingWhereClause) {
- t.Errorf("should returns missing WHERE clause while deleting error")
- }
-
- if err := DB.Session(&gorm.Session{AllowGlobalUpdate: true}).Delete(&User{}).Error; err != nil {
- t.Errorf("should returns no error while enable global update, but got err %v", err)
- }
-}
-
-func TestDeleteReturning(t *testing.T) {
- companies := []Company{
- {Name: "delete-returning-1"},
- {Name: "delete-returning-2"},
- {Name: "delete-returning-3"},
- }
- DB.Create(&companies)
-
- var results []Company
- DB.Where("name IN ?", []string{companies[0].Name, companies[1].Name}).Clauses(clause.Returning{}).Delete(&results)
- // if len(results) != 2 {
- // t.Errorf("failed to return delete data, got %v", results)
- // }
-
- var count int64
- DB.Model(&Company{}).Where("name IN ?", []string{companies[0].Name, companies[1].Name, companies[2].Name}).Count(&count)
- if count != 1 {
- t.Errorf("failed to delete data, current count %v", count)
- }
-}
diff --git a/src/interface/go/gorm/tests/distinct_test.go b/src/interface/go/gorm/tests/distinct_test.go
deleted file mode 100644
index 80debd0b25d1873a562652281fb05d8012e647e3..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/distinct_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package tests_test
-
-import (
- "testing"
-
- "gorm.io/gorm"
-)
-
-func TestDistinct(t *testing.T) {
- users := []User{
- *GetUser("distinct", Config{}),
- *GetUser("distinct", Config{}),
- *GetUser("distinct", Config{}),
- *GetUser("distinct-2", Config{}),
- *GetUser("distinct-3", Config{}),
- }
- users[0].Age = 20
-
- if err := DB.Create(&users).Error; err != nil {
- t.Fatalf("errors happened when create users: %v", err)
- }
-
- var names []string
- DB.Table("users").Where("name like ?", "distinct%").Order("name").Pluck("name", &names)
- AssertEqual(t, names, []string{"distinct", "distinct", "distinct", "distinct-2", "distinct-3"})
-
- var names1 []string
- DB.Model(&User{}).Where("name like ?", "distinct%").Distinct().Order("name").Pluck("Name", &names1)
-
- AssertEqual(t, names1, []string{"distinct", "distinct-2", "distinct-3"})
-
- var names2 []string
- DB.Scopes(func(db *gorm.DB) *gorm.DB {
- return db.Table("users")
- }).Where("name like ?", "distinct%").Order("name").Pluck("name", &names2)
- AssertEqual(t, names2, []string{"distinct", "distinct", "distinct", "distinct-2", "distinct-3"})
-
- var results []User
- if err := DB.Distinct("name", "age").Where("name like ?", "distinct%").Order("name, age desc").Find(&results).Error; err != nil {
- t.Errorf("failed to query users, got error: %v", err)
- }
-
- expects := []User{
- {Name: "distinct", Age: 20},
- {Name: "distinct", Age: 18},
- {Name: "distinct-2", Age: 18},
- {Name: "distinct-3", Age: 18},
- }
-
- if len(results) != 4 {
- t.Fatalf("invalid results length found, expects: %v, got %v", len(expects), len(results))
- }
-
- for idx, expect := range expects {
- AssertObjEqual(t, results[idx], expect, "Name", "Age")
- }
-
- var count int64
- if err := DB.Model(&User{}).Where("name like ?", "distinct%").Count(&count).Error; err != nil || count != 5 {
- t.Errorf("failed to query users count, got error: %v, count: %v", err, count)
- }
-
- if err := DB.Model(&User{}).Distinct("name").Where("name like ?", "distinct%").Count(&count).Error; err != nil || count != 3 {
- t.Errorf("failed to query users count, got error: %v, count %v", err, count)
- }
-
- // dryDB := DB.Session(&gorm.Session{DryRun: true})
- // r := dryDB.Distinct("u.id, u.*").Table("user_speaks as s").Joins("inner join users as u on u.id = s.user_id").Where("s.language_code ='US' or s.language_code ='ES'").Find(&User{})
- // if !regexp.MustCompile(`SELECT DISTINCT u\.id, u\.\* FROM user_speaks as s inner join users as u`).MatchString(r.Statement.SQL.String()) {
- // t.Fatalf("Build Distinct with u.*, but got %v", r.Statement.SQL.String())
- // }
-}
diff --git a/src/interface/go/gorm/tests/group_by_test.go b/src/interface/go/gorm/tests/group_by_test.go
deleted file mode 100644
index 50a54e3843265666feddf1dd9c7186aebc65e85c..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/group_by_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package tests_test
-
-import "testing"
-
-func TestGroupBy(t *testing.T) {
- users := []User{
- *GetUser("groupby", Config{}),
- *GetUser("groupby", Config{}),
- *GetUser("groupby", Config{}),
- *GetUser("groupby1", Config{}),
- *GetUser("groupby1", Config{}),
- *GetUser("groupby1", Config{}),
- }
- users[0].Age = 10
- users[0].Active = true
- users[1].Age = 20
- users[2].Age = 30
- users[2].Active = true
-
- users[3].Age = 110
- users[4].Age = 220
- users[4].Active = true
- users[5].Age = 330
- users[5].Active = true
-
- if err := DB.Create(&users).Error; err != nil {
- t.Errorf("errors happened when create: %v", err)
- }
-
- var name string
- var total int
- if err := DB.Model(&User{}).Select("name, sum(age)").Where("name = ?", "groupby").Group("name").Row().Scan(&name, &total); err != nil {
- t.Errorf("no error should happen, but got %v", err)
- }
-
- if name != "groupby" || total != 60 {
- t.Errorf("name should be groupby, but got %v, total should be 60, but got %v", name, total)
- }
-
- if err := DB.Model(&User{}).Select("name, sum(age)").Where("name = ?", "groupby").Group("users.name").Row().Scan(&name, &total); err != nil {
- t.Errorf("no error should happen, but got %v", err)
- }
-
- if name != "groupby" || total != 60 {
- t.Errorf("name should be groupby, but got %v, total should be 60, but got %v", name, total)
- }
-
- if err := DB.Model(&User{}).Select("name, sum(age) as total").Where("name LIKE ?", "groupby%").Group("name").Having("name = ?", "groupby1").Row().Scan(&name, &total); err != nil {
- t.Errorf("no error should happen, but got %v", err)
- }
-
- if name != "groupby1" || total != 660 {
- t.Errorf("name should be groupby, but got %v, total should be 660, but got %v", name, total)
- }
-
- result := struct {
- Name string
- Total int64
- }{}
-
- if err := DB.Model(&User{}).Select("name, sum(age) as total").Where("name LIKE ?", "groupby%").Group("name").Having("name = ?", "groupby1").Find(&result).Error; err != nil {
- t.Errorf("no error should happen, but got %v", err)
- }
-
- if result.Name != "groupby1" || result.Total != 660 {
- t.Errorf("name should be groupby, total should be 660, but got %+v", result)
- }
-
- if err := DB.Model(&User{}).Select("name, sum(age) as total").Where("name LIKE ?", "groupby%").Group("name").Having("name = ?", "groupby1").Scan(&result).Error; err != nil {
- t.Errorf("no error should happen, but got %v", err)
- }
-
- if result.Name != "groupby1" || result.Total != 660 {
- t.Errorf("name should be groupby, total should be 660, but got %+v", result)
- }
-
- var active bool
- if err := DB.Model(&User{}).Select("name, active, sum(age)").Where("name = ? and active = ?", "groupby", true).Group("name").Group("active").Row().Scan(&name, &active, &total); err != nil {
- t.Errorf("no error should happen, but got %v", err)
- }
-
- if name != "groupby" || active != true || total != 40 {
- t.Errorf("group by two columns, name %v, age %v, active: %v", name, total, active)
- }
-}
diff --git a/src/interface/go/gorm/tests/helper_test.go b/src/interface/go/gorm/tests/helper_test.go
deleted file mode 100644
index c3295a52338285a8a0f97605350f70f2ecce6bef..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/helper_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package tests_test
-
-import (
- "database/sql/driver"
- "fmt"
- "go/ast"
- "path/filepath"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "gorm.io/gorm"
-)
-
-var gormSourceDir string
-
-func init() {
- _, file, _, _ := runtime.Caller(0)
- // compatible solution to get gorm source directory with various operating systems
- gormSourceDir = sourceDir(file)
-}
-
-func sourceDir(file string) string {
- dir := filepath.Dir(file)
- dir = filepath.Dir(dir)
-
- s := filepath.Dir(dir)
- if filepath.Base(s) != "gorm.io" {
- s = dir
- }
- return filepath.ToSlash(s) + "/"
-}
-
-type Config struct {
- Account bool
- Pets int
- Toys int
- Company bool
- Manager bool
- Team int
- Languages int
- Friends int
- NamedPet bool
-}
-
-var UserID int = 1
-var UserIDMutex sync.Mutex
-
-func GetUser(name string, config Config) *User {
- birthday := time.Now().Round(time.Second)
- user := User{
- Name: name,
- Age: 18,
- Birthday: birthday,
- }
-
- UserIDMutex.Lock()
- user.ID = UserID
- UserID++
- UserIDMutex.Unlock()
-
- return &user
-}
-
-func syncID() {
- var id int
- DB.Model(&User{}).Select("id").Order("id desc").Limit(1).Find(&id)
-
- UserIDMutex.Lock()
- UserID = id + 1
- UserIDMutex.Unlock()
-}
-
-func CheckUser(t *testing.T, user User, expect User) {
- if !assertEqual(user, expect) {
- t.Errorf("user: expect: %+v, got %+v", expect, user)
- return
- }
-}
-
-func assertEqual(src, dst interface{}) bool {
- return reflect.DeepEqual(src, dst)
-}
-
-func db(unscoped bool) *gorm.DB {
- if unscoped {
- return DB.Unscoped()
- } else {
- return DB
- }
-}
-
-// FileWithLineNum returns the file name and line number of the current caller
-func FileWithLineNum() string {
- // the second caller is usually from gorm internals, so start i from 2
- for i := 2; i < 15; i++ {
- _, file, line, ok := runtime.Caller(i)
- if ok && (!strings.HasPrefix(file, gormSourceDir) || strings.HasSuffix(file, "_test.go")) {
- return file + ":" + strconv.FormatInt(int64(line), 10)
- }
- }
-
- return ""
-}
-
-func AssertObjEqual(t *testing.T, r, e interface{}, names ...string) {
- rv := reflect.Indirect(reflect.ValueOf(r))
- ev := reflect.Indirect(reflect.ValueOf(e))
- if rv.IsValid() != ev.IsValid() {
- t.Errorf("%v: expect: %+v, got %+v", FileWithLineNum(), r, e)
- return
- }
-
- for _, name := range names {
- got := rv.FieldByName(name).Interface()
- expect := ev.FieldByName(name).Interface()
- t.Run(name, func(t *testing.T) {
- AssertEqual(t, got, expect)
- })
- }
-}
-
-func AssertEqual(t *testing.T, got, expect interface{}) {
- if !reflect.DeepEqual(got, expect) {
- isEqual := func() {
- if curTime, ok := got.(time.Time); ok {
- format := "2006-01-02T15:04:05Z07:00"
-
- if curTime.Round(time.Second).UTC().Format(format) != expect.(time.Time).Round(time.Second).UTC().Format(format) && curTime.Truncate(time.Second).UTC().Format(format) != expect.(time.Time).Truncate(time.Second).UTC().Format(format) {
- t.Errorf("%v: expect: %v, got %v after time round", FileWithLineNum(), expect.(time.Time), curTime)
- }
- } else if fmt.Sprint(got) != fmt.Sprint(expect) {
- t.Errorf("%v: expect: %#v, got %#v", FileWithLineNum(), expect, got)
- }
- }
-
- if fmt.Sprint(got) == fmt.Sprint(expect) {
- return
- }
-
- if reflect.Indirect(reflect.ValueOf(got)).IsValid() != reflect.Indirect(reflect.ValueOf(expect)).IsValid() {
- t.Errorf("%v: expect: %+v, got %+v", FileWithLineNum(), expect, got)
- return
- }
-
- if valuer, ok := got.(driver.Valuer); ok {
- got, _ = valuer.Value()
- }
-
- if valuer, ok := expect.(driver.Valuer); ok {
- expect, _ = valuer.Value()
- }
-
- if got != nil {
- got = reflect.Indirect(reflect.ValueOf(got)).Interface()
- }
-
- if expect != nil {
- expect = reflect.Indirect(reflect.ValueOf(expect)).Interface()
- }
-
- if reflect.ValueOf(got).IsValid() != reflect.ValueOf(expect).IsValid() {
- t.Errorf("%v: expect: %+v, got %+v", FileWithLineNum(), expect, got)
- return
- }
-
- if reflect.ValueOf(got).Kind() == reflect.Slice {
- if reflect.ValueOf(expect).Kind() == reflect.Slice {
- if reflect.ValueOf(got).Len() == reflect.ValueOf(expect).Len() {
- for i := 0; i < reflect.ValueOf(got).Len(); i++ {
- name := fmt.Sprintf(reflect.ValueOf(got).Type().Name()+" #%v", i)
- t.Run(name, func(t *testing.T) {
- AssertEqual(t, reflect.ValueOf(got).Index(i).Interface(), reflect.ValueOf(expect).Index(i).Interface())
- })
- }
- } else {
- name := reflect.ValueOf(got).Type().Elem().Name()
- t.Errorf("%v expects length: %v, got %v (expects: %+v, got %+v)", name, reflect.ValueOf(expect).Len(), reflect.ValueOf(got).Len(), expect, got)
- }
- return
- }
- }
-
- if reflect.ValueOf(got).Kind() == reflect.Struct {
- if reflect.ValueOf(expect).Kind() == reflect.Struct {
- if reflect.ValueOf(got).NumField() == reflect.ValueOf(expect).NumField() {
- exported := false
- for i := 0; i < reflect.ValueOf(got).NumField(); i++ {
- if fieldStruct := reflect.ValueOf(got).Type().Field(i); ast.IsExported(fieldStruct.Name) {
- exported = true
- field := reflect.ValueOf(got).Field(i)
- t.Run(fieldStruct.Name, func(t *testing.T) {
- AssertEqual(t, field.Interface(), reflect.ValueOf(expect).Field(i).Interface())
- })
- }
- }
-
- if exported {
- return
- }
- }
- }
- }
-
- if reflect.ValueOf(got).Type().ConvertibleTo(reflect.ValueOf(expect).Type()) {
- got = reflect.ValueOf(got).Convert(reflect.ValueOf(expect).Type()).Interface()
- isEqual()
- } else if reflect.ValueOf(expect).Type().ConvertibleTo(reflect.ValueOf(got).Type()) {
- expect = reflect.ValueOf(expect).Convert(reflect.ValueOf(got).Type()).Interface()
- isEqual()
- } else {
- t.Errorf("%v: expect: %+v, got %+v", FileWithLineNum(), expect, got)
- return
- }
- }
-}
diff --git a/src/interface/go/gorm/tests/join_test.go b/src/interface/go/gorm/tests/join_test.go
deleted file mode 100644
index a00f7a72e85cf30b85a733323af025bd93a33f7d..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/join_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package tests_test
-
-import "testing"
-
-func TestJoins(t *testing.T) {
- // user := *GetUser("joins-1", Config{Company: true, Manager: true, Account: true, NamedPet: false})
-
- // DB.Create(&user)
-
- // var user2 User
- // if err := DB.Joins("NamedPet").Joins("Company").Joins("Manager").Joins("Account").First(&user2, "users.name = ?", user.Name).Error; err != nil {
- // t.Fatalf("Failed to load with joins, got error: %v", err)
- // }
-
- // CheckUser(t, user2, user)
-}
diff --git a/src/interface/go/gorm/tests/model_test.go b/src/interface/go/gorm/tests/model_test.go
deleted file mode 100644
index d059a9fb93b0e256e1e672b6c8b1f357864c5af3..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/model_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package tests_test
-
-import (
- "database/sql"
- "time"
-)
-
-type Model struct {
- ID int `gorm:"primarykey"`
- CreatedAt time.Time
- UpdatedAt time.Time
- DeletedAt sql.NullTime `gorm:"index"`
-}
-
-type User struct {
- ID int `gorm:"primarykey"`
- // gorm.Model
- Name string
- Age uint
- Birthday time.Time
- Active bool
-}
-
-type Account struct {
- Model
- UserID sql.NullInt64
- Number string
-}
-
-type Pet struct {
- Model
- UserID *uint
- Name string
-}
-
-type Toy struct {
- Model
- Name string
- OwnerID string
- OwnerType string
-}
-
-type Company struct {
- ID int
- Name string
-}
-
-type Language struct {
- Code string `gorm:"primarykey"`
- Name string
-}
-
-type Coupon struct {
- ID int `gorm:"primarykey; size:255"`
- AppliesToProduct []*CouponProduct `gorm:"foreignKey:CouponId;constraint:OnDelete:CASCADE"`
- AmountOff uint32 `gorm:"column:amount_off"`
- PercentOff float32 `gorm:"column:percent_off"`
-}
-
-type CouponProduct struct {
- CouponId int `gorm:"primarykey;size:255"`
- ProductId string `gorm:"primarykey;size:255"`
- Desc string
-}
-
-type Order struct {
- Model
- Num string
- Coupon *Coupon
- CouponID string
-}
-
-type Parent struct {
- Model
- FavChildID uint
- FavChild *Child
- Children []*Child
-}
-
-type Child struct {
- Model
- Name string
- ParentID *uint
-}
diff --git a/src/interface/go/gorm/tests/tests_test.go b/src/interface/go/gorm/tests/tests_test.go
deleted file mode 100644
index bfc43443ccccef5148d20ed328bb89b576097374..0000000000000000000000000000000000000000
--- a/src/interface/go/gorm/tests/tests_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package tests_test
-
-import (
- "flag"
- "go-api/orm-driver/intarkdb"
- "log"
- "os"
- "testing"
-
- "gorm.io/driver/sqlite"
- "gorm.io/gorm"
- "gorm.io/gorm/logger"
-)
-
-var (
- DB *gorm.DB
- dsn string
- dbName string
- logLevel int
-)
-
-func init() {
- if flag.Parsed() {
- return
- }
-
- flag.StringVar(&dsn, "dsn", ".", "dsn param")
- flag.StringVar(&dbName, "dbName", "intarkdb", "db name : intarkdb, sqlite")
- flag.IntVar(&logLevel, "logLevel", 4, "logLevel 1:Silent, 2:Error, 3:Warn, 4:Info")
-}
-
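-// Illustrative invocations from this directory (note that the intarkdb database
-// path is currently hard-coded to "../." in OpenTestConnection, so -dsn is not
-// used there):
-//
-// go test -dbName intarkdb -logLevel 3
-// go test -dbName sqlite
-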
-func TestMain(m *testing.M) {
- flag.Parse()
-
- var err error
- DB, err = OpenTestConnection(&gorm.Config{})
- if err != nil {
- log.Printf("failed to connect database, got error %v", err)
- os.Exit(1)
- }
-
- sqlDB, err := DB.DB()
- if err != nil {
- log.Printf("failed to connect database, got error %v", err)
- os.Exit(1)
- }
-
- err = sqlDB.Ping()
- if err != nil {
- log.Printf("failed to ping sqlDB, got error %v", err)
- os.Exit(1)
- }
-
- RunMigrations()
-
- exitCode := m.Run()
- os.Exit(exitCode)
-}
-
-func OpenTestConnection(cfg *gorm.Config) (db *gorm.DB, err error) {
- switch dbName {
- case "intarkdb":
- log.Println("testing intarkdb...")
- db, err = gorm.Open(intarkdb.Open("../."), cfg)
- case "sqlite":
- log.Println("testing sqlite...")
- db, err = gorm.Open(sqlite.Open("../mydatabase.db"), cfg)
- default:
- log.Printf("Unsupported database %s", dbName)
- os.Exit(1)
- }
-
- if err != nil {
- log.Printf("failed to open %s, got error %v", dbName, err)
- return
- }
-
- switch logLevel {
- case int(logger.Silent):
- db.Logger = db.Logger.LogMode(logger.Silent)
- case int(logger.Error):
- db.Logger = db.Logger.LogMode(logger.Error)
- case int(logger.Warn):
- db.Logger = db.Logger.LogMode(logger.Warn)
- case int(logger.Info):
- db.Logger = db.Logger.LogMode(logger.Info)
- }
-
- return
-}
-
-func RunMigrations() {
- var err error
- allModels := []interface{}{&User{}, &Company{}}
- // rand.Seed(time.Now().UnixNano())
- // rand.Shuffle(len(allModels), func(i, j int) { allModels[i], allModels[j] = allModels[j], allModels[i] })
-
- // DB.Migrator().DropTable("user_friends", "user_speaks")
-
- if err = DB.Migrator().DropTable(allModels...); err != nil {
- log.Printf("Failed to drop table, got error %v\n", err)
- os.Exit(1)
- }
-
- if err = DB.AutoMigrate(allModels...); err != nil {
- log.Printf("Failed to auto migrate, but got error %v\n", err)
- os.Exit(1)
- }
-
- for _, m := range allModels {
- if !DB.Migrator().HasTable(m) {
- log.Printf("Failed to create table for %#v\n", m)
- os.Exit(1)
- }
- }
-}
diff --git a/src/interface/go/prometheus-remote-database/.gitignore b/src/interface/go/prometheus-remote-database/.gitignore
deleted file mode 100644
index ba13e7fbe4c68656298c58aaabefa4037b67707b..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-intarkdb/
-intarkdb_cli
-.vscode/
-go.sum
-*.log
-nohup.out
-core*
\ No newline at end of file
diff --git a/src/interface/go/prometheus-remote-database/config.yml b/src/interface/go/prometheus-remote-database/config.yml
deleted file mode 100644
index 1b6d2144771a51937c4ea16999006bd60526ac24..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/config.yml
+++ /dev/null
@@ -1,145 +0,0 @@
-table:
- interval: 4h
- retention: 2d
- limit_count: 5000
- max_count: 100000
- max_day: 7
- name_with_query:
- - pg_active_slowsql_query_runtime
- - pg_stat_activity_transaction_time
- - pg_thread_lock_time
- name:
- - agent_cpu_seconds_total
- - agent_disk_discarded_sectors_total
- - agent_disk_discards_completed_successfully_total
- - agent_disk_discards_merged_total
- - agent_disk_flush_requests_completed_successfully_total
- - agent_disk_io_pgr_total
- - agent_disk_rd_ios_total
- - agent_disk_rd_merges_total
- - agent_disk_rd_sectors_total
- - agent_disk_rd_ticks_total
- - agent_disk_rq_ticks_total
- - agent_disk_time_spent_discarding_total
- - agent_disk_time_spent_flushing_total
- - agent_disk_tot_ticks_total
- - agent_disk_wr_ios_total
- - agent_disk_wr_merges_total
- - agent_disk_wr_sectors_total
- - agent_disk_wr_ticks_total
- - agent_filesystem_free_size_kbytes
- - agent_filesystem_inode_free_size
- - agent_filesystem_inode_size
- - agent_filesystem_inode_used_size
- - agent_filesystem_size_kbytes
- - agent_filesystem_used_size_kbytes
- - agent_free_Mem_available_bytes
- - agent_free_Mem_cache_bytes
- - agent_free_Mem_free_bytes
- - agent_free_Mem_shared_bytes
- - agent_free_Mem_total_bytes
- - agent_free_Mem_used_bytes
- - agent_free_Swap_free_bytes
- - agent_free_Swap_total_bytes
- - agent_free_Swap_used_bytes
- - agent_host_conn_status
- - agent_load1
- - agent_load15
- - agent_load5
- - agent_memory_MemAvailable_bytes
- - agent_memory_MemTotal_bytes
- - agent_netstat_Tcp_CurrEstab
- - agent_netstat_Tcp_InSegs
- - agent_netstat_Tcp_OutSegs
- - agent_network_receive_bytes_total
- - agent_network_receive_compressed_total
- - agent_network_receive_dropped_total
- - agent_network_receive_errors_total
- - agent_network_receive_fifo_total
- - agent_network_receive_frame_total
- - agent_network_receive_multicast_total
- - agent_network_receive_packets_total
- - agent_network_socket
- - agent_network_transmit_bytes_total
- - agent_network_transmit_carrier_total
- - agent_network_transmit_colls_total
- - agent_network_transmit_compressed_total
- - agent_network_transmit_dropped_total
- - agent_network_transmit_errors_total
- - agent_network_transmit_fifo_total
- - agent_network_transmit_packets_total
- - agent_sockstat_TCP_alloc
- - agent_vmstat_b
- - agent_vmstat_bi
- - agent_vmstat_bo
- - agent_vmstat_buff
- - agent_vmstat_cache
- - agent_vmstat_cs
- - agent_vmstat_free
- - agent_vmstat_in
- - agent_vmstat_r
- - agent_vmstat_si
- - agent_vmstat_so
- - agent_vmstat_st
- - agent_vmstat_swpd
- - agent_vmstat_sy
- - agent_vmstat_us
- - agent_vmstat_wa
- - db_filesystem_data_used_size_kbytes
- - db_filesystem_free_size_kbytes
- - db_filesystem_size_kbytes
- - db_filesystem_used_size_kbytes
- - db_filesystem_xlog_used_size_kbytes
- - db_sql_delete_count
- - db_sql_insert_count
- - db_sql_select_count
- - db_sql_update_count
- - gauss_statement_responsetime_percentile_p80
- - gauss_statement_responsetime_percentile_p95
- - gauss_thread_wait_status_count
- - gauss_workload_sql_count_select_count
- - local_threadpool_status_pool_utilization_rate
- - pg_connections_max_conn
- - pg_connections_used_conn
- - pg_db_status
- - pg_lock_count
- - pg_stat_activity_count
- - pg_stat_activity_slow_count
- - pg_stat_bgwriter_buffers_backend_total
- - pg_stat_bgwriter_buffers_checkpoint_total
- - pg_stat_bgwriter_buffers_clean_total
- - pg_stat_database_blks_read_total
- - pg_stat_database_deadlocks_total
- - pg_stat_database_tup_deleted_total
- - pg_stat_database_tup_fetched_total
- - pg_stat_database_tup_inserted_total
- - pg_stat_database_tup_returned_total
- - pg_stat_database_tup_updated_total
- - pg_stat_database_xact_commit_total
- - pg_stat_database_xact_rollback_total
- - pg_state_activity_group_count
- - pg_tablespace_size
- - pg_tablespace_spcmaxsize
- - pg_wal_write_total_count
- - scrape_duration_seconds
- - scrape_samples_post_metric_relabeling
- - scrape_samples_scraped
- - scrape_series_added
- - top_db_cpu
- - top_db_mem
- - up
-log_file:
- path: ./
- name: run.log
- is_file: true
- max_file_size: 1000
- level: debug
-pool:
- write_pool_size: 10
- read_pool_size: 10
- write_timeout: 30
- read_timeout: 30
-http_server:
- port: 9201
- read_url: /read
- write_url: /write
diff --git a/src/interface/go/prometheus-remote-database/config/config.go b/src/interface/go/prometheus-remote-database/config/config.go
deleted file mode 100644
index 7b821d2e22502d7b753b3fc44e758ece3eccbe97..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/config/config.go
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* config.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/config/config.go
-*
-* -------------------------------------------------------------------------
- */
-
-package config
-
-import (
- "fmt"
- "os"
-
- "gopkg.in/yaml.v2"
-)
-
-type Config struct {
- Table `yaml:"table"`
- LogFile `yaml:"log_file"`
- Pool `yaml:"pool"`
- HttpServer `yaml:"http_server"`
-}
-
-type Table struct {
- Interval string `yaml:"interval"`
- Retention string `yaml:"retention"`
- LimitCount uint32 `yaml:"limit_count"`
- MaxCount uint32 `yaml:"max_count"`
- MaxDay uint32 `yaml:"max_day"`
- Names []string `yaml:"name"`
- NameWithQuery []string `yaml:"name_with_query"`
-}
-
-type LogFile struct {
- Path string `yaml:"path"`
- Name string `yaml:"name"`
- IsFile bool `yaml:"is_file"`
- MaxFileSize uint64 `yaml:"max_file_size"`
- Level string `yaml:"level"`
-}
-
-type Pool struct {
- WritePoolSize uint `yaml:"write_pool_size"`
- ReadPoolSize uint `yaml:"read_pool_size"`
- WriteTimeout uint `yaml:"write_timeout"`
- ReadTimeout uint `yaml:"read_timeout"`
-}
-
-type HttpServer struct {
- Port uint `yaml:"port"`
- ReadURL string `yaml:"read_url"`
- WriteURL string `yaml:"write_url"`
-}
-
-func init() {
- _, errOne := os.Stat("config.yml") // must exist
- _, errTwo := os.Stat("config_temp.yml") // must not exist
- if errOne != nil || !os.IsNotExist(errTwo) {
- fmt.Println("config file err")
- os.Exit(1)
- }
-}
-
-func LoadConfig() (config Config, err error) {
- var content []byte
- content, err = os.ReadFile("config.yml")
- if err != nil {
- fmt.Println("error : ", err)
- return
- }
-
- err = yaml.Unmarshal(content, &config)
- if err != nil {
- fmt.Println("error : ", err)
- return
- }
-
- return
-}
-
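-// UpdateConfig persists the in-memory config: marshal to YAML, write config_temp.yml, then replace config.yml with it.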
-func (config *Config) UpdateConfig() error {
- // config.Tables = append(config.Tables, config.CreateTables...)
- // config.CreateTables = nil
-
- var output []byte
- output, err := yaml.Marshal(&config)
- if err != nil {
- return fmt.Errorf("yaml.Marshal err : %v", err)
- }
-
- err = os.WriteFile("config_temp.yml", output, 0644)
- if err != nil {
- return fmt.Errorf("WriteFile err : %v", err)
- }
-
- err = os.Remove("config.yml")
- if err != nil {
- return fmt.Errorf("Remove err : %v", err)
- }
-
- err = os.Rename("config_temp.yml", "config.yml")
- if err != nil {
- return fmt.Errorf("Rename err : %v", err)
- }
-
- return nil
-}
diff --git a/src/interface/go/prometheus-remote-database/go.mod b/src/interface/go/prometheus-remote-database/go.mod
deleted file mode 100644
index abebc25b88fec7548e31a85d5be6a80c730e00b1..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/go.mod
+++ /dev/null
@@ -1,71 +0,0 @@
-module prometheus-remote-database
-
-go 1.20
-
-require (
- github.com/gogo/protobuf v1.3.2
- github.com/golang/snappy v0.0.4
- github.com/prometheus/client_golang v1.19.1
- github.com/prometheus/common v0.55.0
- github.com/prometheus/prometheus v0.50.0
- gopkg.in/yaml.v2 v2.4.0
-)
-
-require (
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
- github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
- github.com/aws/aws-sdk-go v1.50.0 // indirect
- github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
- github.com/dennwc/varint v1.0.0 // indirect
- github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/go-kit/log v0.2.1 // indirect
- github.com/go-logfmt/logfmt v0.6.0 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
- github.com/go-logr/stdr v1.2.2 // indirect
- github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/uuid v1.5.0 // indirect
- github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
- github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/jpillora/backoff v1.0.0 // indirect
- github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.4 // indirect
- github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
- github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common/sigv4 v0.1.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
- go.opentelemetry.io/collector/featuregate v1.0.1 // indirect
- go.opentelemetry.io/collector/pdata v1.0.1 // indirect
- go.opentelemetry.io/collector/semconv v0.93.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
- go.opentelemetry.io/otel v1.22.0 // indirect
- go.opentelemetry.io/otel/metric v1.22.0 // indirect
- go.opentelemetry.io/otel/trace v1.22.0 // indirect
- go.uber.org/atomic v1.11.0 // indirect
- go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.24.0 // indirect
- golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
- golang.org/x/net v0.26.0 // indirect
- golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sync v0.8.0
- golang.org/x/sys v0.21.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect
- google.golang.org/grpc v1.61.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
- k8s.io/apimachinery v0.28.6 // indirect
- k8s.io/client-go v0.28.6 // indirect
- k8s.io/klog/v2 v2.120.1 // indirect
- k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
-)
diff --git a/src/interface/go/prometheus-remote-database/intarkdb_interface/include/intarkdb.h b/src/interface/go/prometheus-remote-database/intarkdb_interface/include/intarkdb.h
deleted file mode 100644
index 7c2a91ef3a37e697880e1c2494ae2289e222f08c..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/intarkdb_interface/include/intarkdb.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) GBA-NCTI-ISDC. 2022-2024.
- *
- * openGauss embedded is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * -------------------------------------------------------------------------
- *
- * intarkdb.h
- *
- * IDENTIFICATION
- * openGauss-embedded/src/interface/go/prometheus-remote-database/intarkdb_interface/include/intarkdb.h
- *
- * -------------------------------------------------------------------------
- */
-
-#pragma once
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef WIN32
-#define EXP_SQL_API __declspec(dllexport)
-#define EXPORT_API __declspec(dllexport)
-#else
-#define EXP_SQL_API __attribute__((visibility("default")))
-#define EXPORT_API __attribute__((visibility("default")))
-#endif
-
-// sql
-typedef __int64_t int64_t;
-
-typedef struct st_api_text {
- char *str;
- int64_t len;
- int64_t data_type;
-} api_text_t;
-
-typedef struct st_result_column {
- char *data;
- int64_t data_len;
-} result_column;
-
-typedef struct st_result_row {
- int64_t column_count; // number of columns
- result_column *row_column_list; // columns contained in this row
- struct st_result_row *next;
-} result_row;
-
-typedef struct st_intarkdb_res_def {
- int64_t row_count; // number of rows
- bool is_select;
- void *res_row; // row result set (actually a RecordBatch*)
-
- int64_t column_count; // number of columns
- api_text_t *column_names; // column names
- char *msg; // execution result message
-
- char *value_ptr; // for free column value
- int64_t row_idx; // for next
-} intarkdb_res_def;
-
-typedef struct st_intarkdb_database {
- void *db;
-} *intarkdb_database;
-
-typedef struct st_intarkdb_connection {
- void *conn;
-} *intarkdb_connection;
-
-typedef enum en_status_def {
- SQL_ERROR = -1,
- SQL_SUCCESS = 0,
- SQL_TIMEDOUT = 1,
-} intarkdb_state_t;
-
-typedef struct st_intarkdb_res_def *intarkdb_result;
-
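-/* Typical SQL call sequence, as used by the Go wrapper: intarkdb_open -> intarkdb_connect -> intarkdb_init_result -> intarkdb_query -> row/column/value accessors -> intarkdb_destroy_result. */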
-EXP_SQL_API intarkdb_state_t intarkdb_open(const char *path, intarkdb_database *db);
-
-EXP_SQL_API void intarkdb_close(intarkdb_database *db);
-
-EXP_SQL_API intarkdb_state_t intarkdb_connect(intarkdb_database database, intarkdb_connection *conn);
-
-EXP_SQL_API void intarkdb_disconnect(intarkdb_connection *conn);
-
-EXP_SQL_API intarkdb_state_t intarkdb_query(intarkdb_connection connection, const char *query, intarkdb_result result);
-
-EXP_SQL_API intarkdb_result intarkdb_init_result();
-
-EXP_SQL_API int64_t intarkdb_row_count(intarkdb_result result);
-
-EXP_SQL_API int64_t intarkdb_column_count(intarkdb_result result);
-
-EXP_SQL_API const char *intarkdb_column_name(intarkdb_result result, int64_t col);
-
-EXP_SQL_API char *intarkdb_value_varchar(intarkdb_result result, int64_t row, int64_t col);
-
-EXP_SQL_API void intarkdb_free_row(intarkdb_result result);
-
-EXP_SQL_API void intarkdb_destroy_result(intarkdb_result result);
-
-EXP_SQL_API const char * intarkdb_result_msg(intarkdb_result result);
-
-// kv
-/* This is the reply object returned by redisCommand() */
-typedef __SIZE_TYPE__ size_t;
-
-typedef struct st_intarkdb_connection_kv {
- void *conn;
-} *intarkdb_connection_kv;
-
-/* This is the reply object */
-typedef struct KvReply_t {
- int type; /* return type */
- size_t len; /* Length of string */
- char *str; /* err or value*/
-} KvReply;
-
-typedef enum en_status_kv {
- KV_ERROR = -1,
- KV_SUCCESS = 0,
-} intarkdb_state_kv;
-
-EXP_SQL_API int intarkdb_connect_kv(intarkdb_database database, intarkdb_connection_kv *kvconn);
-
-EXP_SQL_API void intarkdb_disconnect_kv(intarkdb_connection_kv *kvconn);
-
-EXP_SQL_API int intarkdb_open_table_kv(intarkdb_connection_kv kvconn, const char *table_name);
-
-EXP_SQL_API void *intarkdb_set(intarkdb_connection_kv kvconn, const char *key, const char *val);
-
-EXP_SQL_API void *intarkdb_get(intarkdb_connection_kv kvconn, const char *key);
-
-EXP_SQL_API void *intarkdb_del(intarkdb_connection_kv kvconn, const char *key);
-
-EXP_SQL_API intarkdb_state_kv intarkdb_begin(intarkdb_connection_kv kvconn);
-
-EXP_SQL_API intarkdb_state_kv intarkdb_commit(intarkdb_connection_kv kvconn);
-
-EXP_SQL_API intarkdb_state_kv intarkdb_rollback(intarkdb_connection_kv kvconn);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/interface/go/prometheus-remote-database/intarkdb_interface/intarkdb_interface.go b/src/interface/go/prometheus-remote-database/intarkdb_interface/intarkdb_interface.go
deleted file mode 100644
index 464eecb1fb317db306bfec5948d55fd515916abd..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/intarkdb_interface/intarkdb_interface.go
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* intarkdb_interface.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/intarkdb_interface/intarkdb_interface.go
-*
-* -------------------------------------------------------------------------
- */
-
-package intarkdb_interface
-
-/*
-// Header search paths are relative to this source file, hence "."; add more "#cgo CFLAGS:" lines if headers live in several directories
-#cgo CFLAGS: -I./include
-// Where to load the shared library from (path and file name); e.g. -ladd loads libadd.so
-// #cgo LDFLAGS: -L${SRCDIR}/lib -lintarkdb -Wl,-rpath=${SRCDIR}/lib
-#cgo LDFLAGS: -L${SRCDIR}/../../../../../output/release/lib -lintarkdb -Wl,-rpath=${SRCDIR}/../../../../../output/release/lib
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdbool.h>
-#include "include/intarkdb.h"
-*/
-import "C"
-import (
- "fmt"
- "unsafe"
-)
-
-type DBStatus int
-
-const (
- dbError DBStatus = -1
- dbSuccess DBStatus = 0
- dbTimeout DBStatus = 1
- ignoreObjectExists DBStatus = 2
- fullConn DBStatus = 3
- notExist DBStatus = 10
-)
-
-var dbStatusTag = map[DBStatus]string{
- dbError: "error",
- dbSuccess: "success",
- dbTimeout: "timeout",
- ignoreObjectExists: "ignore object exists",
- fullConn: "full conn",
- notExist: "not exist",
-}
-
-func StatusMessage(dbStatus DBStatus) string {
- return dbStatusTag[dbStatus]
-}
-
-const (
- SQL string = "sql"
- KV string = "kv"
- Quit string = "quit"
- OK string = "ok"
- Failed string = "failed"
-)
-
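-// Intarkdb wraps the C intarkdb_database handle; pool.go opens a single handle that all pooled SQL connections share.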
-type Intarkdb struct {
- db C.intarkdb_database
-}
-
-type IntarkdbInterface interface {
- IntarkdbOpen(path string) (err error)
- IntarkdbClose()
-}
-
-func (g *Intarkdb) IntarkdbOpen(path string) (err error) {
- cPath := C.CString(path)
- defer C.free(unsafe.Pointer(cPath))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_open(cPath, &g.db))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("DB open %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *Intarkdb) IntarkdbClose() {
- C.intarkdb_close(&g.db)
- fmt.Println("DB close")
-}
-
-type IntarkdbSQL struct {
- DB Intarkdb
- connection C.intarkdb_connection
- result C.intarkdb_result
-}
-
-type SQLInterface interface {
- IntarkdbConnect() (err error)
- IntarkdbDisconnect()
- IntarkdbInitResult() (err error)
- IntarkdbQuery(query string) (err error)
- IntarkdbRowCount() int64
- IntarkdbColumnCount() int64
- IntarkdbColumnName(col int64) string
- IntarkdbValueVarchar(row, col int64) string
- IntarkdbDestroyResult()
-}
-
-func (g *IntarkdbSQL) IntarkdbConnect() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_connect(g.DB.db, &g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("sql connection %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbDisconnect() {
- C.intarkdb_disconnect(&g.connection)
- fmt.Println("sql connection close")
-}
-
-func (g *IntarkdbSQL) IntarkdbInitResult() (err error) {
- g.result = C.intarkdb_init_result()
- if g.result == nil {
- err = fmt.Errorf("intarkdb init result fail")
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbQuery(query string) (err error) {
- cQuery := C.CString(query)
- defer C.free(unsafe.Pointer(cQuery))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_query(g.connection, cQuery, g.result))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("intarkdb query err : %s, sql : %s", C.GoString(C.intarkdb_result_msg(g.result)), query)
- // fmt.Printf("query err sql : %s, %s\n", query, err)
- }
- return
-}
-
-func (g *IntarkdbSQL) IntarkdbRowCount() int64 {
- return int64(C.intarkdb_row_count(g.result))
-}
-
-func (g *IntarkdbSQL) IntarkdbColumnCount() int64 {
- return int64(C.intarkdb_column_count(g.result))
-}
-
-func (g *IntarkdbSQL) IntarkdbColumnName(col int64) string {
- return C.GoString(C.intarkdb_column_name(g.result, C.long(col)))
-}
-
-func (g *IntarkdbSQL) IntarkdbValueVarchar(row, col int64) string {
- return C.GoString(C.intarkdb_value_varchar(g.result, C.long(row), C.long(col)))
-}
-
-func (g *IntarkdbSQL) IntarkdbDestroyResult() {
- C.intarkdb_destroy_result(g.result)
- fmt.Println("result destroy success")
-}
-
-type IntarkdbKV struct {
- DB Intarkdb
- connection C.intarkdb_connection_kv
-}
-
-type KVInterface interface {
- Connect() (err error)
- Disconnect()
- OpenTable(name string) (err error)
- Set(key, value string) (err error)
- Get(key string) (value string, err error)
- Del(key string) (err error)
- TransactionBegin() (err error)
- TransactionCommit() (err error)
- TransactionRollback() (err error)
-}
-
-func (g *IntarkdbKV) Connect() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_connect_kv(g.DB.db, &g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv connection %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) Disconnect() {
- C.intarkdb_disconnect_kv(&g.connection)
- fmt.Println("kv connection close")
-}
-
-// A default system table is used; OpenTable can create and open a new table
-func (g *IntarkdbKV) OpenTable(name string) (err error) {
- cName := C.CString(name)
- defer C.free(unsafe.Pointer(cName))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_open_table_kv(g.connection, cName))
- if dbStatus != dbSuccess {
- err = fmt.Errorf("open kv table %s", dbStatusTag[dbStatus])
- }
- return
-}
-
-func (g *IntarkdbKV) Set(key, value string) (err error) {
- cKey := C.CString(key)
- cValue := C.CString(value)
- defer C.free(unsafe.Pointer(cKey))
- defer C.free(unsafe.Pointer(cValue))
-
- ptr := (*C.KvReply)(C.intarkdb_set(g.connection, cKey, cValue))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- fmt.Println("set key success")
- } else {
- err = fmt.Errorf("set key %s %s", key, dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) Get(key string) (value string, err error) {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
-
- ptr := (*C.KvReply)(C.intarkdb_get(g.connection, cKey))
-
- value = C.GoString(ptr.str)
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess && value != "" {
- fmt.Println("get key success")
- } else {
- if value == "" {
- dbStatus = notExist
- }
- err = fmt.Errorf("get key %s %s", key, dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) Del(key string) (err error) {
- cKey := C.CString(key)
- defer C.free(unsafe.Pointer(cKey))
-
- ptr := (*C.KvReply)(C.intarkdb_del(g.connection, cKey))
-
- dbStatus := dbError
- dbStatus = (DBStatus)(ptr._type)
- if dbStatus == dbSuccess {
- fmt.Println("del key success")
- } else {
- err = fmt.Errorf("del key %s %s", key, dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) TransactionBegin() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_begin(g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv TransactionBegin %s", dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) TransactionCommit() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_commit(g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv TransactionCommit %s", dbStatusTag[dbStatus])
- }
-
- return
-}
-
-func (g *IntarkdbKV) TransactionRollback() (err error) {
- dbStatus := dbError
- dbStatus = (DBStatus)(C.intarkdb_rollback(g.connection))
-
- if dbStatus != dbSuccess {
- err = fmt.Errorf("kv TransactionRollback %s", dbStatusTag[dbStatus])
- }
-
- return
-}
diff --git a/src/interface/go/prometheus-remote-database/log/log.go b/src/interface/go/prometheus-remote-database/log/log.go
deleted file mode 100644
index 1f2d15156963eafe3561c29b193769ac892152ca..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/log/log.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* log.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/log/log.go
-*
-* -------------------------------------------------------------------------
- */
-
-package log
-
-import (
- "errors"
- "fmt"
- "os"
- "path"
- "prometheus-remote-database/config"
- "strings"
-)
-
-type LogLevel uint16
-
-var Log *FileLogger
-
-const (
- UNKNOWN LogLevel = iota
- DEBUG
- INFO
- WARNING
- ERROR
-)
-
-var logLevelToString = map[LogLevel]string{
- UNKNOWN: "unknown",
- DEBUG: "debug",
- INFO: "info",
- WARNING: "warning",
- ERROR: "error",
-}
-
-var stringToLogLevel = map[string]LogLevel{
- // "unknown": UNKNOWN,
- "debug": DEBUG,
- "info": INFO,
- "warning": WARNING,
- "error": ERROR,
-}
-
-func parseLogLevel(s string) (LogLevel, error) {
- s = strings.ToLower(s)
- level, ok := stringToLogLevel[s]
- if !ok {
- return UNKNOWN, errors.New("Invalid log level")
- }
- return level, nil
-}
-
-func getLogStr(level LogLevel) string {
- return logLevelToString[level]
-}
-
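-// FileLogger writes leveled log lines to a file (append mode) or to stdout; maxFileSize is recorded but file rotation is not implemented here.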
-type FileLogger struct {
- Level LogLevel
- filePath string
- fileName string
- fileObj *os.File
- maxFileSize uint64
-}
-
-func NewFileLogger(l config.LogFile) {
- level, err := parseLogLevel(l.Level)
- if err != nil {
- panic(err)
- }
- Log = &FileLogger{
- Level: level,
- filePath: l.Path,
- fileName: l.Name,
- maxFileSize: l.MaxFileSize,
- }
- err = Log.initFile(l.IsFile)
- if err != nil {
- panic(err)
- }
-}
-
-func (f *FileLogger) initFile(isFile bool) error {
- if isFile {
- join := path.Join(f.filePath, f.fileName)
- fileObj, err := os.OpenFile(join, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- fmt.Printf("open log fail ,err: %v\n", err)
- return err
- }
-
- f.fileObj = fileObj
- } else {
- f.fileObj = os.Stdout
- }
-
- return nil
-}
-
-func (f *FileLogger) enable(level LogLevel) bool {
- return level >= f.Level
-}
-
-func (f *FileLogger) log(level LogLevel, msg string) {
- fmt.Fprintf(f.fileObj, "[%s] %s\n", getLogStr(level), msg)
-}
-
-func Debug(msg string, a ...interface{}) {
- msg = fmt.Sprintf(msg, a...)
- if Log.enable(DEBUG) {
- Log.log(DEBUG, msg)
- }
-}
-
-func Info(msg string, a ...interface{}) {
- msg = fmt.Sprintf(msg, a...)
- if Log.enable(INFO) {
- Log.log(INFO, msg)
- }
-}
-
-func Warning(msg string, a ...interface{}) {
- msg = fmt.Sprintf(msg, a...)
- if Log.enable(WARNING) {
- Log.log(WARNING, msg)
- }
-}
-
-func Error(msg string, a ...interface{}) {
- msg = fmt.Sprintf(msg, a...)
- if Log.enable(ERROR) {
- Log.log(ERROR, msg)
- }
-}
-
-func (f *FileLogger) Close() {
- Info("log close!")
- f.fileObj.Close()
-}
diff --git a/src/interface/go/prometheus-remote-database/main.go b/src/interface/go/prometheus-remote-database/main.go
deleted file mode 100644
index 43b0800e60d556898e10f85fe32a1e70d7f031ab..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/main.go
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* main.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/main.go
-*
-* -------------------------------------------------------------------------
- */
-
-package main
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "net/http"
- _ "net/http/pprof"
- "os"
- "os/signal"
- "strconv"
- "syscall"
- "time"
-
- "github.com/gogo/protobuf/proto"
- "github.com/golang/snappy"
- "github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/prometheus/common/model"
-
- "prometheus-remote-database/config"
- "prometheus-remote-database/log"
- "prometheus-remote-database/pool"
- "prometheus-remote-database/write_read"
- _ "prometheus-remote-database/write_read"
-
- "github.com/prometheus/prometheus/prompb"
- "github.com/prometheus/prometheus/storage/remote"
-)
-
-func main() {
- // Load the configuration file
- config, err := config.LoadConfig()
- if err != nil {
- os.Exit(1)
- }
-
- // Initialize the logger
- log.NewFileLogger(config.LogFile)
- log.Info("log init success!")
- defer log.Log.Close()
-
- log.Debug("config : %v", config)
-
- // Initialize the resource pools
- pool.InitValue(config.Pool)
- err = pool.NewResourcePool()
- if err != nil {
- log.Error(err.Error())
- os.Exit(1)
- }
- log.Info("NewResourcePool success!")
- defer pool.CleanUp()
-
- // Initialize the write_read package
- write_read.InitValue(config.Table)
-
- http.Handle("/metrics", promhttp.Handler())
-
- server := &http.Server{
- Addr: ":" + strconv.FormatUint(uint64(config.Port), 10),
- Handler: http.DefaultServeMux,
- }
-
- go func() {
- if err := serve(config.ReadURL, config.WriteURL, server); err != nil {
- log.Warning("Failed to listen, %s", err)
- }
- }()
-
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- <-c
- log.Warning("Received signal, shutting down...")
-
- // Create a context with a timeout for graceful shutdown
- shutdownCtx, cancelShutdown := context.WithTimeout(context.Background(), 30*time.Second)
- defer cancelShutdown()
-
- // Shut down the HTTP server
- if err := server.Shutdown(shutdownCtx); err != nil {
- log.Error("Error shutting down server: %s\n", err)
- }
-}
-
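-// serve registers the remote-write and remote-read handlers on the default mux and blocks in ListenAndServe until the server is shut down.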
-func serve(readURL, writeURL string, server *http.Server) (err error) {
- http.HandleFunc(writeURL, func(w http.ResponseWriter, r *http.Request) {
- req, err := remote.DecodeWriteRequest(r.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- samples := protoToSamples(req)
-
- err = write_read.Write(samples)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- })
-
- http.HandleFunc(readURL, func(w http.ResponseWriter, r *http.Request) {
- compressed, err := io.ReadAll(r.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- reqBuf, err := snappy.Decode(nil, compressed)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- var req prompb.ReadRequest
- if err := proto.Unmarshal(reqBuf, &req); err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- var resp *prompb.ReadResponse
- resp, err = write_read.Read(&req)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- if resp != nil {
- log.Debug("prompb.ReadRequest : %v", req)
- log.Debug("prompb.ReadResponse : %v", len(resp.Results))
- }
-
- data, err := proto.Marshal(resp)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- w.Header().Set("Content-Type", "application/x-protobuf")
- w.Header().Set("Content-Encoding", "snappy")
-
- compressed = snappy.Encode(nil, data)
- if _, err := w.Write(compressed); err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- log.Error("Error writing response, err : %s", err)
- }
- })
-
- if err = server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
- log.Error("Error starting server: %s", err)
- }
-
- return
-}
-
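-// protoToSamples flattens a remote-write request into model.Samples, copying every label of each time series into the sample's metric.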
-func protoToSamples(req *prompb.WriteRequest) model.Samples {
- var samples model.Samples
- for _, ts := range req.Timeseries {
- metric := make(model.Metric, len(ts.Labels))
- for _, l := range ts.Labels {
- metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
-
- for _, s := range ts.Samples {
- samples = append(samples, &model.Sample{
- Metric: metric,
- Value: model.SampleValue(s.Value),
- Timestamp: model.Time(s.Timestamp),
- })
- }
- }
- return samples
-}
-
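-// forWrite and forRead are manual smoke-test helpers for the write_read package; they are not called from main.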
-func forWrite() {
- var samples model.Samples
- myMap := make(map[model.LabelName]model.LabelValue)
- myMap[model.MetricNameLabel] = "agent_cpu_seconds_total"
- myMap["cpu"] = "80"
- myMap["host"] = "1813466887472824322"
- myMap["instance"] = "142a8d06-02e1-4fd0-b1fa-ecdf6b45171f"
- myMap["job"] = "142a8d06-02e1-4fd0-b1fa-ecdf6b45171f_5s"
- myMap["mode"] = "nice"
- myMap["type"] = "exporter"
- sample := &model.Sample{
- Metric: model.Metric(model.LabelSet(myMap)),
- Value: model.SampleValue(math.Inf(1)),
- Timestamp: model.Now(),
- }
- samples = append(samples, sample)
- for i := 1; i <= 10; i++ {
- time.Sleep(1 * 100 * time.Millisecond)
- samples[0].Timestamp = model.Now()
- go write_read.Write(samples)
- }
-
- time.Sleep(90 * 1000 * time.Millisecond)
-}
-
-func forRead() {
- var req prompb.ReadRequest
- var query prompb.Query
- var labels prompb.LabelMatcher
-
- labels.Type = prompb.LabelMatcher_EQ
- labels.Name = model.MetricNameLabel
- labels.Value = "agent_cpu_seconds_total"
-
- query.Matchers = append(query.Matchers, &labels)
- query.EndTimestampMs = 1728718643000
- query.StartTimestampMs = 1728700643000
- req.Queries = append(req.Queries, &query)
- resp, _ := write_read.Read(&req)
- fmt.Println(resp)
-}
diff --git a/src/interface/go/prometheus-remote-database/pool/pool.go b/src/interface/go/prometheus-remote-database/pool/pool.go
deleted file mode 100644
index c723cd7e9df08f7b1c2fc7eb644f3746410255aa..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/pool/pool.go
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* pool.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/pool/pool.go
-*
-* -------------------------------------------------------------------------
- */
-
-package pool
-
-import (
- "fmt"
- "os"
- "prometheus-remote-database/config"
- "prometheus-remote-database/intarkdb_interface"
- "sync"
- "time"
-
- "math/rand"
-)
-
-var writePoolSize uint = 10
-var readPoolSize uint = 2
-var writeTimeout time.Duration = 60 * time.Second
-var readTimeout time.Duration = 30 * time.Second
-var writePool *ResourcePool
-var readPool *ResourcePool
-var intarkdb intarkdb_interface.Intarkdb
-
-type OperationType int
-
-const (
- WriteOperation OperationType = 1
- ReadOperation OperationType = 2
-)
-
-type Resource struct {
- ID uint
- IntarkdbSQL intarkdb_interface.IntarkdbSQL
-}
-
-type ResourcePool struct {
- mu sync.Mutex
- resources []*Resource
-}
-
-func init() {
- err := intarkdb.IntarkdbOpen(".")
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-}
-
-func InitValue(p config.Pool) {
- writePoolSize = p.WritePoolSize
- readPoolSize = p.ReadPoolSize
- writeTimeout = time.Duration(p.WriteTimeout) * time.Second
- readTimeout = time.Duration(p.ReadTimeout) * time.Second
-}
-
-func NewResourcePool() (err error) {
- writePool = &ResourcePool{
- resources: make([]*Resource, writePoolSize),
- }
-
- err = initPool(WriteOperation)
- if err != nil {
- return fmt.Errorf("write pool err : %v", err)
- }
-
- readPool = &ResourcePool{
- resources: make([]*Resource, readPoolSize),
- }
-
- err = initPool(ReadOperation)
- if err != nil {
- return fmt.Errorf("read pool err : %v", err)
- }
-
- return
-}
-
-func initPool(operation OperationType) (err error) {
- var pool *ResourcePool
- var size uint
- switch operation {
- case WriteOperation:
- pool = writePool
- size = writePoolSize
- case ReadOperation:
- pool = readPool
- size = readPoolSize
- }
-
- for i := uint(0); i < size; i++ {
- pool.resources[i] = &Resource{ID: i}
- var intarkdbSQL intarkdb_interface.IntarkdbSQL
- intarkdbSQL.DB = intarkdb
-
- err = intarkdbSQL.IntarkdbConnect()
- if err != nil {
- return
- }
-
- err = intarkdbSQL.IntarkdbInitResult()
- if err != nil {
- return
- }
- pool.resources[i].IntarkdbSQL = intarkdbSQL
- }
-
- return
-}
-
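-// Acquire takes the first free connection from the selected pool, retrying with a random back-off of up to 500ms until the configured timeout elapses.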
-func Acquire(operation OperationType) (*Resource, error) {
- var pool *ResourcePool
- var timeout time.Duration
- switch operation {
- case WriteOperation:
- pool = writePool
- timeout = writeTimeout
- case ReadOperation:
- pool = readPool
- timeout = readTimeout
- }
-
- startTime := time.Now()
- rand.NewSource(time.Now().UnixNano())
- for {
- pool.mu.Lock()
- if len(pool.resources) > 0 {
- res := pool.resources[0]
- pool.resources = pool.resources[1:]
- pool.mu.Unlock()
- return res, nil
- }
- pool.mu.Unlock()
-
- if time.Since(startTime) >= timeout {
- return nil, fmt.Errorf("%s operation %d timeout: no resources available",
- time.Now().Format("2006-01-02 15:04:05"), operation)
- }
-
- randomInt := rand.Intn(500)
- time.Sleep(time.Duration(randomInt) * time.Millisecond) // wait a while before retrying
- }
-}
-
-func Release(operation OperationType, resource *Resource) {
- var pool *ResourcePool
- switch operation {
- case WriteOperation:
- pool = writePool
- case ReadOperation:
- pool = readPool
- }
-
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pool.resources = append(pool.resources, resource)
-}
-
-func CleanUp() {
- cleanPool(ReadOperation)
- cleanPool(WriteOperation)
-
- intarkdb.IntarkdbClose()
-}
-
-func cleanPool(operation OperationType) {
- var pool *ResourcePool
- switch operation {
- case WriteOperation:
- pool = writePool
- case ReadOperation:
- pool = readPool
- }
-
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- for _, res := range pool.resources {
- res.IntarkdbSQL.IntarkdbDestroyResult()
- res.IntarkdbSQL.IntarkdbDisconnect()
- }
-
- pool.resources = nil
-}
diff --git a/src/interface/go/prometheus-remote-database/write_read/read.go b/src/interface/go/prometheus-remote-database/write_read/read.go
deleted file mode 100644
index c5e400c3591c41f72014f9fc31b16e03d64b9a4b..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/write_read/read.go
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* read.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/write_read/read.go
-*
-* -------------------------------------------------------------------------
- */
-
-package write_read
-
-import (
- "errors"
- "fmt"
- "prometheus-remote-database/config"
- "prometheus-remote-database/intarkdb_interface"
- "prometheus-remote-database/log"
- "prometheus-remote-database/pool"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/timestamp"
- "github.com/prometheus/prometheus/prompb"
-)
-
-var separator string = "\xff"
-var limitValue uint32 = 5000
-var maxCount uint32 = 50000
-var maxDayTimestamp uint64 = 7 * 24 * 60 * 60 * 1000
-var rowCountEmptyError error = errors.New("The query result is empty")
-
-func initQueryParam(config config.Table) {
- limitValue = config.LimitCount
- maxCount = config.MaxCount
- maxDayTimestamp = uint64(config.MaxDay) * 24 * 60 * 60 * 1000
-}
-
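-// Read answers a remote-read request: for each query it checks the time window, skips metrics without a configured
-// name or partition table, counts the matching rows, runs the SELECT, and merges the rows into per-label-set series.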
-func Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) {
- resource, err := pool.Acquire(pool.ReadOperation)
- if err != nil {
- return nil, err
- }
- defer pool.Release(pool.ReadOperation, resource)
- defer resource.IntarkdbSQL.IntarkdbInitResult()
- defer resource.IntarkdbSQL.IntarkdbDestroyResult()
-
- labelsToSeries := map[string]*prompb.TimeSeries{}
- for _, q := range req.Queries {
- if uint64(q.EndTimestampMs-q.StartTimestampMs) > maxDayTimestamp {
- log.Warning("The maximum query interval is exceeded, Querie : %v", q)
- return nil, err
- }
- sql, sqlcount, tableName, err := buildSQL(q)
- if err != nil {
- log.Error("%s, Querie : %v", err.Error(), q)
- return nil, err
- }
-
- _, exists := scrapeNameMap[tableName]
- if !exists {
- continue
- }
-
- _, exists = tableMap[tableName]
- if !exists {
- continue
- }
-
- if tableName == "" {
- log.Warning("Metric Name Label is empty, Querie : %v", q)
- return nil, fmt.Errorf("Metric Name Label is empty, Querie : %v", q)
- }
-
- log.Debug("sql : %s", sql)
-
- err = resource.IntarkdbSQL.IntarkdbQuery(sqlcount)
- if err != nil {
- return nil, err
- }
-
- count := getCount(resource.IntarkdbSQL)
- if count == 0 {
- log.Warning("result is empty, Querie : %v", q)
- continue
- } else if count > maxCount {
- log.Warning("The maximum number of results was exceeded, sql : %s", sqlcount)
- continue
- }
- log.Debug("count : %d", count)
-
- startTime := time.Now()
- err = resource.IntarkdbSQL.IntarkdbQuery(sql)
- log.Debug("%s:%d IntarkdbQuery time : ", tableName, count, time.Since(startTime))
- if err != nil {
- log.Error(err.Error())
- return nil, err
- }
- startTime = time.Now()
- if err = mergeResult(labelsToSeries, tableName, resource.IntarkdbSQL); err != nil {
- if err == rowCountEmptyError {
- log.Warning("mergeResult err : %s, sql : %s", err.Error(), sql)
- break
- }
- log.Error("mergeResult err : %s, sql : %s", err.Error(), sql)
- return nil, err
- }
- log.Debug("%d mergeResult time : ", count, time.Since(startTime))
-
- // var offset uint32
- // for offset = 0; offset < count; offset += limitValue {
- // query := fmt.Sprintf("%s offset %d limit %d", sql, offset, limitValue)
- // startTime := time.Now()
- // err = resource.IntarkdbSQL.IntarkdbQuery(query)
- // log.Debug("IntarkdbQuery time : ", time.Since(startTime))
- // if err != nil {
- // log.Error(err.Error())
- // return nil, err
- // }
- // startTime = time.Now()
- // if err = mergeResult(labelsToSeries, tableName, resource.IntarkdbSQL); err != nil {
- // if err == rowCountEmptyError {
- // log.Warning("mergeResult err : %s, sql : %s", err.Error(), query)
- // break
- // }
- // log.Error("mergeResult err : %s, sql : %s", err.Error(), query)
- // return nil, err
- // }
- // log.Debug("mergeResult time : ", time.Since(startTime))
- // }
- }
-
- if len(labelsToSeries) == 0 {
- return nil, nil
- }
-
- resp := prompb.ReadResponse{
- Results: []*prompb.QueryResult{
- {Timeseries: make([]*prompb.TimeSeries, 0, len(labelsToSeries))},
- },
- }
- startTime := time.Now()
- var count int
- for _, ts := range labelsToSeries {
- count += len(ts.Samples)
- for i := 1; i < len(ts.Samples); i++ {
- if ts.Samples[i-1].Timestamp > ts.Samples[i].Timestamp {
- log.Warning("Not in order")
- sort.Sort(ByTimestamp(ts.Samples))
- if ts.Samples[i-1].Timestamp > ts.Samples[i].Timestamp {
- log.Warning("sort not work")
- }
- break
- }
- }
- resp.Results[0].Timeseries = append(resp.Results[0].Timeseries, ts)
- }
- log.Debug("%d labelsToSeries time : ", count, time.Since(startTime))
- return &resp, nil
-}
-
-type ByTimestamp []prompb.Sample
-
-func (a ByTimestamp) Len() int { return len(a) }
-func (a ByTimestamp) Less(i, j int) bool { return a[i].Timestamp < a[j].Timestamp }
-func (a ByTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
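-// buildSQL turns one remote-read query into a SELECT plus a COUNT(*) variant on the metric's table: EQ/NEQ label
-// matchers become WHERE conditions, the timestamps become a date range, and regex matchers are rejected.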
-func buildSQL(q *prompb.Query) (sql, sqlCount, tableName string, err error) {
- matchers := make([]string, 0, len(q.Matchers))
- for _, m := range q.Matchers {
- if m.Name == model.MetricNameLabel {
- switch m.Type {
- case prompb.LabelMatcher_EQ:
- tableName = m.Value
- default:
- err = errors.New("non-equal or regex matchers are not supported on the metric name yet")
- return
- }
- continue
- }
-
- switch m.Type {
- case prompb.LabelMatcher_EQ:
- matchers = append(matchers, fmt.Sprintf("%s = '%s'", m.Name, m.Value))
- case prompb.LabelMatcher_NEQ:
- matchers = append(matchers, fmt.Sprintf("%s != '%s'", m.Name, m.Value))
- default:
- err = fmt.Errorf("unknown match type %v or regex matchers are not supported", m.Type)
- return
- }
- }
-
- if tableName == "" {
- err = errors.New("No a metric name matcher found")
- return
- }
-
- if q.StartTimestampMs == q.EndTimestampMs {
- matchers = append(matchers, fmt.Sprintf("date = '%s'", model.Time(q.StartTimestampMs).Time().Format(IntarkdbTimestampFormats[4])))
- } else {
- matchers = append(matchers, fmt.Sprintf("date >= '%s'", model.Time(q.StartTimestampMs).Time().Format(IntarkdbTimestampFormats[4])))
- matchers = append(matchers, fmt.Sprintf("date <= '%s'", model.Time(q.EndTimestampMs).Time().Format(IntarkdbTimestampFormats[4])))
- }
-
- // sql = fmt.Sprintf("SELECT * FROM \"%s\" WHERE %v order by date asc", tableName, strings.Join(matchers, " AND "))
- sql = fmt.Sprintf("SELECT * FROM \"%s\" WHERE %v", tableName, strings.Join(matchers, " AND "))
- sqlCount = fmt.Sprintf("SELECT count(*) FROM \"%s\" WHERE %v", tableName, strings.Join(matchers, " AND "))
-
- return
-}
-
-func getCount(intarkdbSQL intarkdb_interface.IntarkdbSQL) uint32 {
- numStr := intarkdbSQL.IntarkdbValueVarchar(0, 0)
- num, err := strconv.ParseUint(numStr, 10, 32)
- if err != nil {
- log.Error("getCount err : %v", err)
- return 0
- }
-
- return uint32(num)
-}
-
-// Group rows by the values of all label columns
-func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, tableName string, intarkdbSQL intarkdb_interface.IntarkdbSQL) error {
- rowCount := intarkdbSQL.IntarkdbRowCount()
- log.Debug("rowCount = %d", rowCount)
- if rowCount > 0 {
- columnCount := intarkdbSQL.IntarkdbColumnCount()
- if columnCount < 2 {
- return errors.New("The number of resulting columns must be greater than or equal to two")
- }
-
- var columnName []string
- // the last two columns are always date and value
- for col := int64(0); col < columnCount-2; col++ {
- columnName = append(columnName, intarkdbSQL.IntarkdbColumnName(col))
- }
-
- for row := int64(0); row < rowCount; row++ {
- col := int64(0)
- var columnValue []string
- for ; col < columnCount-2; col++ {
- columnValue = append(columnValue, intarkdbSQL.IntarkdbValueVarchar(row, col))
- }
-
- k := tableName + separator + concatLabels(columnName, columnValue)
- ts, ok := labelsToSeries[k]
- if !ok {
- ts = &prompb.TimeSeries{
- Labels: columnToLabelPairs(tableName, columnName, columnValue),
- }
- labelsToSeries[k] = ts
- }
-
- var sample prompb.Sample
- date := intarkdbSQL.IntarkdbValueVarchar(row, col)
- if timeVal, err := time.ParseInLocation(IntarkdbTimestampFormats[4], date, time.Local); err == nil {
- sample.Timestamp = timestamp.FromTime(timeVal)
- } else {
- continue
- }
-
- value := intarkdbSQL.IntarkdbValueVarchar(row, col+1)
- if valueFloat, err := strconv.ParseFloat(value, 64); err == nil {
- sample.Value = valueFloat
- } else {
- continue
- }
-
- ts.Samples = append(ts.Samples, sample)
- // ts.Samples = mergeSamples(ts.Samples, sample)
- }
- } else {
- return rowCountEmptyError
- }
-
- return nil
-}
-
-func concatLabels(name, value []string) string {
- // 0xff cannot occur in valid UTF-8 sequences, so use it
- // as a separator here.
- if len(name) != len(value) {
- return ""
- }
-
- pairs := make([]string, 0, len(name))
- for k := range name {
- pairs = append(pairs, name[k]+separator+value[k])
- }
- return strings.Join(pairs, separator)
-}
-
-func columnToLabelPairs(name string, columnName, columnValue []string) []prompb.Label {
- if len(columnName) != len(columnValue) {
- return nil
- }
-
- pairs := make([]prompb.Label, 0, len(columnName)+1)
-
- pairs = append(pairs, prompb.Label{
- Name: model.MetricNameLabel,
- Value: name,
- })
-
- for k := range columnName {
- // if v == "" {
- // continue
- // }
- pairs = append(pairs, prompb.Label{
- Name: columnName[k],
- Value: columnValue[k],
- })
- }
-
- return pairs
-}
-
-// mergeSamples inserts a sample while keeping the slice ordered by timestamp (currently unused; samples are appended directly)
-func mergeSamples(a []prompb.Sample, b prompb.Sample) []prompb.Sample {
- // binary-search the insertion position
- index := sort.Search(len(a), func(i int) bool { return a[i].Timestamp >= b.Timestamp })
-
- // insert the new value at that position
- a = append(a, prompb.Sample{}) // grow the slice by one element
- copy(a[index+1:], a[index:]) // shift later elements right
- a[index] = b
-
- return a
-}
-
-func getPartitionTable() (tables []string) {
- resource, _ := pool.Acquire(pool.WriteOperation)
-
- defer pool.Release(pool.WriteOperation, resource)
-
- resource.IntarkdbSQL.IntarkdbQuery("select NAME from 'SYS_TABLES' where PARTITIONED = 1;")
- rowCount := resource.IntarkdbSQL.IntarkdbRowCount()
- log.Debug("getPartitionTable rowCount = %d", rowCount)
- if rowCount > 0 {
- for row := int64(0); row < rowCount; row++ {
- tables = append(tables, resource.IntarkdbSQL.IntarkdbValueVarchar(row, 0))
- }
- }
-
- return tables
-}
diff --git a/src/interface/go/prometheus-remote-database/write_read/write.go b/src/interface/go/prometheus-remote-database/write_read/write.go
deleted file mode 100644
index 6e40bfe291f185c2317bf977551c246f844e9aba..0000000000000000000000000000000000000000
--- a/src/interface/go/prometheus-remote-database/write_read/write.go
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
-* Copyright (c) GBA-NCTI-ISDC. 2022-2024.
-*
-* openGauss embedded is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2.
-* You may obtain a copy of Mulan PSL v2 at:
-*
-* http://license.coscl.org.cn/MulanPSL2
-*
-* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-* MERCHANTABILITY OR FITFOR A PARTICULAR PURPOSE.
-* See the Mulan PSL v2 for more details.
-* -------------------------------------------------------------------------
-*
-* write.go
-*
-* IDENTIFICATION
-* openGauss-embedded/src/interface/go/prometheus-remote-database/write_read/write.go
-*
-* -------------------------------------------------------------------------
- */
-
-package write_read
-
-import (
- "fmt"
- "math"
- "prometheus-remote-database/config"
- "prometheus-remote-database/log"
- "prometheus-remote-database/pool"
- "strings"
- "time"
-
- "github.com/prometheus/common/model"
- "golang.org/x/sync/singleflight"
-)
-
-var IntarkdbTimestampFormats = []string{
- "2006-01-02 15:04:05.999999999-07:00",
- "2006-01-02T15:04:05.999999999-07:00",
- "2006-01-02 15:04:05.999999999",
- "2006-01-02T15:04:05.999999999",
- "2006-01-02 15:04:05.999999",
- "2006-01-02T15:04:05",
- "2006-01-02 15:04",
- "2006-01-02T15:04",
- "2006-01-02",
-}
-
-var tableMap = make(map[string]struct{})
-var scrapeNameMap = make(map[string]struct{})
-var scrapeNameWithQueryMap = make(map[string]struct{})
-var interval, retention string
-var g singleflight.Group
-var queryLabel string = "query"
-
-// Initialize the metric-name maps
-func InitValue(config config.Table) {
- interval = config.Interval
- retention = config.Retention
-
- tables := getPartitionTable()
-
- for _, v := range tables {
- tableMap[v] = struct{}{}
- }
-
- for _, v := range config.Names {
- scrapeNameMap[v] = struct{}{}
- }
- for _, v := range config.NameWithQuery {
- scrapeNameMap[v] = struct{}{}
- scrapeNameWithQueryMap[v] = struct{}{}
- }
- log.Debug("scrapeNameMap len : ", len(scrapeNameMap))
- log.Debug("scrapeNameWithQueryMap len : ", len(scrapeNameWithQueryMap))
-
- log.Debug("tableMap len : ", len(tableMap))
-
- initQueryParam(config)
-}
-
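-// Write turns the samples of configured metrics into INSERT statements, creating the metric's partition table on first
-// sight (deduplicated via singleflight), and then executes all statements on a single pooled write connection.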
-func Write(samples model.Samples) error {
- var insertSQL []*string
-
- for _, s := range samples {
- tableName := string(s.Metric[model.MetricNameLabel])
- if tableName == "" {
- log.Warning("Metric Name Label is empty, sample : %v", s)
- continue
- }
-
- _, exists := scrapeNameMap[tableName]
- if !exists {
- continue
- }
-
- _, exists = scrapeNameWithQueryMap[tableName]
- if exists {
- if _, labelExists := s.Metric[model.LabelName(queryLabel)]; labelExists {
- s.Metric[model.LabelName(queryLabel)] = ""
- }
- }
-
- value := float64(s.Value)
- var isNaNOrInf bool
- if math.IsNaN(value) || math.IsInf(value, 0) {
- // log.Warning("Cannot send to intarkdb, skipping sample, sample : %v", s)
- isNaNOrInf = true
- // continue
- }
-
- timestamp := s.Timestamp.Time().Local().Format(IntarkdbTimestampFormats[4])
-
- var columns, values []string
- for l, v := range s.Metric {
- if l != model.MetricNameLabel {
- columns = append(columns, string(l))
- values = append(values, ("'" + string(v) + "'"))
- }
- }
-
- _, exists = tableMap[tableName]
- if !exists {
- log.Debug("tableName : ", tableName)
- log.Debug("tableMap : ", tableMap)
- _, err, _ := g.Do(tableName, func() (i interface{}, e error) {
- return nil, createTable(tableName, columns)
- })
-
- if err != nil {
- log.Error("create table %s err : %s", tableName, err.Error())
- continue
- }
- }
-
- columns = append(columns, "date", "value")
- values = append(values, "'"+timestamp+"'")
- if isNaNOrInf {
- values = append(values, fmt.Sprintf("'%g'", value))
- } else {
- values = append(values, fmt.Sprintf("%g", value))
- }
-
- query := fmt.Sprintf("insert into \"%s\"(%s) values (%s);", tableName, strings.Join(columns, ","), strings.Join(values, ","))
-
- insertSQL = append(insertSQL, &query)
- }
-
- resource, err := pool.Acquire(pool.WriteOperation)
- if err != nil {
- log.Warning(err.Error())
- return err
- }
- defer pool.Release(pool.WriteOperation, resource)
-
- for _, v := range insertSQL {
- err := resource.IntarkdbSQL.IntarkdbQuery(*v)
- if err != nil {
- log.Error(err.Error())
- }
- }
-
- log.Info("%s write %d success", time.Now().Local().Format(IntarkdbTimestampFormats[4]), len(insertSQL))
- return nil
-}
-
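-// createTable creates a range-partitioned timescale table keyed on date, with one string column per label plus date and value columns, using the configured interval and retention.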
-func createTable(tableName string, columns []string) error {
- sql := fmt.Sprintf("CREATE TABLE \"%s\" (", tableName)
- for _, v := range columns {
- sql += (v + " string, ")
- }
- sql += "date timestamp, value float64) PARTITION BY RANGE(date) "
- sql += fmt.Sprintf("timescale interval '%s' retention '%s' autopart;", interval, retention)
-
- log.Info("createTable sql : ", sql)
- resource, err := pool.Acquire(pool.WriteOperation)
- if err != nil {
- return err
- }
- defer pool.Release(pool.WriteOperation, resource)
-
- err = resource.IntarkdbSQL.IntarkdbQuery(sql)
- if err == nil {
- tableMap[tableName] = struct{}{}
- }
-
- return err
-}
diff --git a/src/network/CMakeLists.txt b/src/network/CMakeLists.txt
index b4fbc536de3799605a7bfdc72ab8e2eaa3668a70..523add18626dc16f5c0bdd7b5cd7d8350cf558ed 100644
--- a/src/network/CMakeLists.txt
+++ b/src/network/CMakeLists.txt
@@ -73,6 +73,7 @@ include_directories(${ZEKERNEL_KERNEL_PATH}/xact)
include_directories(${ZEKERNEL_KERNEL_PATH}/tablespace)
include_directories(${ZEKERNEL_KERNEL_PATH}/catalog)
include_directories(${SERVER_COMMON_PATH})
+include_directories(${INTARKDB_HOME}/interface/c)
link_directories(${LIBRARY_OUTPUT_PATH})
diff --git a/src/network/login/login.c b/src/network/login/login.c
index a2be79fa41a2c5afd511a7a4b2c4e8ea0198ba37..082771e8f12a8b77bf3deb6cae2f04250ed41b29 100644
--- a/src/network/login/login.c
+++ b/src/network/login/login.c
@@ -22,7 +22,7 @@
*/
#include "login.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#include "storage/gstor/zekernel/common/cm_log.h"
#include "common/srv_def.h"
// #include "dependency/GmSSL/include/gmssl/sha2.h"
diff --git a/src/network/login/login.h b/src/network/login/login.h
index bbdddfc27535e135bb0a8ffa22c101accd297629..b6c8fdf6d69e1cf84387fcffc65b7de0c12a7ae7 100644
--- a/src/network/login/login.h
+++ b/src/network/login/login.h
@@ -25,7 +25,7 @@
#define __OM_LOGIN_H__
#include "storage/gstor/zekernel/common/cm_defs.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
// #include "dependency/GmSSL/include/gmssl/sm3.h"
// #include "dependency/GmSSL/include/gmssl/sm4.h"
// #include "dependency/GmSSL/include/gmssl/sha2.h"
diff --git a/src/network/server/srv_instance.h b/src/network/server/srv_instance.h
index bbeaac7765313c60da7fa8e47c5b59f910d1045a..b9c2344c6a4f39881dcdc5740a0b13697d52880e 100644
--- a/src/network/server/srv_instance.h
+++ b/src/network/server/srv_instance.h
@@ -28,7 +28,7 @@
#include "srv_lsnr.h"
#include "srv_agent.h"
#include "srv_reactor.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#include "srv_def.h"
#include "cm_base.h"
#include "srv_interface.h"
diff --git a/src/network/server/srv_interface.h b/src/network/server/srv_interface.h
index 0290100ed7094b57be64f18d9b3aae16a3e50c09..368f7e6dc31c4d2fd98d04464ea8eacd406dee3b 100644
--- a/src/network/server/srv_interface.h
+++ b/src/network/server/srv_interface.h
@@ -25,7 +25,7 @@
*/
#include "cm_base.h"
#include "cm_defs.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#ifdef __cplusplus
extern "C" {
diff --git a/src/network/server/srv_session_base.h b/src/network/server/srv_session_base.h
index e0bff54262865edc44a9af9b785552209d56fe78..dc4b4a4c4589765d5b1faae892d7389788d7a9fb 100644
--- a/src/network/server/srv_session_base.h
+++ b/src/network/server/srv_session_base.h
@@ -25,7 +25,7 @@
#include "cm_sync.h"
#include "cs_pipe.h"
#include "compute/kv/intarkdb_kv.h"
-#include "interface/c/intarkdb_sql.h"
+#include "intarkdb_sql.h"
#include "cm_base.h"
#ifndef __SRV_SESSION_BASE_H__
diff --git a/testshell.sh b/testshell.sh
index 8c563177a988b8000e86eb1133b267d1f028f139..11b7673b79593622de2670bfb6c9f91f1084aeeb 100644
--- a/testshell.sh
+++ b/testshell.sh
@@ -2,17 +2,12 @@
# run the test cases
if [ $# == 1 ] && [ $1 == "all" ]; then
cd build/debug/src/compute/sql/test && ctest
- cd -
- cd build/debug/src/compute/ts/test && ctest
elif [ $# == 1 ] && [ $1 == "sql" ]; then
cd build/debug/src/compute/sql/test && ctest
-elif [ $# == 1 ] && [ $1 == "ts" ]; then
- cd build/debug/src/compute/ts/test && ctest
else
echo "请输入正确的参数! 语法:"
echo "./testshell.sh