diff --git "a/content/zh/post/gaoyunlong/openGauss\344\270\216postgresql\346\227\245\345\270\270\344\275\277\347\224\250\345\267\256\345\274\202.md" "b/content/zh/post/gaoyunlong/openGauss\344\270\216postgresql\346\227\245\345\270\270\344\275\277\347\224\250\345\267\256\345\274\202.md"
new file mode 100644
index 0000000000000000000000000000000000000000..0701005004fd7de64e36cfa09360dd227da169c1
--- /dev/null
+++ "b/content/zh/post/gaoyunlong/openGauss\344\270\216postgresql\346\227\245\345\270\270\344\275\277\347\224\250\345\267\256\345\274\202.md"
@@ -0,0 +1,163 @@
++++
+
+title = "openGauss与postgresql日常使用差异"
+
+date = "2020-11-17"
+
+tags = ["openGauss与postgresql日常使用差异"]
+
+archives = "2020-11"
+
+author = "高云龙"
+
+summary = "openGauss与postgresql日常使用差异"
+
+img = "/zh/post/gaoyunlong/title/title.png"
+
+times = "12:40"
+
++++
+
+# openGauss与postgresql日常使用差异
+
+## 密码加密
+
+- postgresql默认密码加密方式是md5。
+
+- openGauss默认密码加密方式是sha256。
+- 使用Navicat、pgAdmin3等客户端开发工具访问openGauss,需要修改加密方式。
+- 如果在本地用用户名密码登录数据库没问题。
+- 但是用其他工具连接数据库报用户或密码错误。
+- 可能是密码加密方式不对,需要看pg\_hba.conf 及 参数。
+
+```
+password_encryption_type = 0 #Password storage type, 0 is md5 for PG, 1 is sha256 + md5, 2 is sha256 only
+```
+
+## 字符串存储
+
+在postgresql里,char\(n\)、varchar\(n\) n代表是字符;最多存储1GB。
+
+在openGauss里,char\(n\)、varchar\(n\) n代表的是字节,nvarchar2\(n\) n代表的是字符;最多存储100MB。
+
+```
+---
+---postgresql字符测试
+---
+postgres=# \d dt
+ Table "public.dt"
+ Column | Type | Collation | Nullable | Default
+--------+----------------------+-----------+----------+---------
+ id | integer | | |
+ col1 | character varying(8) | | |
+
+postgres=# insert into dt values(3,'中文字符长度测试');
+INSERT 0 1
+postgres=# insert into dt values(4,'yingwen8');
+INSERT 0 1
+postgres=# insert into dt values(4,'yingwen88');
+ERROR: value too long for type character varying(8)
+
+---
+---openGauss字符测试
+---
+mydb=# \d+ dt
+ Table "public.dt"
+ Column | Type | Modifiers | Storage | Stats target | Description
+--------+----------------------+-----------+----------+--------------+-------------
+ id | integer | | plain | |
+ col1 | character varying(8) | | extended | |
+ col2 | nvarchar2(8) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+mydb=# insert into dt(id,col1) values(3,'yingwen8');
+INSERT 0 1
+mydb=# insert into dt(id,col1) values(3,'yingwen88');
+ERROR: value too long for type character varying(8)
+CONTEXT: referenced column: col1
+mydb=# insert into dt(id,col1) values(3,'中文测试');
+ERROR: value too long for type character varying(8)
+CONTEXT: referenced column: col1
+mydb=# insert into dt(id,col1) values(3,'中文测');
+ERROR: value too long for type character varying(8)
+CONTEXT: referenced column: col1
+
+mydb=# insert into dt(id,col2) values(4,'中文字符长度测试');
+INSERT 0 1
+mydb=# insert into dt(id,col2) values(4,'yingwen8');
+INSERT 0 1
+mydb=# insert into dt(id,col2) values(4,'yingwen88');
+ERROR: value too long for type nvarchar2(8)
+CONTEXT: referenced column: col2
+mydb=#
+```
+
+## null 与 空字符
+
+- 在postgresql里 null != ‘’
+
+- 在openGauss里,''会被转换成null,不存在''
+
+
+
+```
+---
+---postgresql测试
+---
+postgres=# create table dt(id int,col1 varchar(8));
+CREATE TABLE
+postgres=# insert into dt values(1,null);
+INSERT 0 1
+postgres=# insert into dt values(2,'');
+INSERT 0 1
+postgres=# select * from dt;
+ id | col1
+----+------
+ 1 |
+ 2 |
+(2 rows)
+
+postgres=# select * from dt where col1 is null;
+ id | col1
+----+------
+ 1 |
+(1 row)
+
+postgres=# select * from dt where col1='';
+ id | col1
+----+------
+ 2 |
+(1 row)
+
+postgres=#
+---
+---openGauss测试
+---
+mydb=# create table dt(id int,col1 varchar(8));
+CREATE TABLE
+mydb=# insert into dt values(1,null);
+INSERT 0 1
+mydb=# insert into dt values(1,'');
+INSERT 0 1
+mydb=# select * from dt;
+ id | col1
+----+------
+ 1 |
+ 1 |
+(2 rows)
+
+mydb=# select * from dt where col1 is null;
+ id | col1
+----+------
+ 1 |
+ 1 |
+(2 rows)
+
+mydb=# select * from dt where col1='';
+ id | col1
+----+------
+(0 rows)
+
+mydb=#
+```
diff --git "a/content/zh/post/gaoyunlong/openGauss\350\241\214\345\255\230\344\270\216\345\210\227\345\255\230.md" "b/content/zh/post/gaoyunlong/openGauss\350\241\214\345\255\230\344\270\216\345\210\227\345\255\230.md"
new file mode 100644
index 0000000000000000000000000000000000000000..58a1e3221a2f8a164fcf554b847209ea4f5b9cb1
--- /dev/null
+++ "b/content/zh/post/gaoyunlong/openGauss\350\241\214\345\255\230\344\270\216\345\210\227\345\255\230.md"
@@ -0,0 +1,348 @@
++++
+
+title = "openGauss行存与列存"
+
+date = "2020-11-17"
+
+tags = ["openGauss行存与列存"]
+
+archives = "2020-11"
+
+author = "高云龙"
+
+summary = "openGauss行存与列存"
+
+img = "/zh/post/gaoyunlong/title/title.png"
+
+times = "12:30"
+
++++
+
+# openGauss行存与列存
+
+## 列存表限制
+
+- 列存表不支持数组。
+- 列存表的数量建议不超过1000个。
+- 列存表的表级约束只支持PARTIAL CLUSTER KEY,不支持主外键等表级约束。
+- 列存表的字段约束只支持NULL、NOT NULL和DEFAULT常量值。
+- 列存表不支持alter命令修改字段约束。
+- 列存表支持delta表,受参数enable\_delta\_store 控制是否开启,受参数deltarow\_threshold控制进入delta表的阈值。
+
+## 列存相关参数
+
+- cstore\_buffers
+
+ 列存所使用的共享缓冲区的大小,默认值:32768KB。
+
+
+- partition\_mem\_batch
+
+ 指定缓存个数,为了优化对列存分区表的批量插入,在批量插入过程中会对数据进行缓存后再批量写盘。默认值:256 。
+
+
+- partition\_max\_cache\_size
+
+ 指定数据缓存区大小,为了优化对列存分区表的批量插入,在批量插入过程中会对数据进行缓存后再批量写盘。默认值:2GB。
+
+
+- enable\_delta\_store
+
+ 为了增强列存单条数据导入的性能和解决磁盘冗余问题,是否需要开启列存delta表功能,与参数DELTAROW\_THRESHOLD 配合使用。默认值:off。
+
+
+## 建表语法
+
+openGauss 创建普通表默认是未压缩的行存表。
+
+```
+mydb=# \dt
+No relations found.
+mydb=# create table test_t(id serial primary key ,col1 varchar(8),col2 decimal(6,2),create_time timestamptz not null default now());
+NOTICE: CREATE TABLE will create implicit sequence "test_t_id_seq" for serial column "test_t.id"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_t_pkey" for table "test_t"
+CREATE TABLE
+mydb=# \dt+
+ List of relations
+ Schema | Name | Type | Owner | Size | Storage | Description
+--------+--------+-------+-------+---------+----------------------------------+-------------
+ public | test_t | table | omm | 0 bytes | {orientation=row,compression=no} |
+(1 row)
+
+mydb=#
+```
+
+创建列存表,需要指定**orientation=column**,默认压缩级别是low。
+
+```
+mydb=# create table column_t(id serial,col1 varchar(8),col2 decimal(6,2),create_time timestamptz not null default now()) with (orientation=column );
+NOTICE: CREATE TABLE will create implicit sequence "column_t_id_seq" for serial column "column_t.id"
+CREATE TABLE
+mydb=# \dt+
+ List of relations
+ Schema | Name | Type | Owner | Size | Storage | Description
+--------+----------+-------+-------+---------+--------------------------------------+-------------
+ public | column_t | table | omm | 16 kB | {orientation=column,compression=low} |
+ public | test_t | table | omm | 0 bytes | {orientation=row,compression=no} |
+(2 rows)
+
+mydb=# \d+ column_t
+ Table "public.column_t"
+ Column | Type | Modifiers | Storage | Stats target | Description
+-------------+--------------------------+-------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('column_t_id_seq'::regclass) | plain | |
+ col1 | character varying(8) | | extended | |
+ col2 | numeric(6,2) | | main | |
+ create_time | timestamp with time zone | not null default now() | plain | |
+Has OIDs: no
+Options: orientation=column, compression=low
+```
+
+列存表添加局部聚簇存储列。
+
+```
+mydb=# \d+ column_t
+ Table "public.column_t"
+ Column | Type | Modifiers | Storage | Stats target | Description
+-------------+--------------------------+-------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('column_t_id_seq'::regclass) | plain | |
+ col1 | character varying(8) | | extended | |
+ col2 | numeric(6,2) | | main | |
+ create_time | timestamp with time zone | not null default now() | plain | |
+Has OIDs: no
+Options: orientation=column, compression=low
+
+mydb=# alter table column_t add PARTIAL CLUSTER KEY(id);
+ALTER TABLE
+mydb=# \d+ column_t
+ Table "public.column_t"
+ Column | Type | Modifiers | Storage | Stats target | Description
+-------------+--------------------------+-------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('column_t_id_seq'::regclass) | plain | |
+ col1 | character varying(8) | | extended | |
+ col2 | numeric(6,2) | | main | |
+ create_time | timestamp with time zone | not null default now() | plain | |
+Partial Cluster :
+ "column_t_cluster" PARTIAL CLUSTER KEY (id)
+Has OIDs: no
+Options: orientation=column, compression=low
+
+mydb=#
+```
+
+直接创建带局部聚簇存储的列存表。
+
+```
+NOTICE: CREATE TABLE will create implicit sequence "column_c_id_seq" for serial column "column_c.id"
+CREATE TABLE
+mydb=# \d+ column_c
+ Table "public.column_c"
+ Column | Type | Modifiers | Storage | Stats target | Description
+-------------+--------------------------+-------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('column_c_id_seq'::regclass) | plain | |
+ col1 | character varying(8) | | extended | |
+ col2 | numeric(6,2) | | main | |
+ create_time | timestamp with time zone | not null default now() | plain | |
+Partial Cluster :
+ "column_c_cluster" PARTIAL CLUSTER KEY (id)
+Has OIDs: no
+Options: orientation=column, compression=low
+
+mydb=#
+```
+
+## 列存与行存对比
+
+**磁盘使用空间**
+
+- 列存表默认大小16K,low压缩级别。
+- 行存表默认大小0bytes,非压缩级别。
+- 分别向两个表中插入100万条数据,占用磁盘大小对比。
+
+```
+mydb=# \dt+
+ List of relations
+ Schema | Name | Type | Owner | Size | Storage | Description
+--------+-----------+-------+-------+---------+-----------------------------------------+-------------
+ public | column_t | table | omm | 16 kB | {orientation=column,compression=low} |
+ public | column_th | table | omm | 16 kB | {orientation=column,compression=high} |
+ public | column_tm | table | omm | 16 kB | {orientation=column,compression=middle} |
+ public | row_tc | table | omm | 0 bytes | {orientation=row,compression=yes} |
+ public | test_t | table | omm | 0 bytes | {orientation=row,compression=no} |
+(5 rows)
+
+mydb=# insert into column_t select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2);
+INSERT 0 1000000
+Time: 11328.880 ms
+mydb=# insert into column_th select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2);
+INSERT 0 1000000
+Time: 10188.634 ms
+mydb=# insert into column_tm select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2);
+INSERT 0 1000000
+Time: 9802.739 ms
+mydb=# insert into test_t select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2);
+INSERT 0 1000000
+Time: 17404.945 ms
+mydb=# insert into row_tc select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2);
+INSERT 0 1000000
+Time: 12394.866 ms
+mydb=# \dt+
+ List of relations
+ Schema | Name | Type | Owner | Size | Storage | Description
+--------+-----------+-------+-------+----------+-----------------------------------------+-------------
+ public | column_t | table | omm | 12 MB | {orientation=column,compression=low} |
+ public | column_th | table | omm | 8304 kB | {orientation=column,compression=high} |
+ public | column_tm | table | omm | 10168 kB | {orientation=column,compression=middle} |
+ public | row_tc | table | omm | 58 MB | {orientation=row,compression=yes} |
+ public | test_t | table | omm | 58 MB | {orientation=row,compression=no} |
+(5 rows)
+
+mydb=#
+```
+
+- 列存表开启的压缩级别越高,占用磁盘空间越少。
+- 行存表开启压缩后,磁盘空间大小占比减少不明显。
+- 列存表占用磁盘空间比行存表占用磁盘空间少近6倍。
+
+**DML对比**
+
+查找单列
+
+```
+---
+---按范围查找,列存比行存快近20倍
+---
+mydb=# select col1 from test_t where id>=100010 and id<100020;
+ col1
+----------
+ 4257a3f3
+ 3d397284
+ 64343438
+ 6eb7bdb7
+ d1c9073d
+ 6aeb037c
+ 1d424974
+ 223235ab
+ 329de235
+ 2f02adc1
+(10 rows)
+
+Time: 77.341 ms
+mydb=# select col1 from column_t where id>=100010 and id<100020;
+ col1
+----------
+ d4837c30
+ 87a46f7a
+ 2f42a9c9
+ 4481c793
+ 68800204
+ 613b9205
+ 9d8f4a0a
+ 5cc4ff9e
+ f948cd10
+ f2775cee
+(10 rows)
+
+Time: 3.884 ms
+
+---
+---随机查找,列存比行存快近35倍
+---
+
+mydb=# select col1 from test_t limit 10;
+ col1
+----------
+ c2780d93
+ 294be14d
+ 4e53b761
+ 2c10f8a2
+ ae776743
+ 7d683c66
+ b3b40054
+ 7e56edf9
+ a7b7336e
+ ea3d47d9
+(10 rows)
+
+Time: 249.887 ms
+mydb=# select col1 from column_t limit 10;
+ col1
+----------
+ a745d77b
+ 4b6df494
+ 76fed9c1
+ 70c9664d
+ 3384de8a
+ 4158f3bf
+ 5d1c3b9f
+ 341876bb
+ f396f4ed
+ abfd78bb
+(10 rows)
+
+Time: 7.738 ms
+```
+
+select \*
+
+```
+---
+---行存比列存查询快30%
+---
+mydb=# select * from test_t limit 10;
+ id | col1 | col2 | create_time
+----+----------+------+-------------------------------
+ 1 | c2780d93 | .37 | 2020-10-26 14:27:33.304108+08
+ 2 | 294be14d | .57 | 2020-10-26 14:27:33.304108+08
+ 3 | 4e53b761 | .98 | 2020-10-26 14:27:33.304108+08
+ 4 | 2c10f8a2 | .27 | 2020-10-26 14:27:33.304108+08
+ 5 | ae776743 | .97 | 2020-10-26 14:27:33.304108+08
+ 6 | 7d683c66 | .58 | 2020-10-26 14:27:33.304108+08
+ 7 | b3b40054 | .44 | 2020-10-26 14:27:33.304108+08
+ 8 | 7e56edf9 | .43 | 2020-10-26 14:27:33.304108+08
+ 9 | a7b7336e | .31 | 2020-10-26 14:27:33.304108+08
+ 10 | ea3d47d9 | .42 | 2020-10-26 14:27:33.304108+08
+(10 rows)
+
+Time: 6.822 ms
+
+mydb=# select * from column_t limit 10;
+ id | col1 | col2 | create_time
+----+----------+------+-------------------------------
+ 1 | a745d77b | .33 | 2020-10-26 14:28:20.633253+08
+ 2 | 4b6df494 | .42 | 2020-10-26 14:28:20.633253+08
+ 3 | 76fed9c1 | .73 | 2020-10-26 14:28:20.633253+08
+ 4 | 70c9664d | .74 | 2020-10-26 14:28:20.633253+08
+ 5 | 3384de8a | .48 | 2020-10-26 14:28:20.633253+08
+ 6 | 4158f3bf | .59 | 2020-10-26 14:28:20.633253+08
+ 7 | 5d1c3b9f | .63 | 2020-10-26 14:28:20.633253+08
+ 8 | 341876bb | .97 | 2020-10-26 14:28:20.633253+08
+ 9 | f396f4ed | .73 | 2020-10-26 14:28:20.633253+08
+ 10 | abfd78bb | .30 | 2020-10-26 14:28:20.633253+08
+(10 rows)
+
+Time: 9.982 ms
+```
+
+update
+
+```
+---
+---直接更新一个字段,列存比行存快近7倍
+---
+mydb=# update test_t set col1=col1;
+UPDATE 1000000
+Time: 19779.978 ms
+mydb=# update column_t set col1=col1;
+UPDATE 1000000
+Time: 2702.339 ms
+```
+
+## 结论
+
+1. 列存表比行存表在磁盘空间占用上节省近6倍。
+2. 查询指定字段,列存表比行存表快约20-35倍。
+3. select \* 的方式,列存表比行存表慢30%。
+4. 默认压缩方式批量导入数据,列存表比行存表快40%。
+
+
diff --git a/content/zh/post/gaoyunlong/title/title.PNG b/content/zh/post/gaoyunlong/title/title.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..912319f07cad56d38dd41efe4125c4959c0da394
Binary files /dev/null and b/content/zh/post/gaoyunlong/title/title.PNG differ
diff --git "a/content/zh/post/gaoyunlong/\351\200\202\351\205\215openGauss\347\232\204exporter.md" "b/content/zh/post/gaoyunlong/\351\200\202\351\205\215openGauss\347\232\204exporter.md"
new file mode 100644
index 0000000000000000000000000000000000000000..06f6a2bf65c05d457855a8961a6d98c0ab2955b1
--- /dev/null
+++ "b/content/zh/post/gaoyunlong/\351\200\202\351\205\215openGauss\347\232\204exporter.md"
@@ -0,0 +1,206 @@
++++
+
+title = "适配openGauss的exporter"
+
+date = "2020-11-17"
+
+tags = ["适配openGauss的exporter"]
+
+archives = "2020-11"
+
+author = "高云龙"
+
+summary = "适配openGauss的exporter"
+
+img = "/zh/post/gaoyunlong/title/title.png"
+
+times = "13:30"
+
++++
+
+# 适配openGauss的exporter
+
+基于postgres\_exporter-0.8.0 适配 openGauss。
+
+针对postgres\_exporter.go文件做修改。
+
+## 数据库用户
+
+新建一个exporter的数据库用户,需要**sysadmin**权限,否则有些系统视图无法获取数据。
+
+新建用户之前,需要修改加密参数,由sha256 改成md5。
+
+password\_encryption\_type = 0
+
+## 版本号适配
+
+```
+//var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`)
+//var lowestSupportedVersion = semver.MustParse("9.1.0")
+
+//opengauss
+var versionRegex = regexp.MustCompile(`^\(?\w+ ((\d+)(\.\d+)?(\.\d+)?)`)
+var lowestSupportedVersion = semver.MustParse("1.0.0")
+```
+
+## 系统视图适配
+
+openGauss 不支持 pg\_stat\_archiver 系统视图,去掉。
+
+```
+// "pg_stat_archiver": {
+// map[string]ColumnMapping{
+// "archived_count": {COUNTER, "Number of WAL files that have been successfully archived", nil, nil},
+// "last_archived_wal": {DISCARD, "Name of the last WAL file successfully archived", nil, nil},
+// "last_archived_time": {DISCARD, "Time of the last successful archive operation", nil, nil},
+// "failed_count": {COUNTER, "Number of failed attempts for archiving WAL files", nil, nil},
+// "last_failed_wal": {DISCARD, "Name of the WAL file of the last failed archival operation", nil, nil},
+// "last_failed_time": {DISCARD, "Time of the last failed archival operation", nil, nil},
+// "stats_reset": {DISCARD, "Time at which these statistics were last reset", nil, nil},
+// "last_archive_age": {GAUGE, "Time in seconds since last WAL segment was successfully archived", nil, nil},
+// },
+// true,
+// 0,
+// },
+
+
+// "pg_stat_archiver": {
+// {
+// semver.MustParseRange(">=0.0.0"),
+// `
+// SELECT *,
+// extract(epoch from now() - last_archived_time) AS last_archive_age
+// FROM pg_stat_archiver
+// `,
+// },
+// },
+```
+
+pg\_stat\_activity 系统视图。
+
+```
+//semver.MustParseRange(">=9.2.0"),
+semver.MustParseRange(">=1.0.0"),
+
+
+// {
+// semver.MustParseRange("<9.2.0"),
+// `
+// SELECT
+// datname,
+// 'unknown' AS state,
+// COALESCE(count(*),0) AS count,
+// COALESCE(MAX(EXTRACT(EPOCH FROM now() - xact_start))::float,0) AS max_tx_duration
+// FROM pg_stat_activity GROUP BY datname
+// `,
+// },
+```
+
+pg\_stat\_replication 系统视图。
+
+```
+// map[string]ColumnMapping{
+// "procpid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange("<9.2.0")},
+// "pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=9.2.0")},
+// "usesysid": {DISCARD, "OID of the user logged into this WAL sender process", nil, nil},
+// "usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil},
+// "application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil},
+// "client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil},
+// "client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil},
+// "client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil},
+// "backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil},
+// "backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil},
+// "state": {LABEL, "Current WAL sender state", nil, nil},
+// "sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")},
+// "write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")},
+// "flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")},
+// "replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange("<10.0.0")},
+// "sent_lsn": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=10.0.0")},
+// "write_lsn": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")},
+// "flush_lsn": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")},
+// "replay_lsn": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=10.0.0")},
+// "sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil},
+// "sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil},
+// "slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")},
+// "plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil},
+// "slot_type": {DISCARD, "The slot type - physical or logical", nil, nil},
+// "datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
+// "database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
+// "active": {DISCARD, "True if this slot is currently actively being used", nil, nil},
+// "active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil},
+// "xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil},
+// "catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil},
+// "restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil},
+// "pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil},
+// "pg_current_wal_lsn": {DISCARD, "pg_current_xlog_location", nil, semver.MustParseRange(">=10.0.0")},
+// "pg_current_wal_lsn_bytes": {GAUGE, "WAL position in bytes", nil, semver.MustParseRange(">=10.0.0")},
+// "pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")},
+// "pg_wal_lsn_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=10.0.0")},
+// "confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil},
+// "write_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")},
+// "flush_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level remote_flush incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")},
+// "replay_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")},
+// },
+ map[string]ColumnMapping{
+ "pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=1.0.0")},
+ "usesysid": {DISCARD, "OID of the user logged into this WAL sender process", nil, nil},
+ "usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil},
+ "application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil},
+ "client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil},
+ "client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil},
+ "client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil},
+ "backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil},
+ "backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil},
+ "state": {LABEL, "Current WAL sender state", nil, nil},
+ "sender_sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=1.0.0")},
+ "receiver_write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=1.0.0")},
+ "receiver_flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=1.0.0")},
+ "receiver_replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=1.0.0")},
+ "sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil},
+ "sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil},
+ "slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, nil},
+ "plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil},
+ "slot_type": {DISCARD, "The slot type - physical or logical", nil, nil},
+ "datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
+ "database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil},
+ "active": {DISCARD, "True if this slot is currently actively being used", nil, nil},
+ "active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil},
+ "xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil},
+ "catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil},
+ "restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil},
+ "pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil},
+ "pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=1.0.0")},
+ "confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil},
+ },
+
+// {
+// semver.MustParseRange(">=10.0.0"),
+// `
+// SELECT *,
+// (case pg_is_in_recovery() when 't' then null else pg_current_wal_lsn() end) AS pg_current_wal_lsn,
+// (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), pg_lsn('0/0'))::float end) AS pg_current_wal_lsn_bytes,
+// (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn)::float end) AS pg_wal_lsn_diff
+// FROM pg_stat_replication
+// `,
+// },
+
+ {
+ semver.MustParseRange(">=1.0.0"),
+ `
+ SELECT *,
+ (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location,
+ (case pg_is_in_recovery() when 't' then null else pg_xlog_location_diff(pg_current_xlog_location(), receiver_replay_location)::float end) AS pg_xlog_location_diff
+ FROM pg_stat_replication
+ `,
+ },
+
+// {
+// semver.MustParseRange("<9.2.0"),
+// `
+// SELECT *,
+// (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location
+// FROM pg_stat_replication
+// `,
+// },
+```
+
diff --git a/content/zh/post/shujukujiagouzhimei/figures/modb_ef290bfc-0cea-11eb-bd37-38f9d3cd240d.png b/content/zh/post/shujukujiagouzhimei/figures/modb_ef290bfc-0cea-11eb-bd37-38f9d3cd240d.png
new file mode 100644
index 0000000000000000000000000000000000000000..7f66213ad752461271d4fec3b54af2e247147bc5
Binary files /dev/null and b/content/zh/post/shujukujiagouzhimei/figures/modb_ef290bfc-0cea-11eb-bd37-38f9d3cd240d.png differ
diff --git "a/content/zh/post/shujukujiagouzhimei/openGauss\344\270\255\347\232\204\345\271\266\350\241\214\345\233\236\346\224\276\344\273\245\345\217\212\345\222\214PG\347\232\204\345\257\271\346\257\224.md" "b/content/zh/post/shujukujiagouzhimei/openGauss\344\270\255\347\232\204\345\271\266\350\241\214\345\233\236\346\224\276\344\273\245\345\217\212\345\222\214PG\347\232\204\345\257\271\346\257\224.md"
new file mode 100644
index 0000000000000000000000000000000000000000..589c418346dc2239c6636ae9849ce1b27f571657
--- /dev/null
+++ "b/content/zh/post/shujukujiagouzhimei/openGauss\344\270\255\347\232\204\345\271\266\350\241\214\345\233\236\346\224\276\344\273\245\345\217\212\345\222\214PG\347\232\204\345\257\271\346\257\224.md"
@@ -0,0 +1,169 @@
++++
+
+title = "openGauss中的并行回放以及和PG的对比"
+
+date = "2020-11-17"
+
+tags = ["openGauss中的并行回放以及和PG的对比"]
+
+archives = "2020-11"
+
+author = "数据库架构之美"
+
+summary = "openGauss中的并行回放以及和PG的对比"
+
+img = "/zh/post/shujukujiagouzhimei/title/title.png"
+
+times = "14:30"
+
++++
+
+# openGauss中的并行回放以及和PG的对比
+
+openGauss在非极致rto特性下通过recovery\_max\_workers和recovery\_parallelism参数控制并行回放的线程数。下面测试一下效果,可以通过停止备库来模拟主库xlog堆积,此处统一模拟堆积到1000个xlog后启动备库,来进行应用和回放,最终统计回放完成时间。可以通过比对receiver\_replay\_location和receiver\_flush\_location之间的差距来判断是否回放完成。
+
+由于recovery\_max\_workers和recovery\_parallelism参数具有联动性,设置一个另外一个会自动设置为相同的值,此处只设置recovery\_max\_workers。
+
+## recovery\_max\_workers=1
+
+停止备库,启动压测程序,待xlog达到1000时关闭压测程序。
+
+```
+[omm@db01 gaussdata]$ ll pg_xlog/ |wc -l
+1000
+[omm@db02 pg_xlog]$ gs_ctl start -M standby
+[omm@db02 pg_xlog]$ date
+Wed Sep 16 15:26:53 CST 2020
+```
+
+可以看到处于catchup过程:
+
+```
+[omm@db02 pg_xlog]$ gs_ctl query
+[2020-09-16 15:27:12.958][213287][][gs_ctl]: gs_ctl query ,datadir is (null)
+ HA state:
+ local_role : Standby
+ static_connections : 2
+ db_state : Catchup
+ detail_information : Normal
+
+
+ Senders info:
+No information
+ Receiver info:
+ receiver_pid : 211639
+ local_role : Standby
+ peer_role : Primary
+ peer_state : Normal
+ state : Catchup
+ sender_sent_location : 3C/28800000
+ sender_write_location : 3D/EE528328
+ sender_flush_location : 3D/EE528328
+ sender_replay_location : 3D/EE528328
+ receiver_received_location : 3C/28000000
+ receiver_write_location : 3C/27000000
+ receiver_flush_location : 3C/27000000
+ receiver_replay_location : 3A/8F4A9910
+ sync_percent : 97%
+ channel : 192.168.1.2:48458<--192.168.1.1:5533
+```
+
+过一段时间发现追平了,但是receiver\_replay\_location和receiver\_flush\_location之间有差距,持续观察:
+
+```
+[omm@db02 pg_xlog]$ gs_ctl query[2020-09-16 15:32:08.432][237296][][gs_ctl]: gs_ctl query ,datadir is (null) HA state: local_role : Standby static_connections : 2 db_state : Normal detail_information : Normal Senders info: No information Receiver info: receiver_pid : 211639 local_role : Standby peer_role : Primary peer_state : Normal state : Normal sender_sent_location : 3D/FC647630 sender_write_location : 3D/FC647630 sender_flush_location : 3D/FC647630 sender_replay_location : 3D/FC647630 receiver_received_location : 3D/FC647630 receiver_write_location : 3D/FC647630 receiver_flush_location : 3D/FC647630 receiver_replay_location : 3D/FC647630 sync_percent : 100% channel : 192.168.1.2:48458<--192.168.1.1:5533
+
+
+[omm@db02 pg_xlog]$ date
+Wed Sep 16 15:32:09 CST 2020
+```
+
+总共耗时:316s
+
+## recovery\_max\_workers=8
+
+```
+[omm@db01 gaussdata]$ ll pg_xlog |wc -l
+1002
+```
+
+启动一个备库,开始追数和回放。
+
+```
+[omm@db02 gaussdata]$ date
+Thu Sep 17 09:32:59 CST 2020
+[omm@db02 gaussdata]$ gs_ctl query
+[2020-09-17 09:33:02.663][53466][][gs_ctl]: gs_ctl query ,datadir is (null)
+ HA state:
+ local_role : Standby
+ static_connections : 2
+ db_state : Catchup
+ detail_information : Normal
+
+
+ Senders info:
+No information
+ Receiver info:
+ receiver_pid : 53065
+ local_role : Standby
+ peer_role : Primary
+ peer_state : Normal
+ state : Catchup
+ sender_sent_location : 44/65800000
+ sender_write_location : 47/A600A858
+ sender_flush_location : 47/A600A858
+ sender_replay_location : 47/A600A858
+ receiver_received_location : 44/65800000
+ receiver_write_location : 44/65000000
+ receiver_flush_location : 44/65000000
+ receiver_replay_location : 44/432AFCC8
+ sync_percent : 95%
+ channel : 192.168.1.2:38322<--192.168.1.1:5533
+```
+
+稳定后:
+
+```
+[omm@db02 gaussdata]$ gs_ctl query
+[2020-09-17 09:41:05.963][93661][][gs_ctl]: gs_ctl query ,datadir is (null)
+ HA state:
+ local_role : Standby
+ static_connections : 2
+ db_state : Normal
+ detail_information : Normal
+
+
+ Senders info:
+No information
+ Receiver info:
+ receiver_pid : 53065
+ local_role : Standby
+ peer_role : Primary
+ peer_state : Normal
+ state : Normal
+ sender_sent_location : 47/AF961308
+ sender_write_location : 47/AF961308
+ sender_flush_location : 47/AF961308
+ sender_replay_location : 47/AF961308
+ receiver_received_location : 47/AF961308
+ receiver_write_location : 47/AF961308
+ receiver_flush_location : 47/AF961308
+ receiver_replay_location : 47/AF961308
+ sync_percent : 100%
+ channel : 192.168.1.2:38322<--192.168.1.1:5533
+
+
+[omm@db02 gaussdata]$ date
+Thu Sep 17 09:41:07 CST 2020
+```
+
+总耗时:428s
+
+可以看到并行回放并没有起到作用,甚至开启多线程回放后反而回放变慢了。
+
+测试过程中大致看了下日志产生速度:70M/s,插入的数据每秒6万条,而pg中在插入每秒6万条的时候,日志产生速度只有35M/s左右,是openGauss的一半,这块也需要优化。在极限测试情况下,在150并发时,pg的日志产生速度大概70M/s,tps大概是每秒13万行的插入,此时replay\_lag确实在逐渐增大,说明在这个大小的日志产生速度下,pg已经回放不过来这么大量的日志。openGauss在6万tps插入时已经达到这么大日志量。
+
+
+
+所以从上面的表格以及测试结果可以看到首先开启并行回放参数不会加快回放速度,甚至回放速度会变慢,openGauss备库回放跟不上的原因并不在于openGauss的回放速度比pg慢,回放速度他俩是差不多的,而在于openGauss在插入同样的数据时产生的日志量是pg的两倍,这样造成它的回放跟不上。这个日志量的问题需要解决。
+
diff --git a/content/zh/post/shujukujiagouzhimei/title/title.PNG b/content/zh/post/shujukujiagouzhimei/title/title.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..651eea95c0232db71aa778c388b91b08202b3835
Binary files /dev/null and b/content/zh/post/shujukujiagouzhimei/title/title.PNG differ
diff --git "a/content/zh/post/xiaoxiaoliang/figures/\345\233\2761.png" "b/content/zh/post/xiaoxiaoliang/figures/\345\233\2761.png"
new file mode 100644
index 0000000000000000000000000000000000000000..8a7c8c0729f7d0265d0db2414805e10c9190505c
Binary files /dev/null and "b/content/zh/post/xiaoxiaoliang/figures/\345\233\2761.png" differ
diff --git "a/content/zh/post/xiaoxiaoliang/figures/\345\233\2762.png" "b/content/zh/post/xiaoxiaoliang/figures/\345\233\2762.png"
new file mode 100644
index 0000000000000000000000000000000000000000..efdc1589935ecc36a33ee4d86b5e6cc91aee3825
Binary files /dev/null and "b/content/zh/post/xiaoxiaoliang/figures/\345\233\2762.png" differ
diff --git "a/content/zh/post/xiaoxiaoliang/openGauss\345\205\274\345\256\271Oracle\346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\345\270\270\347\224\250\350\241\250\347\261\273\345\236\213.md" "b/content/zh/post/xiaoxiaoliang/openGauss\345\205\274\345\256\271Oracle\346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\345\270\270\347\224\250\350\241\250\347\261\273\345\236\213.md"
new file mode 100644
index 0000000000000000000000000000000000000000..b9f6f71bfbcf738047af99fea61ed6e60a4fc27f
--- /dev/null
+++ "b/content/zh/post/xiaoxiaoliang/openGauss\345\205\274\345\256\271Oracle\346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\345\270\270\347\224\250\350\241\250\347\261\273\345\236\213.md"
@@ -0,0 +1,32 @@
++++
+
+title = "openGauss兼容Oracle数据类型和常用表类型"
+
+date = "2020-11-17"
+
+tags = ["openGauss兼容Oracle数据类型和常用表类型"]
+
+archives = "2020-11"
+
+author = "小小亮"
+
+summary = "openGauss兼容Oracle数据类型和常用表类型"
+
+img = "/zh/post/xiaoxiaoliang/title/title.png"
+
+times = "13:30"
+
++++
+
+# openGauss兼容Oracle数据类型和常用表类型
+
+从Oracle数据库向其他数据库过渡时,很多朋友会自然而然地寻找属性的对应方式,例如字典表。
+
+openGauss可全面兼容Oracle所有数据类型,对于常见数据类型无需进行改造,对于少数非常用数据类型,需要进行少量代码改造,可采取下列替代方案进行替换。
+
+
+
+openGauss兼容Oracle常用表类型,索引组织表需要用集群索引方式进行改造。对于少数非常用数据类型,需要进行少量代码改造,可采取下列替代方案进行替换。
+
+
+
diff --git a/content/zh/post/xiaoxiaoliang/title/title.PNG b/content/zh/post/xiaoxiaoliang/title/title.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..c3998e7e4dff8ac1e3a00044ed85eae564053773
Binary files /dev/null and b/content/zh/post/xiaoxiaoliang/title/title.PNG differ