diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000000000000000000000000000000000000..95becc00cae37a4c6f9727b247ebd6c3e8961e82
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,18 @@
+{
+  "editor.tokenColorCustomizations": {
+    "textMateRules": [
+      {
+        "scope": "kunpeng.func",
+        "settings": {
+          "foreground": "#28a745"
+        }
+      },
+      {
+        "scope": "kunpeng.intrinsics",
+        "settings": {
+          "foreground": "#28a745"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 7ac805cb871821c6362d394d8201b9d5830e51a0..5d6a2b9b85fea931493710f9e7126fbac3147255 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM nginx
+FROM nginx:1.20.0

 RUN apt-get update && \
     apt install curl -y && \
diff --git a/content/en/guidance/index_.md b/content/en/guidance/index_.md
index 0b1f584075bdeda1230bb982dbd90af6ec32d9cb..a1354efaff51efbd47334e06597288be46c42975 100644
--- a/content/en/guidance/index_.md
+++ b/content/en/guidance/index_.md
@@ -6,13 +6,13 @@ title = "Guidance to Post a Blog"

 ## Preparation

-1. Refer to http: //git.mydoc.io/?t=179267 to register Gitee account.
+1. Refer to https://gitee.com/help/articles/4113 to register a Gitee account.

-2. Set your primary mail box in gitee settings https: //gitee.com/profile/emails.
+2. Set your primary mailbox in Gitee settings: https://gitee.com/profile/emails.

-3. Sign your CLA in .
+3. Sign your CLA in .

-4. Prepare your git environment refering to http: //git.mydoc.io/?t=180692.
+4. Prepare your git environment referring to https://gitee.com/help/articles/4107.

 ## Understand blog format

@@ -39,7 +39,7 @@ Tips: you can copy content/_example/2020-03-03-sample-post.md to your folder and

 The blog posting follows the pull request of Gitee.

-1. Fork openGauss blog project to your own gitee. Refer to for detailed guidance.
+1. Fork the openGauss blog project to your own Gitee. Refer to for detailed guidance.

 2. Clone the code to your local environment.

@@ -82,6 +82,6 @@ git commit -m ""
 git push origin :
 ```

-7. Refer to http: //git.mydoc.io/?t=153749to submit your Pull Request
+7. Refer to https://gitee.com/help/articles/4122 to submit your Pull Request.

 8. Wait for reviewing and merging.
diff --git a/content/en/post/2022/A-Leap-of-openGauss-from-Standalone-System-to-Distributed-System.md b/content/en/post/2022/A-Leap-of-openGauss-from-Standalone-System-to-Distributed-System.md
new file mode 100644
index 0000000000000000000000000000000000000000..45fcf70ee4864a52a1423e5dc8f503773b8d1415
--- /dev/null
+++ b/content/en/post/2022/A-Leap-of-openGauss-from-Standalone-System-to-Distributed-System.md
@@ -0,0 +1,74 @@
+++

title = "A Leap of openGauss from Standalone System to Distributed System"

date = "2021-09-14"

tags = [ "A Leap of openGauss from Standalone System to Distributed System"]

archives = "2021-09"

author = "Bin Zhou"

summary = "A Leap of openGauss from Standalone System to Distributed System"

img = "/en/post/2022/title/img2.png"

times = "12:30"

+++

# A Leap of openGauss from Standalone System to Distributed System

Since June 2020, the openGauss standalone database has received wide attention from the industry and attracted many partners and developers to build a prosperous database ecosystem.
In the face of massive data and ultra-high concurrency scenarios, openGauss turns to distributed solutions and focuses on solving problems in different aspects, such as massive data storage, ultra-high concurrent throughput, and large table bottlenecks, achieving another breakthrough from standalone system to distributed system.

## **Distributed solution**

![](../figures/21.png)

Figure 1 Overall architecture of the distributed solution

openGauss integrates many open-source components to build a full-stack open-source distributed solution that integrates data scale-out, distributed transactions, and governance. Figure 1 shows the overall architecture of openGauss. ShardingSphere-Proxy is an open-source distributed solution that provides sharding, table partitioning, distributed transactions, auto scaling, and read/write splitting capabilities. HAProxy, working with Patroni REST APIs, can always identify the database primary node, ensuring HA and implementing load balancing. Each Patroni HA node supports one primary node and multiple standby nodes. Each node uses the Paxos protocol to ensure data consistency. Nodes can be deployed in the same or different regions to ensure data security in multiple regions and data centers. This distributed solution uses the powerful distributed capability of ShardingSphere-Proxy to manage clusters through Kubernetes and monitor cluster status through Prometheus. In this way, a full-stack open-source distributed solution is built.

## **Product advantages**

1. **Ultimate scalability and flexible scaling**

   The computing and storage capabilities can be linearly expanded through horizontal sharding to a maximum of 6400 shards. The performance increases quasi-linearly with the expansion, which effectively solves the problem of data volume expansion in a single table. Depending on service traffic, data nodes can be flexibly and smoothly scaled in or out, and read and write operations are intelligently separated to implement automatic load balancing of distributed databases.

2. **Abundant enterprise-class features**

   Provides abundant enterprise-class features, such as distributed stored procedures, triggers, distributed transactions, fully-encrypted data, and Workload Diagnosis Reports \(WDRs\).

3. **One-click deployment, shielding underlying dependencies**

   Standard images ensure consistent delivery in multiple environments, container-based deployment, and physical resource pooling, reducing dependency on the platform and implementing second-level application deployment.

4. **Ultra-high availability, implementing remote disaster recovery**

   Provides powerful cluster management and O&M capabilities, and flexible deployment of multiple data centers in the same city, different cities, and multiple regions, ensuring data security and consistency based on the Paxos protocol, and providing multiple DR capabilities with "RPO = 0".

5. **Open source, building a full-stack ecosystem**

   The standalone and distributed open-source openGauss solutions encourage more partners and developers to jointly build a prosperous database ecosystem and build a full-stack open-source ecosystem.

## **Application scenarios**

1. **Banking system**

   Based on the consensus protocol Paxos, the distributed strong consistency transaction capability is provided to ensure strong data consistency in the distributed environment. Multiple data centers in multiple regions provide the "RPO = 0" capability to ensure bank-grade reliability.

2. **Government/Enterprise office**
   Provides fully-encrypted data encryption, which is secure and reliable and supports software and hardware ecosystems to ensure HA and safeguard government and enterprise office work.

3. **Smart grid**

   Supports flexible deployment of multiple data centers in the same city, in different cities, and in multiple regions, and flexible scaling based on service tidal characteristics, helping build smart grids.

   Currently, the distributed openGauss solution is in the crowdtesting phase. Developers are welcome to participate in this phase and work together to build a powerful distributed solution.

diff --git a/content/en/post/2022/Automatic-Test-Framework-YAT.md b/content/en/post/2022/Automatic-Test-Framework-YAT.md
new file mode 100644
index 0000000000000000000000000000000000000000..b98994c13018ba1b097c85cde977548d8ee1b526
--- /dev/null
+++ b/content/en/post/2022/Automatic-Test-Framework-YAT.md
@@ -0,0 +1,195 @@
+++

title = "Automatic Test Framework YAT"

date = "2021-11-19"

tags = [ "Automatic Test Framework YAT"]

archives = "2021-11"

author = "Yansong LI"

summary = "Automatic Test Framework YAT"

img = "/en/post/2022/title/img1.png"

times = "12:30"

+++

# Automatic Test Framework — YAT

The Yet Another Test \(YAT\) framework is an automatic test framework based on Python3. The core of the framework is implemented in the Kotlin language. The framework is encapsulated and glued together by Python to provide command line interfaces \(CLIs\). Figure 1 shows the overall framework. The YAT framework is continuously evolving to become a more efficient and advanced automatic test framework.

YAT is the automatic test framework used for openGauss database specification monitoring. openGauss 2.1.0 not only releases open-source YAT, but also contributes over 30,000 automatic test cases to enhance the test capability of the openGauss community, enrich the openGauss ecosystem and attract more developers to participate in community construction. YAT code repository: [https://gitee.com/opengauss/Yat](https://gitee.com/opengauss/Yat)

![](../figures/zh-cn_image_0000001206146876.jpg)

Figure 1 Overall YAT framework

## Product Advantages

- It is friendly to database tests. Users can directly write SQL code and organize the code into a test suite for tests without additional configuration. Test cases are executed through the JDBC API and can adapt to various databases.
- It supports multiple languages and is extensible.

  Currently, languages such as SQL, Shell, Python \(unittest\), and Groovy \(JUnit/Spock\) are supported. New languages and frameworks can be added through adapters. SQL statements supported by YAT are supersets of standard SQL statements. That is, YAT is extended based on standard SQL statements. Users can run shell commands, control connections, execute loops, bind SQL statements, control multiple sessions, and concurrently execute SQL statements in SQL scripts. For example:

  ```
  @conn user/passwd@127.0.0.1:9090; -- Reconnect to the database as the new user.
  drop table if exists tbl_x; -- Execute SQL statements.

  create table tbl_x (id int, age int, xb int);

  insert into tbl_x values(1, 2, 4);
  insert into tbl_x values(3, 4, 5);

  -- Perform the binding operation.
  insert into tbl_x values(?, ?, ?);
  @bind {
      int 3
      int 5
      int 7
  }
  -- Perform the binding operation in batches.
  insert into tbl_x values(?, ?, ?);
  @batch {
      int 3 int 4 int 0
      int 3 int 4 int 9
      int 3 int 4 int 8
      int 3 int 4 int 7
  }
  -- Run the shell commands.
  @sh zctl.py -t stop;
  @sh zctl.py -t start;

  -- Define sessions.
  @session(name: s1)
  {
      @set autocommit false;
      update tbl_x set par1 = 2, par2 = 2;
      insert into tbl_x values(1, 3, 4);
      commit;
  }

  @session(name: s2, user: abc, password: 'gauss@123')
  {
      @set autocommit false;
      update tbl_x set par1 = 2, par2 = 2;
      insert into tbl_x values(1, 3, 4);
      @step
      {
          select * from tbl_x for update;
      }
      commit;
  }
  -- Execute SQL statements in sessions.
  @steps s1.0 s2.0 s1.1 s1.2 s2.2 s2.1;
  -- Execute loops.
  @for (count: 10)
  {
      insert into abc values(1,1,3,4);
  }
  -- Concurrently execute SQL statements.
  @parallel {
      @session {
          select * from abc for update;
          commit;
      }

      @session {
          select * from abc for update;
          commit;
      }
  }
  ```

  The Python language test script must be a unittest test script. YAT provides a public library to facilitate database and remote SSH operations in the Python unittest test script. For example:

  ```
  class TestPrimaryStandby(TestCase):
      node = None
      node_standby = None

      test_table_drop = 'drop table if exists tbl_test';
      test_table_create = '''create table tbl_test (
          id int,
          name char(20),
          address varchar(1024)
      )
      '''

      @classmethod
      def setUpClass(cls):
          # Initialize the connection object.
          cls.node = Node(node='primary')
          cls.node_standby = Node(node='standby')
          cls.node.sql(cls.test_table_drop)
          cls.node.sql(cls.test_table_create)

      @classmethod
      def tearDownClass(cls) -> None:
          cls.node.close()
          cls.node_standby.close() # Close the connection object.

      def test_abc_001(self):
          # Run the shell command through SSH and check the result.
          self.node.sh('echo "success"').expect('success')

          # Run the SQL statement and check the result.
          self.node.sql('select * from tbl_test').expect(
              (1, 'xxx', 'xxxxxxxxxxxxxxxxxxx'),
              (2, 'xxx', 'xxxxxxxxxxxxxxxxxxx'))

      def test_abc_003(self):
          # Run the shell command and determine the result through regular expression matching.
          self.node.sh('cm ctl query').regex(r'.*success.*')
  ```

  Groovy scripts can be used to write JUnit cases or Spock cases. For more details, visit the official website.

- It defines a set of standard test case writing specifications, test execution processes, and test report presentation modes.

  Run the **yat init** command to create a test suite template. After the **yat init** command is executed, modify the content in the specified test suite directory. Then install the JDBC driver: add a **lib** directory to the root directory of the test suite and copy the driver to this directory. Run the **yat suite run** command to run the test suite. You can add different parameters to set whether to print the report and the report format \(a condensed workflow sketch appears at the end of this post\). Figure 2 shows the test suite directory structure. Configure the following directories before running the test suite:

  - **conf** directory, which stores node configuration files.
  - **except** directory, which stores the expected result files of the test cases.
  - **schedule** directory, which stores the schedule files.
  - **testcase** directory, which stores test case files.

  ![](../figures/zh-cn_image_0000001206626828.jpg)

  Figure 2 Directory structure of the test suite


- Multi-suite Schedule

  YAT Schedule is a scheduler provided by YAT to schedule multiple YAT test suites at the same time.
You can schedule multiple YAT test suites in parallel or serial mode by customizing a schedule file. When there are a large number of test suites, you need to determine the combination and sequence of the test suites. YAT provides a convenient method to organize multiple test suites into a large test suite, as shown in the following example.

  ```
  # File name: all.ys
  serial { # Serial execution of test suites or test suite sets
      suite '../suite1';
      suite '../suite2';
      ...

      parallel { # Parallel execution of test suites or test suite sets
          suite 'parallel/suite1';
          suite 'parallel/suite2';
          ...

          serial { ... }
      }

      suite 'suite3';
      ...
  }
  ```

  Run the following command to execute all test suites in one-click mode:

  ```
  yat schedule -s all.ys
  ```
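As a quick recap, the steps described in this post can be strung together in a shell session. This is a minimal sketch under stated assumptions: only the `yat init`, `yat suite run`, and `yat schedule` commands shown above are used, and the JDBC driver path is a placeholder.

```
# Create a test suite template, then edit conf/, testcase/, except/, and schedule/ in it.
yat init

# Put the JDBC driver of the target database into a lib directory in the suite root.
mkdir lib && cp /path/to/opengauss-jdbc.jar lib/

# Run the suite from its root directory; report options can be appended.
yat suite run

# Or run a set of suites described by a schedule file.
yat schedule -s all.ys
```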
diff --git a/content/en/post/2022/Basic-Principles-of-the-Patroni-for-openGauss-HA-Solution.md b/content/en/post/2022/Basic-Principles-of-the-Patroni-for-openGauss-HA-Solution.md
new file mode 100644
index 0000000000000000000000000000000000000000..cfef9bc63754a99e85b3a8fb0f67b9c0107ed8f2
--- /dev/null
+++ b/content/en/post/2022/Basic-Principles-of-the-Patroni-for-openGauss-HA-Solution.md
@@ -0,0 +1,45 @@
+++

title = "Basic Principles of the Patroni for openGauss HA Solution"

date = "2021-9-17"

tags = [ "Basic Principles of the Patroni for openGauss HA Solution"]

archives = "2021-9"

author = "Mengen Xue"

summary = "Basic Principles of the Patroni for openGauss HA Solution"

img = "/en/post/2022/title/img5.png"

times = "12:30"

+++

# Basic Principles of the Patroni for openGauss HA Solution

## 1. Introduction to Patroni

Patroni is an open-source product developed in Python by Zalando. It uses a distributed configuration system \(DCS\) to track the status and configuration of each node in the database cluster, and performs automatic management and failover for the database cluster.

## 2. Working Principles of Patroni

An HA cluster consists of Patroni, the DCS, and databases. This solution uses editable text configuration daemon \(ETCD\) as the DCS and openGauss as the database.

ETCD is a distributed key-value pair store. It is designed to reliably and quickly store key data and provide access services. It uses distributed locks, leader election, and write barriers to implement reliable distributed collaboration. ETCD clusters are prepared for HA and persistent data storage and retrieval.

Patroni connects to ETCD through an API and inserts key-value pairs to record Patroni parameters, database parameters, primary/standby information, and connection information. Generally, ETCD is used to detect heartbeats of other nodes. The primary/standby information stored in the key-value pairs is obtained from ETCD to determine the status of each node and automatically manage the cluster. The following figure shows the basic principle.

![](../figures/zh-cn_image_0000001208491336.png)

As shown in the preceding figure, only one Patroni node can become the leader at a time. That is, only one Patroni node can hold the leader lock. This prevents split-brain. Currently, patroni-for-openGauss can rectify the following faults:

- 1. If the primary database stops unexpectedly but can be recovered by restarting, it is automatically started immediately.
- 2. If the primary database is faulty and cannot be started, the current primary database releases the leader lock and is demoted to standby. Then, the system automatically selects the healthiest standby database, that is, the standby database whose synchronization status is closest to that of the primary database, and promotes it to primary.
- 3. If the standby database stops unexpectedly but can be recovered and reconnected to the primary database after a restart, it is restarted immediately.
- 4. If the standby database fails unexpectedly and can be started properly but lags behind the primary database after startup, it is rebuilt to restore its status.
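The key-value layout described above can be inspected by hand. The following is a minimal sketch for illustration only: the cluster scope name `opengauss_cluster` and the Patroni configuration file path are illustrative assumptions, and `/service` is Patroni's default etcd namespace.

```
# List the keys Patroni keeps in etcd for one cluster (etcd v3 API).
# Typical keys include .../config, .../leader (the leader lock), and .../members/<node>.
etcdctl get --prefix /service/opengauss_cluster/

# Patroni's own view of the same cluster state.
patronictl -c /etc/patroni/patroni.yml list
```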
diff --git a/content/en/post/2022/Basic-Use-of-the-Fully-encrypted-Database.md b/content/en/post/2022/Basic-Use-of-the-Fully-encrypted-Database.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ba44aa39dbf5c7fbc56975d483ab6a2583a5a8e
--- /dev/null
+++ b/content/en/post/2022/Basic-Use-of-the-Fully-encrypted-Database.md
@@ -0,0 +1,288 @@
+++

title = "Basic Use of the Fully-encrypted Database"

date = "2021-10-16"

tags = [ "Basic Use of the Fully-encrypted Database"]

archives = "2021-10"

author = "Jinxiang Xiao"

summary = "Basic Use of the Fully-encrypted Database"

img = "/en/post/2022/title/img10.png"

times = "12:30"

+++

# Basic Use of the Fully-encrypted Database

## 1. Introduction to the Fully-encrypted Database Features

A fully-encrypted database aims to protect privacy throughout the data lifecycle. Data is always encrypted during transmission, computing, and storage regardless of the service scenario or environment. After the data owner encrypts data on the client and sends the encrypted data to the server, even if an attacker manages to exploit some system vulnerability and steal user data, they cannot obtain valuable information. Data privacy is protected.

## 2. Customer Benefits of the Fully-encrypted Database

The entire service data flow is encrypted during processing. A fully-encrypted database:

1. Protects data privacy and security throughout the lifecycle on the cloud. Attackers cannot obtain information from the database server regardless of the data status.
2. Helps cloud service providers earn the trust of third-party users. Users, including service administrators and O&M administrators in enterprise service scenarios and application developers in consumer cloud services, can keep the encryption keys themselves so that even users with high permissions cannot access unencrypted data.
3. Enables cloud databases to better comply with personal privacy protection laws and regulations.

## 3. Use of the Fully-encrypted Database

Currently, the fully-encrypted database supports two connection modes: gsql and JDBC. This chapter describes how to use the database in the two connection modes.

### 3.1 Connecting to a Fully-encrypted Database

1. Run the **gsql -p PORT -d postgres -r -C** command to enable the encryption function.

   Parameter description:

   **-p** indicates the port number. **-d** indicates the database name. **-C** indicates that the encryption function is enabled.

2. To support JDBC operations on a fully-encrypted database, set **enable\_ce** to **1**.

### 3.2 Creating a User Key

A fully-encrypted database has two types of keys: the client master key \(CMK\) and the column encryption key \(CEK\).

The CMK is used to encrypt the CEK. The CEK is used to encrypt user data.

Before creating a key, use gs\_ktool to create a key ID for creating a CMK.

openGauss=\# **\\! gs\_ktool -g**

The sequence and dependency of creating a key are as follows: creating a key ID \> creating a CMK \> creating a CEK.

- **1. Creating a CMK and a CEK in the GSQL Environment**

- \[Creating a CMK\]

  CREATE CLIENT MASTER KEY client\_master\_key\_name WITH \(KEY\_STORE = key\_store\_name, KEY\_PATH = "key\_path\_value", ALGORITHM = algorithm\_type\);

  Parameter description:

  - client\_master\_key\_name

    This parameter is used as the name of a key object. In the same namespace, the value of this parameter must be unique.

    Value range: a string. It must comply with the naming convention.

  - KEY\_STORE

    Tool or service that independently manages keys. Currently, only the key management tool gs\_ktool provided by GaussDB Kernel and the online key management service huawei\_kms provided by Huawei Cloud are supported. Value range: **gs\_ktool** and **huawei\_kms**

  - KEY\_PATH

    A key in the key management tool or service. The **KEY\_STORE** and **KEY\_PATH** parameters can be used to uniquely identify a key entity. When **KEY\_STORE** is set to **gs\_ktool**, the value is **gs\_ktool** or **KEY\_ID**. When **KEY\_STORE** is set to **huawei\_kms**, the value is a 36-byte key ID.

  - ALGORITHM

    This parameter specifies the encryption algorithm used by the key entity. When **KEY\_STORE** is set to **gs\_ktool**, the value can be **AES\_256\_CBC** or **SM4**. When **KEY\_STORE** is set to **huawei\_kms**, the value is **AES\_256**.

- \[Creating a CEK\]

  CREATE COLUMN ENCRYPTION KEY column\_encryption\_key\_name WITH\(CLIENT\_MASTER\_KEY = client\_master\_key\_name, ALGORITHM = algorithm\_type, ENCRYPTED\_VALUE = encrypted\_value\);

  Parameter description:

  - column\_encryption\_key\_name

    This parameter is used as the name of a key object. In the same namespace, the value of this parameter must be unique.

    Value range: a string. It must comply with the naming convention.

  - CLIENT\_MASTER\_KEY

    Specifies the CMK used to encrypt the CEK. The value is the CMK object name, which is created using the **CREATE CLIENT MASTER KEY** syntax.

  - ALGORITHM

    Encryption algorithm to be used by the CEK. The value can be **AEAD\_AES\_256\_CBC\_HMAC\_SHA256**, **AEAD\_AES\_128\_CBC\_HMAC\_SHA256**, or **SM4\_SM3**.

  - **ENCRYPTED\_VALUE \(optional\)**

    A key password specified by a user. The key password length ranges from 28 to 256 bits. The derived 28-bit key meets the AES128 security requirements. If the user needs to use AES256, the key password length must be 39 bits. If the user does not specify the key password length, a 256-bit key is automatically generated.

  \[Example in the GSQL environment\]

  ```
  -- (1) Use the key management tool gs_ktool to create a key. The tool returns the ID of the newly generated key.
  [cmd] gs_ktool -g

  -- (2) Use a privileged account to create a common user named alice.
  openGauss=# CREATE USER alice PASSWORD '********';

  -- (3) Use the account of common user alice to connect to the encrypted database and execute the syntax.
  gsql -p 57101 postgres -U alice -r -C

  -- Create a CMK object.
  openGauss=> CREATE CLIENT MASTER KEY alice_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);

  -- Create a CEK object.
  openGauss=> CREATE COLUMN ENCRYPTION KEY a_cek WITH VALUES (CLIENT_MASTER_KEY = a_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);

  openGauss=> CREATE COLUMN ENCRYPTION KEY another_cek WITH VALUES (CLIENT_MASTER_KEY = a_cmk, ALGORITHM = SM4_SM3);
  ```
- **2. Creating a CMK and a CEK in the JDBC Environment**

  ```
  // Create a CMK.
  Connection conn = DriverManager.getConnection("url","user","password");
  Statement stmt = conn.createStatement();
  int rc = stmt.executeUpdate("CREATE CLIENT MASTER KEY ImgCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);");

  // Create a CEK.
  int rc2 = stmt.executeUpdate("CREATE COLUMN ENCRYPTION KEY ImgCEK1 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);");
  ```
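In the snippet above, "url" is a placeholder. The following is a minimal sketch of a concrete connection string; it assumes the PostgreSQL-style URL scheme used by the openGauss JDBC driver, and the host, port, database, and credentials are illustrative. As noted in section 3.1, **enable\_ce** must be set to **1** for fully-encrypted operations.

```
// Illustrative values only; enable_ce=1 switches on the client-side encryption support.
String url = "jdbc:postgresql://127.0.0.1:5432/postgres?enable_ce=1";
Connection conn = DriverManager.getConnection(url, "alice", "********");
```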
+### 3.3 Creating an Encrypted Table + +After creating the CMK and CEK, you can use the CEK to create an encrypted table. + +An encrypted table can be created in two modes: randomized encryption and deterministic encryption. + +- **Creating an Encrypted Table in the GSQL Environment** + +\[Example\] + + + + + + +

```
openGauss=# CREATE TABLE creditcard_info (id_number int,
name text encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC),
credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));
```

Parameter description:

**ENCRYPTION\_TYPE** indicates the encryption type in the ENCRYPTED WITH constraint. The value of **encryption\_type\_value** can be **DETERMINISTIC** or **RANDOMIZED**.

- **Creating an Encrypted Table in the JDBC Environment**

```
int rc3 = stmt.executeUpdate("CREATE TABLE creditcard_info (id_number int, name varchar(50) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));");
```
+ +### 3.4 Inserting Data into the Encrypted Table and Querying the Data + +After an encrypted table is created, you can insert and view data in the encrypted table in encrypted database mode \(enabling the connection parameter **-C**\). When the common environment \(disabling the connection parameter **-C**\) is used, operations cannot be performed on the encrypted table, and only ciphertext data can be viewed in the encrypted table. + +- **Inserting Data into the Encrypted Table and Viewing the Data in the GSQL Environment** + + + + + + +

```
openGauss=# INSERT INTO creditcard_info VALUES (1,'joe','6217986500001288393');
INSERT 0 1
openGauss=# INSERT INTO creditcard_info VALUES (2, 'joy','6219985678349800033');
INSERT 0 1
openGauss=# select * from creditcard_info where name = 'joe';
 id_number | name |     credit_card
-----------+------+---------------------
         1 | joe  | 6217986500001288393
(1 row)
```
+ + Note: The data in the encrypted table is displayed in ciphertext when you use a non-encrypted client to view the data. + + + + + + +

```
openGauss=# select id_number,name from creditcard_info;
 id_number | name
-----------+-------------------------------------------
         1 | \x011aefabd754ded0a536a96664790622487c4d36
         2 | \x011aefabd76853108eb406c0f90e7c773b71648f
(2 rows)
```
+ +- **Inserting Data into the Encrypted Table and Viewing the Data in the JDBC Environment** + + + + + + +

```
// Insert data.
int rc4 = stmt.executeUpdate("INSERT INTO creditcard_info VALUES (1,'joe','6217986500001288393');");

// Query the encrypted table.
ResultSet rs = null;
rs = stmt.executeQuery("select * from creditcard_info where name = 'joe';");

// Close the statement object.
stmt.close();
```
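In application code, values for encrypted columns are normally supplied as bind parameters so that the client driver can encrypt them before they leave the client. The following is a hedged sketch under that assumption, reusing the conn object and the creditcard_info table from the examples above; the inserted values are illustrative.

```
// Bind parameters: the driver encrypts the bound values for encrypted columns on the client side.
PreparedStatement ps = conn.prepareStatement("INSERT INTO creditcard_info VALUES (?, ?, ?);");
ps.setInt(1, 3);
ps.setString(2, "jim");
ps.setString(3, "6217986500001288394");
ps.executeUpdate();
ps.close();
```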
The preceding describes how to use the fully-encrypted database features. For details, see the corresponding sections in the official document. However, for a common user, the functions described above are sufficient to ensure smooth implementation of daily work. In the future, fully-encrypted databases will evolve to be easier to use and provide higher performance. Stay tuned!

diff --git a/content/en/post/2022/Compilation-Guide-for-openGauss-Under-Heterogeneous-Conditions.md b/content/en/post/2022/Compilation-Guide-for-openGauss-Under-Heterogeneous-Conditions.md
new file mode 100644
index 0000000000000000000000000000000000000000..40e1a02e2a298895429efe3d493b00d091e6f4dd
--- /dev/null
+++ b/content/en/post/2022/Compilation-Guide-for-openGauss-Under-Heterogeneous-Conditions.md
@@ -0,0 +1,539 @@
+++

title = "Compilation Guide for openGauss Under Heterogeneous Conditions"

date = "2021-07-07"

tags = [ "Compilation Guide for openGauss Under Heterogeneous Conditions"]

archives = "2021-07"

author = "Yansong LI"

summary = "Compilation Guide for openGauss Under Heterogeneous Conditions"

img = "/en/post/2022/title/img1.png"

times = "12:30"

+++

# Compilation Guide for openGauss Under Heterogeneous Conditions

## **Download**

opengauss-openGauss-server-v2.0.0.tar.gz \(openGauss source code package\)

**Download link:**

https://gitee.com/opengauss/openGauss-server/repository/archive/v2.0.0?ref=v2.0.0&sha=78689da92cdc811cad2458dc213d007e96864062&format=tar.gz&captcha\_type=yunpian

opengauss-openGauss-third\_party-2.0.0.zip \(source code package of all third-party libraries on which openGauss-server depends\)

**Download link:**

https://gitee.com/opengauss/openGauss-third\_party/repository/archive/v2.0.0.zip?ref=v2.0.0&sha=3a38c6c134e5b2e39d0557d575ec04302a83584a&format=zip&captcha\_type=yunpian

openGauss-third\_party\_binarylibs.tar.gz \(medium packages of all third-party libraries on which openGauss-server depends\)

**Download link:**

https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/openGauss-third\_party\_binarylibs.tar.gz

Note: This document is compiled by referring to the following official compilation document:

https://opengauss.org/en/docs/2.0.0/docs/Quickstart/Quickstart.html

## **Precautions**

Currently, 64-bit executable programs are generated.

## **Environment Information**

The compilation is performed on Phytium Kirin V10. The detailed information is as follows:

```
Linux localhost.localdomain 4.19.90-vhulk2001.1.0.0026.ns7.15.aarch64 #1 SMP Sun Apr 19 22:34:29 CST 2020 aarch64 aarch64 aarch64 GNU/Linux

NAME="Kylin Linux Advanced Server"
VERSION="V10 (Azalea)"
ID="kylin"
ID_LIKE="fedora"
VARIANT="Server"
VARIANT_ID="server"
VERSION_ID="V10"
PRETTY_NAME="Kylin Linux Advanced Server V10 (Azalea)"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:kylin:enterprise_linux:V10:GA:server"
HOME_URL="https://www.kylinos.cn/"
BUG_REPORT_URL="https://bugzilla.kylinos.cn/bugzilla/"
KYLIN_BUGZILLA_PRODUCT="Kylin Linux Advanced Server 10"
KYLIN_BUGZILLA_PRODUCT_VERSION=V10
KYLIN_SUPPORT_PRODUCT="Kylin Linux Advanced Server"
KYLIN_SUPPORT_PRODUCT_VERSION="V10"
```

## **Compilation**

- **openGauss-third\_party**

  This repository is used to compile all open-source third-party software on which openGauss depends.

  There are four directories.

  a. The **build** directory contains all third-party scripts on which the compilation depends.

  b. The **buildtools** directory contains build tools for compiling these open-source components and the openGauss server.
  c. The **dependency** directory contains all open-source components on which the openGauss server depends.

  d. The **platform** directory contains open-source software such as OpenJDK.

- **Dependencies**

  The following lists the software requirements for compiling openGauss.

  \(CMake, GCC, and Python must be installed. You are advised to install the other components as well. You can try to compile the code even if the installation is not complete.\)

  ```
  libaio-devel
  ncurses-devel
  pam-devel
  libffi-devel
  libtool
  libtool-devel
  libtool-ltdl
  python-devel
  openssl-devel
  lsb_release (The package name in Phytium-Kylin is neokylin-lsb.)
  bison
  cmake
  gcc
  ```

  Note: CMake and GCC have strict version requirements. The CMake version must be later than 3.16.5, and the GCC version must be 7.3.0.

  Compile Python 3 and set environment variables. The installation of CMake and GCC will be described later. Run the **yum install** command to install the other dependencies. If network access is limited, mount the Kylin ISO file and install from it.

  Before installing CMake and GCC, install Python 3 and the preceding software.
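Assuming a configured yum source or a mounted Kylin ISO repository, the listed packages (other than CMake and GCC, which are built from source below) can be installed in one step; as noted above, the lsb_release package on Phytium-Kylin is named neokylin-lsb.

```
# One-shot install of the listed build dependencies.
yum install -y libaio-devel ncurses-devel pam-devel libffi-devel \
    libtool libtool-devel libtool-ltdl python-devel \
    openssl-devel neokylin-lsb bison
```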
- **Installing CMake**

  The version of CMake is cmake-3.17.1, and the installation path is **/home/opengauss/3rd/cmake-3.17.1**. \(Related dependencies are installed in **/home/opengauss/3rd**.\) To install CMake, run the following commands:

  ```
  tar -zxvf cmake-3.17.1.tar.gz
  cd cmake-3.17.1
  ./bootstrap --prefix=/home/opengauss/3rd/cmake-3.17.1
  make -j4
  make install
  ```

- **Installing GCC**

  The GCC installation depends on GMP 6.2.0, MPFR 4.0.2, and MPC 1.1.0. To install GCC, run the following commands:

  ```
  # gmp
  tar -zxvf gmp-6.2.0.tar.gz
  cd gmp-6.2.0
  ./configure --prefix=/home/opengauss/3rd/gmp-6.2.0 --build=x86 CFLAGS=-fPIC
  make -j4
  make install

  # mpfr
  tar -zxvf mpfr-4.0.2.tar.gz
  cd mpfr-4.0.2
  ./configure --prefix=/home/opengauss/3rd/mpfr-4.0.2 --with-gmp=/home/opengauss/3rd/gmp-6.2.0
  make -j4
  make install

  # mpc
  tar -zxvf mpc-1.1.0.tar.gz
  cd mpc-1.1.0
  ./configure --prefix=/home/opengauss/3rd/mpc-1.1.0 --with-gmp=/home/opengauss/3rd/gmp-6.2.0 --with-mpfr=/home/opengauss/3rd/mpfr-4.0.2
  make -j4
  make install

  # gcc
  tar -zxvf gcc-7.3.0.tar.gz
  cd gcc-7.3.0
  export LD_LIBRARY_PATH=/home/opengauss/3rd/gmp-6.2.0/lib:/home/opengauss/3rd/mpfr-4.0.2/lib:/home/opengauss/3rd/mpc-1.1.0/lib:$LD_LIBRARY_PATH
  ./configure --prefix=/home/opengauss/3rd/gcc-7.3.0 --with-gmp=/home/opengauss/3rd/gmp-6.2.0 --with-mpfr=/home/opengauss/3rd/mpfr-4.0.2 --with-mpc=/home/opengauss/3rd/mpc-1.1.0
  make -j4
  make install
  ```

- **Compiling openGauss-third\_party**

  Save the **opengauss-openGauss-third\_party-2.0.0.zip** package to **/home/opengauss** and decompress it.

  ```
  cd openGauss-third_party
  ```

  By default, compilation commands are executed concurrently, which occupies a large amount of memory. If the memory is insufficient, run the **find . -name "\*.sh" | xargs grep "make" | grep j** command to find all MAKE statements, delete **-sj**, **-sj$\{cpus\_num\}**, or **-sj 8**, and run the statements in single-thread mode. You can also change the value based on the number of cores and memory size of the host.

  Set the environment variables.

  ```
  export CMAKEROOT=/home/opengauss/3rd/cmake-3.17.1
  export GCC_PATH=/home/opengauss/3rd/gcc-7.3.0

  export CC=$GCC_PATH/bin/gcc
  export CXX=$GCC_PATH/bin/g++

  export LD_LIBRARY_PATH=$GCC_PATH/lib64:/home/opengauss/3rd/mpc-1.1.0/lib:/home/opengauss/3rd/mpfr-4.0.2/lib:/home/opengauss/3rd/gmp-6.2.0/lib:$CMAKEROOT/lib:$LD_LIBRARY_PATH
  export PATH=$CMAKEROOT/bin:$PATH
  ```

  The compilation procedure is as follows:

  ```
  1. Run cd /home/opengauss/openGauss-third_party/build.
  2. Run sh build_all.sh.
  ```

  After the compilation is complete, the result is exported to **/home/opengauss/openGauss-third\_party/output**.

  ```
  /home/opengauss/openGauss-third_party/output is the third-party medium directory on which openGauss-server depends.
  ```

## **openGauss-server**

This repository is used to compile GaussDB binary executable files.

- **Dependencies**

  The following lists the software requirements for compiling openGauss.

  You are advised to use the default installation packages of the following dependency software obtained from the operating system installation CD-ROM or installation source. If the following software does not exist, refer to the recommended software versions. \(You can try compilation even if the installation is not complete.\)

  ```
  libaio-devel 0.3.109-13
  flex 2.5.31 or later
  bison 2.7-4
  ncurses-devel 5.9-13.20130511
  glibc-devel 2.17-111
  patch 2.7.1-10
  readline-devel 7.0-13
  ```

- **Compiling openGauss-server**

  Save the **opengauss-openGauss-server-v2.0.0.tar.gz** package to **/home/opengauss** and decompress it.

  ```
  cd openGauss-server
  ```

  By default, compilation commands are executed concurrently, which occupies a large amount of memory. If the memory is insufficient, run the **find . -name "\*.sh" | xargs grep "make" | grep j** command to find all MAKE statements, delete **-sj**, **-sj$\{cpus\_num\}**, or **-sj 8**, and run the statements in single-thread mode. You can also change the value based on the number of cores and memory size of the host.

  Set the environment variables.

  ```
  export CODE_BASE=`pwd`
  export BINARYLIBS=`pwd`/../openGauss-third_party/output
  export GAUSSHOME=$CODE_BASE/dest
  export GCC_PATH=/home/opengauss/3rd/gcc-7.3.0
  export CC=$GCC_PATH/bin/gcc
  export CXX=$GCC_PATH/bin/g++
  export LD_LIBRARY_PATH=$GCC_PATH/lib64:/home/opengauss/3rd/mpc-1.1.0/lib:/home/opengauss/3rd/mpfr-4.0.2/lib:/home/opengauss/3rd/gmp-6.2.0/lib:$LD_LIBRARY_PATH
  export PATH=$GCC_PATH/bin:$PATH
  ```

  The compilation procedure is as follows:

  ```
  Run the following commands:
  ./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib
  make -j4
  make install
  Errors may be reported during the compilation. After the errors are rectified, the compilation result is exported to the $GAUSSHOME directory.
  ```

- **Rectifying Errors**

  Save **openGauss-third\_party\_binarylibs.tar.gz** to **/home/opengauss**.

  ```
  cd /home/opengauss
  tar -zxvf openGauss-third_party_binarylibs.tar.gz
  ```

  Most compilation errors are caused by the lack of some dynamic libraries and header files in the third-party software compiled in the previous section. The following solutions are available:

  - 1. Run **cd openGauss-third\_party/dependency** to go to the corresponding library directory, and perform compilation by referring to **README.md**.
The compilation result is output to **openGauss-third\_party/output/dependency/kylin\_aarch64**.
  - 2. Copy the corresponding library in the **openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64** directory to the **openGauss-third\_party/output/dependency/kylin\_aarch64** directory. \(In this method, some libraries cannot be used after being copied, because some function symbols are missing.\)

  Note: Select the first solution if possible, unless it is too involved and the second solution solves the problem.

  The following analyzes and rectifies possible errors one by one:

  - ../../../../../src/include/access/obs/obs\_am.h:33:10: fatal error: eSDKOBS.h: The file or directory does not exist.

    Cause: The **libobs** library is missing in **openGauss-third\_party/output**.

    Solution: If the **libobs** source code does not exist in **openGauss-third\_party/dependency**, use the second solution.

    cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/libobs openGauss-third\_party/output/dependency/kylin\_aarch64

  - ../../../../src/include/gs\_policy/gs\_string.h:32:10: fatal error: boost/functional/hash.hpp: The file or directory does not exist.

    Cause: The **boost** library is missing in **openGauss-third\_party/output**.

    Solution: Add the **boost** source code to **openGauss-third\_party/dependency**. However, the compilation process is complex. In addition, it is verified that the **boost** library in **openGauss-third\_party\_binarylibs** can be used normally. Therefore, the second solution is selected.

    cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/boost openGauss-third\_party/output/dependency/kylin\_aarch64

  - Cipherfn.cpp:1231:5: error: 'krb5\_set\_profile\_path' has not been declared in this scope.

    Cause: The **kerberos** library is missing in **openGauss-third\_party/output**.

    Solution: If the **kerberos** library in **openGauss-third\_party\_binarylibs/dependency** is unavailable, select the first solution.

    cd openGauss-third\_party/dependency/kerberos

    python build.py -m all -f krb5-1.17.1.tar.gz -t "comm|llt"

    Compilation error:

    /home/opengauss/3rd/gcc-7.3.0/lib/gcc/aarch64-unknown-linux-gnu/7.3.0/include-fixed/openssl/bn.h:138:11: fatal error: openssl/e\_os2.h: The file or directory does not exist.

    Solution:

    export C\_INCLUDE\_PATH=/home/opengauss/openGauss-third\_party/output/dependency/kylin\_aarch64/openssl/comm/include

    Run the **python build.py -m all -f krb5-1.17.1.tar.gz -t "comm|llt"** command.

    The following error is then reported:

    make\[2\]: \*\*\* There is no rule to create the target libcom\_err\_gauss.exports required by binutils.versions. \(Several similar errors occur.
Rectify the errors before continuing the compilation.\)

    Solution:

    cd /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/util/et/

    cp -r libcom\_err.exports libcom\_err\_gauss.exports

    cd /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/lib/krb5

    cp -r libkrb5.exports libkrb5\_gauss.exports

    cd /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/lib/crypto

    cp -r libk5crypto.exports libk5crypto\_gauss.exports

    cd /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/lib/rpc

    cp -r libgssrpc.exports libgssrpc\_gauss.exports

    cd /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/lib/gssapi

    cp -r libgssapi\_krb5.exports libgssapi\_krb5\_gauss.exports

    Run the **python build.py -m all -f krb5-1.17.1.tar.gz -t "comm|llt"** command.

    The following error is then reported:

    openssl.so: In the 'unmarshal\_w' function:

    openssl.c:\(.text+0x330\): undefined reference to 'BN\_set\_flags'

    openssl.so: In the 'ossl\_hash' function:

    openssl.c:\(.text+0x8b8\): undefined reference to 'EVP\_MD\_CTX\_new'

    openssl.c:\(.text+0x9ac\): undefined reference to 'EVP\_MD\_CTX\_free'

    Solution:

    cp /home/opengauss/openGauss-third\_party\_binarylibs/dependency/kylin\_aarch64/openssl/comm/lib/libcrypto.so /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/lib

    cp /home/opengauss/openGauss-third\_party\_binarylibs/dependency/kylin\_aarch64/openssl/comm/lib/libssl.so /home/opengauss/openGauss-third\_party/dependency/kerberos/krb5-1.17.1/src/lib

    Run the **python build.py -m all -f krb5-1.17.1.tar.gz -t "comm|llt"** command.

    Note: After a problem is solved, a message similar to the following is displayed:

    The next patch would create the file src/lib/crypto/libk5crypto\_gauss.exports, which already exists! Assume -R? \[n\]

    If you enter **y**, the system automatically deletes the **libcom\_err\_gauss.exports**, **libkrb5\_gauss.exports**, **libk5crypto\_gauss.exports**, **libgssrpc\_gauss.exports** and **libgssapi\_krb5\_gauss.exports** files. Therefore, you need to copy the five files immediately after you enter **y**.

    Suggestion: Solve the preceding problems before continuing the compilation.

  - ../../../../src/include/gs\_policy/curl\_utils.h:17:10: fatal error: curl/curl.h: The file or directory does not exist.

    Cause: The **libcurl** library is missing in **openGauss-third\_party/output**.

    Solution: The **libcurl** source code exists in **openGauss-third\_party/dependency**, but the compilation process is complex. In addition, it is verified that the **libcurl** library in **openGauss-third\_party\_binarylibs** can be used normally. Therefore, the second solution is selected.

    cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/libcurl openGauss-third\_party/output/dependency/kylin\_aarch64

  - client\_logic.cpp:50:10: fatal error: MurmurHash3.h: The file or directory does not exist.

    costsize.cpp:94:10: fatal error: hll.h: The file or directory does not exist.

    Cause: The **postgresql-hll** library is missing in **openGauss-third\_party/output**.

    Solution: If the **postgresql-hll** source code exists in **openGauss-third\_party/dependency**, use the first solution.
+ + cd openGauss-third\_party/dependency/postgresql-hll + + sh build.sh –m all + + After the compilation is complete, only the **lib** folder exists and the **include** folder is missing. Copy the **lib** folder from **openGauss-third\_party\_binarylibs**. + + cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/postgresql-hll /comm/include openGauss-third\_party/output/dependency/kylin\_aarch64/postgresql-hll/comm + + - ../../../../src/include/access/dfs/dfs\_query.h:29:10: fatal error: orc/Exceptions.hh: The file or directory does not exist. + + Cause: The **liborc** library is missing in **openGauss-third\_party/output**. + + Solution: If the **liborc** source code does not exist in **openGauss-third\_party/dependency**, use the second solution. + + cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/liborc openGauss-third\_party/output/dependency/kylin\_aarch64 + + - remote\_read.pb.h:10:10: fatal error: google/protobuf/port\_def.inc: The file or directory does not exist. + + Cause: The **protobuf** library is missing in **openGauss-third\_party/output**. + + Solution: If the **protobuf** source code exists in **openGauss-third\_party/dependency**, use the first solution. + + cd openGauss-third\_party/dependency/protobuf + + python build.py -m all -f protobuf-3.11.3.zip -t "comm|llt" + + - remote\_read.grpc.pb.h:10:10: fatal error: grpc/impl/codegen/port\_platform.h: The file or directory does not exist. + + Cause: The **grpc** library is missing in **openGauss-third\_party/output**. + + Solution: The **grpc** source code exists in **openGauss-third\_party/dependency**, but the compilation process is complex. In addition, it is verified that the **grpc** library in **openGauss-third\_party\_binarylibs** can be used properly. Therefore, the second solution is selected. + + cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/grpc openGauss-third\_party/output/dependency/kylin\_aarch64 + + - parquet\_file\_reader.h:27:10: fatal error: parquet/api/reader.h: The file or directory does not exist. + + Cause: The **libparquet** library is missing in **openGauss-third\_party/output**. + + Solution: If the **libparquet** source code does not exist in **openGauss-third\_party/dependency**, use the second solution. + + cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/libparquet openGauss-third\_party/output/dependency/kylin\_aarch64 + + - /usr/bin/ld: Cannot find **–lthrift**. + + Cause: The **libthrift** library is missing in **openGauss-third\_party/output**. + + Solution: If the **libthrift** source code exists in **openGauss-third\_party/dependency**, use the first solution. + + cd openGauss-third\_party/dependency/libthrift + + sh ./build.sh + + - /usr/bin/ld: Cannot find **-lsnappy**. + + Cause: The **snappy** library is missing in **openGauss-third\_party/output**. + + Solution: If the **snappy** source code exists in **openGauss-third\_party/dependency**, use the first solution. + + cd openGauss-third\_party/dependency/snappy + + sh ./build.sh + + - /usr/bin/ld: Cannot find **-lzstd**. + + Cause: The **zstd** library is missing in **openGauss-third\_party/output**. + + Solution: If the **zstd** source code exists in **openGauss-third\_party/dependency**, use the first solution. 
+ + cd openGauss-third\_party/dependency/zstd + + sh ./build.sh + + - /home/opengauss/openGauss-server/../openGauss-third\_party/output/dependency/kylin\_aarch64/libobs/comm/lib/libxml2.so: undefined reference to 'fcntl64@GLIBC\_2.28' + + Cause: During the compilation, the **libxml2.so** file in the **openGauss-third\_party/output/dependency/ kylin\_aarch64/libobs** directory is found, which lacks 'fcntl64@GLIBC\_2.28'. + + Solution: The **libxml2** source code exists in **openGauss-third\_party/dependency**. However, during the compilation, the **libxml2-2.9.9.tar.gz** package cannot be decompressed. In addition, **libobs** is copied from **openGauss-third\_party\_binarylibs**. Therefore, neither the first solution nor the second solution can solve this problem. + + Run the **find / -name "libxml2\*"** command. You can find the **libxm12.so\*** library in **/usr/lib64**. + + cp –r /usr/lib64/libxml2.so.2.9.1 openGauss-third\_party/output/dependency/kylin\_aarch64/libobs/comm/lib + + cd openGauss-third\_party/output/dependency/kylin\_aarch64/libobs/comm/lib + + ln –s libxml2.so.2.9.1 libxml2.so.2 + + ln –s libxml2.so.2.9.1 libxml2.so + + If the **libxml2.so\*** file already exists in **openGauss-third\_party/output/dependency/kylin\_aarch64/libobs/comm/lib**, back up the file. + + The following error information is displayed when you run the **make install** command: + + - ./zic: error while loading shared libraries: libssl.so.1.1: cannot open shared object file: No such file or directory + + Cause: **libssl.so.1.1** cannot be found. + + Solution: Run the **find / -name "libssl.so.1.1"** command. + + You can find it in **/home/opengauss/openGauss-third\_party/output/dependency/kylin\_aarch64/openssl/comm/lib**. Set the environment variable. + + export LD\_LIBRARY\_PATH=/home/opengauss/openGauss-third\_party/output/dependency/kylin\_aarch64/openssl/comm/lib:$LD\_LIBRARY\_PATH + + - cp: Failed to obtain the file status \(stat\) of "/home/opengauss/openGauss-server/../openGauss-third\_party/output/buildtools/kylin\_aarch64/gcc7.3/gcc/lib64/libstdc++.so.6": The file or directory does not exist. + + Cause: The **gcc** folder is missing in **openGauss-third\_party/output/buildtools**. + + Solution: Copy the compiled **gcc** folder to the directory. + + cd openGauss-third\_party/output/buildtools + + mkdir -p kylin\_aarch64/gcc7.3 + + cd kylin\_aarch64/gcc7.3 + + cp –r /home/opengauss/3rd/gcc-7.3.0 . + + mv gcc-7.3.0 gcc + + - cp: Failed to obtain the file status \(stat\) of "/home/opengauss/openGauss-server/../openGauss-third\_party/output/dependency/kylin\_aarch64/pljava/comm/lib/\*": The file or directory does not exist. + + Cause: The **pljava** library is missing in **openGauss-third\_party/output**. + + Solution: The **pljava** source code exists in **openGauss-third\_party/dependency**, but the compilation process is complex. In addition, it is verified that the **pljava** library in **openGauss-third\_party\_binarylibs** can be used properly. Therefore, the second solution is selected. 
cp -r openGauss-third\_party\_binarylibs/dependency/openeuler\_aarch64/pljava openGauss-third\_party/output/dependency/kylin\_aarch64

diff --git a/content/en/post/2022/Core-Technologies-of-openGauss-Database-(I).md b/content/en/post/2022/Core-Technologies-of-openGauss-Database-(I).md
new file mode 100644
index 0000000000000000000000000000000000000000..fbeb69b4dabeb1b529eef3d72060ec937b90df70
--- /dev/null
+++ b/content/en/post/2022/Core-Technologies-of-openGauss-Database-(I).md
@@ -0,0 +1,244 @@
+++

title = "Core Technologies of openGauss Database \(I\)"

date = "2020-07-23"

tags = [ "Core Technologies of openGauss Database (I)"]

archives = "2020-07"

author = "Shujie Zhang"

summary = "Core Technologies of openGauss Database (I)"

img = "/en/post/2022/title/img5.png"

times = "12:30"

+++

# Core Technologies of openGauss Database \(I\)

## Overview

An SQL engine is one of the important subsystems of a database. It receives SQL statements sent by applications and directs executors to execute execution plans. As an important and complex module in an SQL engine, the optimizer is regarded as the "brain" of a database. An execution plan generated by the optimizer directly determines the database performance. The following describes the modules of the SQL engine.

## **01** SQL Engine Overview

An SQL engine is an important part of a database system. It is responsible for generating efficient execution plans based on the SQL statements entered by applications in the current load scenario. It plays an important role in efficient SQL execution. The following figure shows the SQL execution process in the SQL engine.

SQL execution process

![](../figures/61.png)

As shown in the preceding figure, an SQL statement needs to be parsed to generate a logical execution plan, and the physical execution plan needs to be generated through query optimization. Then, the physical execution plan is transferred to the query execution engine for executing the physical operator.

## **02** SQL Parsing

The compiling process of SQL statements in a DBMS follows the conventional compiler process, which requires lexical analysis, syntax analysis, and semantic analysis.

The SQL standards determine the keywords and syntax rules of the SQL language. During lexical analysis, the SQL parser divides an SQL statement into independent atomic units according to keywords and interval information, and each unit is presented as a word.

◾ Lexical analysis: Identify keywords, identifiers, operators, and terminators supported by the system from query statements. Each word determines its own part-of-speech \(POS\).

◾ Syntax analysis: Define syntax rules based on SQL language standards, and use words generated during lexical analysis to match syntax rules. If an SQL statement can match a syntax rule, an abstract syntax tree \(AST\) is generated.

◾ Semantic analysis: Check the validity of the AST, check whether the tables, columns, functions, and expressions in the syntax tree have corresponding metadata, and convert the AST into a logical execution plan \(relational algebra expression\).

For example, consider the following statement:

```
SELECT w_name FROM warehouse WHERE w_no = 1;
```

The following table lists atomic units such as keywords, identifiers, operators, and constants.

| POS | Content |
| --- | --- |
| Keywords | SELECT, FROM, WHERE |
| Identifiers | w_name, warehouse, w_no |
| Operators | = |
| Constants | 1 |
During syntax analysis, words obtained through lexical analysis are matched with syntax rules, and an AST is generated. Each word is displayed as a leaf node of the syntax tree, as shown in the following figure.

Abstract syntax tree

![](../figures/zh-cn_image_0000001251458611.jpg)

The semantics expressed by the AST is limited to ensuring that the applied SQL statements comply with the SQL standard specifications. However, the validity of the internal meaning of the SQL statements needs to be checked.

◾ Check the use of relationships: A relationship in the FROM clause must be a relationship or view in the schema corresponding to the query.

◾ Check and parse the use of attributes: Each attribute in the SELECT statement or WHERE clause must be the attribute of a relationship or view in the FROM clause.

◾ Check data types: The data types of all attributes must be matched.

This validity check, together with the binding of names to database objects, constitutes the semantic analysis process. Through the semantic analysis check, the AST is converted into a logical execution plan. The logical execution plan can be represented by a relational algebra expression, as shown in the following figure.

Relational algebraic expression

![](../figures/zh-cn_image_0000001207138590.jpg)

## **03** Query Optimization

Based on different optimization methods, the optimization technologies of the optimizer can be classified into the following types:

- Rule-based optimization \(RBO\): optimizes SQL statements based on predefined heuristic rules.
- Cost-based query optimization \(CBO\): performs cost estimation on the to-be-selected execution paths corresponding to the SQL statement, and selects an execution path with the lowest cost from the to-be-selected paths as the final execution plan.
- AI-based optimization \(ABO\): collects feature information of an execution plan, obtains experience information by using a machine learning model, and then optimizes the execution plan to obtain an optimal execution plan.

In recent years, AI technologies, especially in the deep learning field, have developed rapidly. ABOs have great advantages in modeling efficiency, estimation accuracy, and adaptability. They are expected to break the restrictions of RBO and CBO based on static models. By continuously learning historical experience, the mode of the target scenario is abstracted to form a dynamic model, which is adaptively optimized based on the actual scenario of the user. openGauss uses the CBO technology and is actively exploring ABOs.

- **3.1 Query Rewriting**

  Query rewriting is to convert SQL statements entered by users into more efficient equivalent SQL statements. It has two basic principles.

  Equivalence: The output of the original statement is the same as that of the rewritten statement.

  Efficiency: The rewritten statement is more efficient in execution time and resource usage than the original statement.

- **3.2 Common Query Rewriting Technologies**

  Key query rewriting technologies of openGauss: constant expression simplification, subquery optimization, selection pushdown, and equivalence inference.

  - Simplification of Constant Expressions

    A constant expression is an expression whose calculation result is a constant in the SQL statement entered by a user. Constant expressions are classified into arithmetic expressions, logical operation expressions, and function expressions. Query rewriting can pre-calculate constant expressions to improve efficiency.
+ + **Example 1:** This statement is a typical arithmetic expression rewriting. After the rewriting, the 1+1 calculation is not repeated for each data record during execution. + + ``` + SELECT * FROM t1 WHERE c1 = 1+1; + SELECT * FROM t1 WHERE c1 = 2; + ``` + + **Example 2:** This statement is a typical logical operation expression. After rewriting, the condition is always false, and no result is returned. This avoids the execution of the entire statement. + + ``` + SELECT * FROM t1 WHERE 1=0 AND a=1; + SELECT * FROM t1 WHERE false; + ``` + + **Example 3:** This statement contains a function expression whose input parameters are constants. After rewriting, the function result is calculated directly in the optimization phase, avoiding the overhead of calling the function for each data record during execution. + + ``` + SELECT * FROM t1 WHERE c1 = ADD(1,1); + SELECT * FROM t1 WHERE c1 = 2; + ``` + + - Subquery Optimization + + The subquery structure is clearer and complies with users' reading and understanding habits, so the SQL statements entered by users usually contain a large number of subqueries. Subqueries are classified into correlated subqueries and non-correlated subqueries based on whether they can be solved independently. + + Correlated subquery: A correlated subquery contains conditions that depend on its parent query. For example: + + ``` + SELECT * FROM t1 WHERE EXISTS (SELECT t2.c1 FROM t2 WHERE t1.c1=t2.c1); + ``` + + In the statement, the subquery depends on the value of t1.c1 transferred by the parent query. + + Non-correlated subquery: A non-correlated subquery does not depend on its parent query and can be solved independently. For example: + + ``` + SELECT * FROM t1 WHERE EXISTS (SELECT t2.c1 FROM t2); + ``` + + In the statement, the subquery does not depend on the conditions of its parent query. + + A correlated subquery requires the parent query to produce a result first, which then drives the execution of the subquery. The execution efficiency of this nested-loop manner is relatively low. If the subquery can be pulled up to the same level as the parent query, the table in the subquery can be joined directly with the table in the parent query. Because the join operation can be implemented in multiple ways, the optimizer can select the optimal one, improving query execution efficiency. In addition, the optimizer can use the Join Reorder optimization rule to exchange the join order of different tables to generate a better execution plan. + + **Example:** This statement is a typical subquery rewriting. After rewriting, hash join can be used to improve the query performance. + + ``` + SELECT * FROM t1 WHERE t1.c1 IN (SELECT t2.c1 FROM t2); + SELECT * FROM t1 Semi Join t2 ON t1.c1 = t2.c1; + ``` + + - SELECT Pushdown and Equivalence Inference + + SELECT pushdown can greatly reduce the calculation workload of upper-layer operators. If the SELECT condition contains equality operations, equivalence inference can exploit them to derive a new SELECT condition. 
+ + For example, if two tables t1 and t2 each contain 100 rows of data \[1,2,3, ..100\], the query statement is as follows: + + ``` + SELECT t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1=t2.c1 WHERE t1.c1=1; + ``` + + Here, equivalence inference combines t1.c1=1 with the join condition t1.c1=t2.c1 to derive the new condition t2.c1=1, which can be pushed down to the scan of t2. + + Comparison before and after query rewriting + + ![](../figures/62-0.png) + + - Outer Join Elimination + + The main difference between an outer join and an inner join is that NULL values need to be supplemented for tuples that cannot be joined. If a filter condition in the SQL statement is null-rejecting \(that is, it filters out the supplemented NULL values\), the outer join can be directly eliminated. + + **Example:** After an outer join is converted into an inner join, the optimizer can apply more optimization rules to improve execution efficiency. + + ``` + SELECT * FROM t1 FULL JOIN t2 ON t1.c1 = t2.c1 WHERE t1.c2 > 5 AND t2.c3 < 10; + SELECT * FROM t1 INNER JOIN t2 ON t1.c1 = t2.c1 WHERE t1.c2 > 5 AND t2.c3 < 10; + ``` + + - DISTINCT Elimination + + If the DISTINCT column has a primary key constraint, the column can contain neither NULL values nor duplicate values, so the DISTINCT operation can be eliminated to reduce the calculation workload. + + **Example:** The primary key constraint on column c1 guarantees that the DISTINCT operation is not required. The statement is as follows: + + ``` + CREATE TABLE t1(c1 INT PRIMARY KEY, c2 INT); + SELECT DISTINCT(c1) FROM t1; + SELECT c1 FROM t1; + ``` + + - Expanding IN Predicate + + **Example:** The IN operator is changed to an equivalent filter condition so that indexes can be used to reduce the calculation workload. The statement is as follows: + + ``` + SELECT * FROM t1 WHERE c1 IN (10,20,30); + SELECT * FROM t1 WHERE c1=10 OR c1=20 OR c1=30; + ``` + + - Expanding View + + A view can logically simplify SQL writing and improve query usability. A view is virtual, so it needs to be expanded during query rewriting. + + Example: The view query can be rewritten into a subquery, and the subquery can then be simplified. The statement is as follows: + + ``` + CREATE VIEW v1 AS (SELECT * FROM t1,t2 WHERE t1.c1=t2.c2); + SELECT * FROM v1; + SELECT * FROM (SELECT * FROM t1,t2 WHERE t1.c1=t2.c2) as v1; + SELECT * FROM t1,t2 WHERE t1.c1=t2.c2; + ``` + + + diff --git a/content/en/post/2022/Core-Technologies-of-openGauss-Database-(II).md b/content/en/post/2022/Core-Technologies-of-openGauss-Database-(II).md new file mode 100644 index 0000000000000000000000000000000000000000..b061750b4d575a5531e7689092bd2ba01087ae9b --- /dev/null +++ b/content/en/post/2022/Core-Technologies-of-openGauss-Database-(II).md @@ -0,0 +1,196 @@ ++++ + +title = "Core Technologies of openGauss Database" + +date = "2020-07-30" + +tags = [ "Core Technologies of openGauss Database (II)"] + +archives = "2020-07" + +author = "Shujie Zhang" + +summary = "Core Technologies of openGauss Database (II)" + +img = "/en/post/2022/title/img6.png" + +times = "12:30" + ++++ + +# Core Technologies of openGauss Database \(II\) + +## Overview + +Previously, we introduced the principles of SQL parsing and query rewriting in query optimization. Now, we will introduce the technical principles of path search and cost estimation. + +## Path Search + +The core problem of the optimizer is to obtain the optimal solution for an SQL statement. In this process, the solution space corresponding to the SQL statement needs to be enumerated, that is, different candidate execution paths need to be enumerated. 
These execution paths are equivalent to each other, but their execution efficiency differs. The execution costs of the paths in the solution space are calculated, and an optimal execution path is finally obtained. Based on the method used to search for candidate execution paths, optimizers are divided into the following modes: + +**Bottom-up Mode** + +![](../figures/zh-cn_image_0000001251754525.gif) + +As shown in the preceding figure, in bottom-up mode, the logical execution plan is split: table scan operators are created first, join operators are then built on top of the scan operators, and finally a complete physical execution plan is formed. Because there are multiple types of physical scan operators and physical join operators, multiple physical execution paths are generated in this process. The optimizer selects the execution plan with the lowest estimated cost and transfers it to the executor for execution. + +**Top-down Mode** + +![](../figures/zh-cn_image_0000001251954519.gif) + +As shown in the preceding figure, this mode applies object-oriented ideas to the core functions of the optimizer. A logical plan is generated after lexical, syntax, and semantic analysis. Object-based optimization rules are then applied to the logical plan to generate multiple candidate logical plans. The plans are traversed top-down, and an optimal execution path is obtained by combining dynamic programming, cost estimation, and branch-and-bound techniques. + +- Random Search Mode + + Regardless of the bottom-up or top-down mode, enumeration takes too long when a large number of tables are joined. When there are many tables, some optimizers therefore search for paths through random enumeration, attempting to obtain a suboptimal execution plan from a randomly explored solution space. + + Currently, the optimizers of databases such as MySQL and PostgreSQL use the bottom-up mode, while the optimizers of SQL Server and of the open-source Calcite and ORCA use the top-down mode. Calcite is widely used in other open-source projects, such as Apache Storm, Apache Flink, Apache Kylin, Apache Drill, and SQL-Gremlin, due to its good scalability. openGauss uses a combination of the bottom-up mode and the random search mode. + + Regardless of the mode, the search process transforms a logical execution plan into a physical execution plan. For example, each table may have different scan operators, and a logical join operator may be converted into multiple different physical join operators. The following describes the specific physical operators. + +- Single-table Scan Path Search + + openGauss uses the bottom-up path search method, so path generation always starts from the single-table access paths. There are two types of single-table access paths: + + ◾ Full table scan: Data in the table is accessed row by row. + + ◾ Index scan: Indexes are used to access data in the table, generally in combination with predicates. + + The optimizer first estimates the cost of the different scan paths based on the data volume, the filter conditions, the available indexes of the table, and the cost model. 
For example, suppose a table is defined as **CREATE TABLE t1\(c1 int\)**, the table contains the consecutive integers from 1 to 100000000, and there is a B+ tree index on the c1 column. Then **SELECT \* FROM t1 WHERE c1=1;** can obtain the data by reading one index page and one table page, whereas a full table scan needs to read 100 million records to obtain the same result. In this case, the index scan path wins. + + An index scan is not superior to a full table scan in all cases; their relative advantage depends on how much data the predicate filters out. Generally, a database management system uses a B+ tree to create an index. If a predicate selects a large fraction of the rows, the B+ tree index causes a large number of random I/Os, which reduces the access efficiency of the index scan operator. For example, for the **SELECT \* FROM t1 WHERE c1\>0** statement, an index scan needs to access all data in the index and all data in the table, causing a large number of random I/Os, whereas a full table scan only needs to read all data in the table sequentially. Therefore, the cost of the full table scan is lower. + +- Multi-table Join Path Search + + The difficulty in generating multi-table paths lies in how to enumerate all join orders and join algorithms. Assume that two tables t1 and t2 are joined. According to the commutative law of relational algebra, the join order can be t1 x t2 or t2 x t1, and the physical join operator can be hash join, nested loop join, or merge join, so there are six paths available for selection. This number increases exponentially as the number of tables increases. Therefore, an efficient search algorithm is very important. + + openGauss usually uses the bottom-up path search mode. It first generates the scan path of each table. These scan paths are at the bottom layer \(first layer\) of the execution plan. At the second layer, the optimal paths for joining two tables are considered, that is, the possible two-table joins are enumerated. At the third layer, the optimal paths for three-table joins are considered, that is, the possible three-table joins are enumerated. This continues until the top layer, where the global optimal execution plan is generated. Assume that a JOIN operation is performed on four tables. The join path generation process is as follows: + + ◾ Optimal path of a single table: The optimal paths of the \{1\}, \{2\}, \{3\}, and \{4\} tables are generated in sequence. + + ◾ Optimal path of two tables: The optimal paths of the \{1 2\}, \{1 3\}, \{1 4\}, \{2 3\}, \{2 4\}, and \{3 4\} table sets are generated in sequence. + + ◾ Optimal path of three tables: The optimal paths of the \{1 2 3\}, \{1 2 4\}, \{1 3 4\}, and \{2 3 4\} table sets are generated in sequence. + + ◾ Optimal path of four tables: The optimal path of \{1 2 3 4\} is the final path. + + The core of the multi-table path problem is join ordering, which is an NP-hard problem. To find an optimal path for joins over multiple relations, a commonly used algorithm is cost-based dynamic programming. As the number of joined tables increases, the explosion of the search space degrades the path selection efficiency of the optimizer. Cost-based genetic algorithms and other random search algorithms can be used to mitigate this problem. The sketch below shows how to inspect the join order that the optimizer actually chooses. 
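+ + As a minimal illustration of join-order search \(a sketch: the tables t1, t2, t3 and the join column c1 are assumed for this example, and the chosen plan depends on the collected statistics and cost parameters\): + + ``` + CREATE TABLE t1(c1 int); + CREATE TABLE t2(c1 int); + CREATE TABLE t3(c1 int); + -- Collect statistics so that the cost model has accurate inputs. + ANALYZE t1; ANALYZE t2; ANALYZE t3; + -- EXPLAIN shows the join order and physical join operators selected by the optimizer. + EXPLAIN SELECT * FROM t1, t2, t3 WHERE t1.c1 = t2.c1 AND t2.c1 = t3.c1; + ``` + 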
+ + In addition, to prevent the search space from becoming too large, the following pruning policies can be used: + + ◾ Consider paths with join conditions first and delay Cartesian products as much as possible. + + ◾ During the search, perform lower-bound pruning on execution paths based on cost estimation, and abandon execution paths with relatively high costs. + + ◾ Retain execution paths with special physical attributes. For example, the results of some execution paths are ordered, and these paths may avoid re-sorting in a subsequent optimization process. + + **Optimization Based on Physical Attributes** + + A relation can be regarded as a set or a bag \(multiset\), and neither imposes any order on the data it contains. To improve computing performance, data structures or algorithms need to be used to preprocess the data distribution. These preprocessing methods either use the physical attributes \(such as ordering\) of a physical execution path or create physical attributes for it, and they often play a significant role in query optimization. + + +## B+ Tree + +The simplest way to query data in a table is to traverse all the data in the table. However, as the data volume increases, the cost of such a traversal increases. The B+ tree thus becomes a powerful weapon for efficient data query. + +In 1970, R. Bayer and E. McCreight proposed a balanced tree for external search, the B-tree. A B-tree builds a directory over the table data, similar to the table of contents of a book, so that the target data can be located quickly. + +As a data structure, the B+ tree is not directly related to the query optimizer. However, a database management system usually builds indexes based on the B+ tree. During query optimization, query efficiency can be improved through index scans and bitmap scans, which involve B+ tree indexes. + +## Hash Table + +A hash table is also a method of preprocessing data. The openGauss database uses hash tables in multiple places, or borrows the idea of hashing, to improve query efficiency. + +◾ A hash table can be used to implement the grouping operation because it classifies data. + +◾ A hash index can be created by using the hash algorithm. This kind of index is applicable to equality constraints. + +◾ Hash join is an important physical join path. + +## Sorting + +Sorting is also a method of preprocessing data. It is mainly used in the following aspects: + +◾ Sorting can be used to group data because identical values are adjacent after sorting. + +◾ The B-tree index needs to be created through sorting. + +◾ The physical join path Merge Join needs to be implemented through sorting. + +◾ The ORDER BY operation in the SQL language needs to be implemented through sorting. + +◾ When the data volume is small, all data can be loaded into memory, and internal sorting can be used. When the data volume is large, external sorting is required. Therefore, the sorting cost needs to be determined based on the data volume and the available memory size. + +## Materialization + +Materialization saves the intermediate result of a scan or join operation. If the intermediate result is large, it may need to be written to external storage, which incurs I/O cost; therefore, saving the result is relatively expensive. 
+ +The advantage of materialization is that if the inner table is read once but used multiple times, the intermediate result can be saved and reused. For example, suppose tables t1 and t2 are joined with t2 as the inner table, and after t2 is scanned only 5% of its data remains as the intermediate result while the other 95% is filtered out. Materializing that 5% means each tuple of t1 is joined against only this small intermediate result. Whether an intermediate result is materialized depends on the cost estimation model: generally, when physical paths are generated during physical optimization, costs are estimated for both the materialized and non-materialized paths, and the path with the lower cost is selected. + +## Cost Estimation + +The optimizer enumerates candidate execution paths based on the generated logical execution plan. To ensure efficient execution, the optimizer needs to select the path with the lowest cost and highest execution efficiency from these paths. How to evaluate the execution cost of these plan paths therefore becomes critical, and cost estimation completes this task. Based on collected data statistics, cost estimation builds models for the different plan paths to evaluate their costs and provide input for path search. + +- Statistics + + Statistics are the cornerstone of plan path cost estimation. Their accuracy plays an important role in the row count estimation and cost estimation of the cost model, and directly affects the quality of the query plan. openGauss allows you to use the ANALYZE statement to collect statistics on the entire database, a single table, a column, or multiple correlated columns. + + Statistics directly affect the accuracy of cost estimation, so the frequency of collecting statistics is a sensitive parameter. If statistics are collected too infrequently, they become stale; if they are collected too frequently, query performance is indirectly affected. + + Generally, a database management system provides a method for manually collecting statistics; openGauss supports this through the ANALYZE command. In addition, the system automatically determines whether to re-collect statistics based on data changes; for example, when the number of updates on a table exceeds a threshold, the statistics of the table should be updated automatically. During query optimization, if the optimizer finds that the statistics are severely stale, it can also initiate statistics collection. + + Table-level statistics include the number of tuples \(N\) and the number of pages occupied by the table \(B\). Column-level statistics include the attribute width \(W\), maximum value \(Max\), minimum value \(Min\), and most common values \(MCV\). Generally, a histogram \(H\) is created for each column to present the distribution of the column data by range, which facilitates the calculation of selectivity. + + Histograms, such as height-balanced histograms, frequency histograms, and multi-dimensional histograms, present the data distribution from different angles. openGauss uses height-balanced histograms: each bucket of the histogram covers the same frequency. The sketch below shows how these statistics can be collected and inspected. 
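+ + As a minimal sketch of statistics collection \(assuming a table t1 with columns c1 and c2; the multi-column form follows the openGauss ANALYZE syntax for correlated columns, and the PG\_STATS view is inherited from PostgreSQL\): + + ``` + -- Collect table-level and column-level statistics for t1. + ANALYZE t1; + -- Collect statistics on multiple correlated columns. + ANALYZE t1 ((c1, c2)); + -- Inspect the collected statistics: attribute width, MCVs, histogram buckets. + SELECT attname, avg_width, most_common_vals, histogram_bounds FROM pg_stats WHERE tablename = 't1'; + ``` + 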
+ +- Selectivity + + Based on the statistics, the cost estimation system knows how many rows a table contains, how many data pages it occupies, and the frequency of a given value. It can then calculate how much data is filtered out by a constraint \(for example, the WHERE condition in an SQL statement\). The ratio of the data remaining after the constraint is applied to the total data volume is called the selectivity. Selectivity = Number of tuples after the constraint is applied/Number of tuples before the constraint is applied. A constraint may be an independent expression, or a conjunction or disjunction of multiple expressions. For an independent expression, the selectivity is calculated from the statistics; for conjunctions and disjunctions, the selectivity is obtained through probability calculation, usually under the assumption that the expressions are independent: + + Conjunction: P\(A and B\) = P\(A\) × P\(B\); disjunction: P\(A or B\) = P\(A\) + P\(B\) – P\(A\)×P\(B\) + + Assume that the selectivity needs to be calculated for the constraint A \> 5 OR B < 3. First, the selectivities of A \> 5 and B < 3 are calculated separately. Because the statistics of columns A and B are available, the proportion of values greater than 5 in column A can be calculated from the statistics, and likewise for column B. Assume that the selectivity of A \> 5 is 0.3 and that of B < 3 is 0.5. The selectivity of A \> 5 OR B < 3 is calculated as follows: + + P\(A\>5 or B<3\) + + = P\(A\>5\) + P\(B<3\) – P\(A\>5\)×P\(B<3\) + + = 0.3 + 0.5 – 0.3×0.5 + + = 0.65 + + \(For the conjunction A \> 5 AND B < 3, the independence assumption gives 0.3×0.5 = 0.15.\) + + Due to the diversity of constraints, calculating the selectivity often encounters difficulties. For example, the calculation usually assumes that multiple expressions are "independent" of each other, but in practice a functional dependency may exist between columns. In such cases, the computed selectivity may be inaccurate. + +- Cost Estimation Method + + The optimizer of openGauss is a cost-based optimizer. For each SQL statement, openGauss generates multiple candidate plans, calculates an execution cost for each plan, and selects the plan with the lowest cost. Once a constraint's selectivity is determined, the number of rows that each plan path needs to process can be determined, and the number of pages to be processed can be calculated from the number of rows. When a plan path processes a page, an I/O cost is incurred. When a plan path processes a tuple \(for example, performs expression calculation on the tuple\), a CPU cost is incurred. Therefore, the overall cost of a plan can be expressed as follows: + + Total cost = I/O cost + CPU cost + + openGauss defines the cost of sequentially scanning one page as 1 and normalizes all other costs to this unit. For example, if the cost of scanning a random page is defined as 4, scanning a random page is considered four times as expensive as scanning a page sequentially. Similarly, if the CPU cost of processing one tuple is 0.01, processing a tuple is considered to cost 1% of a sequential page scan. From another perspective, openGauss divides costs into startup costs and execution costs. 
+ + Total cost = Startup cost + Execution cost + + Startup cost: the cost incurred from the start of SQL statement execution until the operator outputs its first tuple. Some operators have low startup costs; for example, a base table scan operator can output tuples as soon as it starts reading data pages, so its startup cost is 0. Other operators have relatively high startup costs; for example, the sort operator must read all the output of the lower-layer operators and can output the first tuple only after sorting these tuples, so its startup cost is relatively high. + + Execution cost: the cost incurred from the output of the first tuple until the query ends. It may include a CPU cost, an I/O cost, and a communication cost. The execution cost depends on the amount of data the operator needs to process and on the work the operator performs: the larger the amount of data and the heavier the task, the higher the execution cost. + + Total cost: Cost estimation is a bottom-up process. The costs of the scan operators are estimated first, and the costs of the join operators and non-SPJ operators are then estimated on top of them. + +- Notes + + 1. Selection-Projection-Join \(SPJ\): The three basic operators in relational algebra are SELECTION, PROJECTION, and JOIN. + + SELECTION: In **SELECT XXX FROM T WHERE XX = 5**, the WHERE filter condition indicates a selection operation. + + PROJECTION: In **SELECT c FROM t**, selecting column c indicates a projection operation. + + JOIN: In **SELECT xx FROM t1, t2 WHERE t1.c = t2.c**, tables t1 and t2 are joined. + + 2. Non-SPJ: Operators other than the SPJ operators, such as SORT, AGGREGATION, and UNION/EXCEPT. + + +## Summary + +This document describes the basic functions and principles of the SQL engine modules in terms of SQL parsing, query rewriting, cost estimation, and path search. You can further understand optimizer technology by analyzing specific SQL optimization cases. + diff --git a/content/en/post/2022/DB4AI-Enabling-Database-Native-AI-Computing-and-Facilitating-Service-Success-in-the-Data-Lake-Sce.md b/content/en/post/2022/DB4AI-Enabling-Database-Native-AI-Computing-and-Facilitating-Service-Success-in-the-Data-Lake-Sce.md new file mode 100644 index 0000000000000000000000000000000000000000..cf47b717b520e3bb0232231cab3ac21d0f878ecd --- /dev/null +++ b/content/en/post/2022/DB4AI-Enabling-Database-Native-AI-Computing-and-Facilitating-Service-Success-in-the-Data-Lake-Sce.md @@ -0,0 +1,162 @@ ++++ + +title = "DB4AI: Enabling Database Native AI Computing and Facilitating Service Success in the Data Lake Scenario" + +date = "2021-09-27" + +tags = [ "DB4AI: Enabling Database Native AI Computing and Facilitating Service Success in the Data Lake Scenario"] + +archives = "2021-09" + +author = "Wen Nie" + +summary = "DB4AI" + +img = "/en/post/2022/title/img6.png" + +times = "12:30" + ++++ + +# DB4AI: Enabling Database Native AI Computing and Facilitating Service Success in the Data Lake Scenario + +DB4AI tries to embed AI computing capabilities into databases to free users from tedious data migration, export, and management. It sounds reasonable to use a database to store massive data. 
However, when using a traditional database, users, whether algorithm engineers or AI beginners, have to export data from the database and then import it into an AI computing framework for their computing tasks. Data migration is troublesome and costly. The most direct method is to write the exported data to a file; before an AI computing task is executed, the program reads the data from the file and feeds it to the model for training. + +Here are some obvious challenges: + +- 1. Data security: + + Data carriers that are separated from the database do not have protection measures such as permission restriction and privacy protection, so the risk of data deletion and tampering is greatly increased. In some fields, such as finance and healthcare, data involves sensitive information, and during data migration the data needs to be masked to degrade sensitive information. + +- 2. Data migration cost: + + In AI computing, analysts and algorithm engineers need to focus on model design and verification instead of spending effort on data migration and sharing. However, the time and computing costs of exporting massive amounts of data are inevitable. + +- 3. Data version management: + + Data is added, deleted, modified, and queried in both AP and TP databases. For online learning, how do we capture new data in real time? For offline learning, how do we detect data distribution changes in a dataset in time? To cope with these two questions, traditional processing methods require more data management and control. When data drift occurs, users need to update the dataset to maintain data validity, which increases the cost. In addition, users need to store datasets of different versions based on different data processing methods and filter criteria, which further increases storage costs. + + +These problems do not exist in a database with DB4AI. The database is equipped with an AI framework, all computing is completed inside the database, and by eliminating data migration, DB4AI avoids the preceding problems by design. + +The following describes how to use the openGauss native AI framework: + +- 1. DB4AI-snapshot: data version control. + + DB4AI-Snapshots is a DB4AI feature used to manage dataset versions. Datasets are fixed by using snapshots, in either the materialized snapshot \(MSS\) mode, which uses the materialization algorithm to store the data entities of the original datasets, or the computed snapshot \(CSS\) mode, which uses a relative calculation algorithm to store incremental data information. Compared with the MSS mode, the CSS mode greatly reduces the space usage. + + This function involves the CREATE, PREPARE, SAMPLE, PUBLISH, and PURGE operations. Examples of some operations are as follows: + + - \(1\) Create a snapshot. + + ``` + openGauss=# create snapshot s1@1.0 comment is 'first version' as select * from t1; + schema | name + --------+-------- + public | s1@1.0 + (1 row) + ``` + + - \(2\) Sample a snapshot. + + A sampling rate of 0.3 is used, sampling is performed on the basis of the snapshot s1@1.0, and the suffix '\_sample1' is appended to the generated sub-snapshot. + + ``` + openGauss=# SAMPLE SNAPSHOT s1@1.0 STRATIFY BY id AS _sample1 AT RATIO .3; + schema | name + --------+---------------- + public | s1_sample1@1.0 + (1 row) + ``` + + This function can be used to generate a test set and a training set for AI computing. For example, in the following syntax, sampling is performed at a 2:8 ratio. 
+ + ``` + openGauss=# SAMPLE SNAPSHOT s1@1.0 STRATIFY BY id AS _test AT RATIO .2, AS _train AT RATIO .8; + schema | name + --------+-------------- + public | s1_test@1.0 + public | s1_train@1.0 + (2 rows) + ``` + + - \(3\) Publish a snapshot. + + In the snapshot feature, only snapshots in the published state can participate in AI computing. If the data in the current snapshot is ready, you can publish the snapshot to change its state. You can view the state of a snapshot in the **db4ai.snapshot** system catalog. + + ``` + openGauss=# select * from db4ai.snapshot; + id | parent_id | matrix_id | root_id | schema | name | owner | commands | comment | published | archived | c + reated | row_count + ----+-----------+-----------+---------+--------+----------------+-------+-----------------------------+---------------+-----------+----------+----------- + -----------------+----------- + 0 | | | 0 | public | s1@1.0 | owner | {"select *","from t1",NULL} | first version | t | f | 2021-09-16 + 17:15:52.460933 | 5 + 1 | 0 | | 0 | public | s1_sample1@1.0 | owner | {"SAMPLE _sample1 .3 {id}"} | | f | f | 2021-09-16 + 17:19:12.832676 | 1 + 2 | 0 | | 0 | public | s1_test@1.0 | owner | {"SAMPLE _test .2 {id}"} | | f | f | 2021-09-16 + 17:20:46.778663 | 1 + 3 | 0 | | 0 | public | s1_train@1.0 | owner | {"SAMPLE _train .8 {id}"} | | f | f | 2021-09-16 + 17:20:46.833184 | 3 + (4 rows) + ``` + + - \(4\) Purge a snapshot. + + ``` + openGauss=# PURGE SNAPSHOT s1_sample1@1.0; + schema | name + --------+---------------- + public | s1_sample1@1.0 + (1 row) + ``` + + +- 2. DB4AI native AI syntax: used for model training and inference + + This function uses the query syntax to complete AI computing tasks. AI operators have been added to the openGauss database; they are inserted into the execution plan to fully utilize the computing capability of the database for model training and inference tasks. + + Currently, the DB4AI engine in openGauss supports four algorithms: logistic regression, linear regression, support vector machine \(SVM\) classification, and K-means clustering. + + The CREATE MODEL and PREDICT BY syntaxes are used for model training and inference. + + CREATE MODEL: used for model training. After a model training task is complete, this syntax saves the trained model information to the **gs\_model\_warehouse** system catalog in the database. You can view the model information in the system catalog at any time; it stores not only the model description information but also the model training information. + + +PREDICT BY: used for inference. The database searches the system catalog for a model based on the model name and loads it into memory. The database feeds the test data into the in-memory model for inference and returns the result in the form of a temporary result set. + +The following is a simple example: + +- \(1\) Run **CREATE MODEL** for training. + + The K-means clustering algorithm is used as an example. + + ![](../figures/zh-cn_image_0000001251917015.jpg) + + The training syntax consists of four parts: model name, algorithm type, training set, and hyperparameter settings. + + The training set can be a table, view, or subquery. You only need to run one query statement to set the model hyperparameters and specify the training set. The subsequent steps, including data input and model saving, are automatically completed by the database. A text sketch of this syntax appears below. 
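+ + As a rough text sketch of the CREATE MODEL and PREDICT BY syntax described above \(the table kmeans_2d, the feature column position, and the hyperparameter num_centroids are illustrative assumptions, and option names may differ across openGauss versions\): + + ``` + -- Train a K-means model on a training-set table; hyperparameters follow WITH. + CREATE MODEL point_kmeans USING kmeans FEATURES position FROM kmeans_2d WITH num_centroids=3; + -- Run inference with the saved model; the result is returned as a temporary result set. + SELECT id, PREDICT BY point_kmeans (FEATURES position) AS cluster_id FROM kmeans_2d; + ``` + 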
+ + When the training task is complete, the database prints a success message. + + The model has been written into the **gs\_model\_warehouse** system catalog. You can view the model information by querying the table. + + ![](../figures/zh-cn_image_0000001252197021.jpg) + +- \(2\) Run **PREDICT BY** for inference. + + Use the saved model to perform inference tasks. An example is provided as follows: + + ![](../figures/zh-cn_image_0000001207677032.jpg) + + In the PREDICT BY syntax, you only need to specify the model name, test set, and feature name to complete the inference task. + +- Summary and Prospect + + DB4AI has always been a popular topic in the database field. By making databases intelligent, you can lower the threshold and cost in the AI computing process, and further release the computing resources of the database. Big data and AI computing are good partners, so databases for big data storage should not be independent of this system. The effective combination of the two not only facilitates the AI computing process, but also increases the possibility of optimizing the database performance. + + The native AI framework of the open-source openGauss database is evolving, and there must be many shortcomings. However, the vision of "all things intelligent" inspires countless R&D engineers to move forward. + + When you pursue your goal, do not stop. There is a long way to go. + + diff --git a/content/en/post/2022/Discussion-on-openGauss-Memory-Management.md b/content/en/post/2022/Discussion-on-openGauss-Memory-Management.md new file mode 100644 index 0000000000000000000000000000000000000000..88c49eba38cae13ea6db21d17e3ef4af7d2394ee --- /dev/null +++ b/content/en/post/2022/Discussion-on-openGauss-Memory-Management.md @@ -0,0 +1,114 @@ ++++ + +title = "Discussion on openGauss Memory Management" + +date = "2021-09-13" + +tags = [ "Discussion on openGauss Memory Management"] + +archives = "2021-09" + +author = "Shifu Li" + +summary = "Discussion on openGauss Memory Management" + +img = "/en/post/2022/title/img4.png" + +times = "12:30" + ++++ + +# Discussion on openGauss Memory Management + +Recently, a friend from the technical exchange group of openGauss asked how to allocate memory during code development. This article provides a preliminary answer to this question. The memory management of openGauss has been extended and reconstructed in many aspects to adapt to the multi-thread architecture and better meet enterprise application requirements. The openGauss memory management has been optimized in the following aspects: + +- Introduced the **jemalloc** open-source library to replace **glibc** for memory allocation and release, reducing memory fragments. +- Introduced the logical memory management mechanism to control the memory usage of processes, preventing the OOM problem. +- Introduced multiple memory contexts, such as shared memory context, stack memory context, and aligned memory context, to meet code development requirements in different scenarios. +- Introduced the AddressSanitizer \(ASan\) open-source library, helping locate memory leakage and memory overwriting problems in the debug version. Various memory query views are provided, helping users observe memory usage and locate potential memory problems. + +Based on the preceding functions and features, the following describes how to use the memory during coding and how to quickly locate problems from the perspectives of developers and users. + +- **1. 
Precautions for openGauss Memory Management Development** + + For the memory allocation and release interfaces in openGauss, the data structure and algorithm used by the general memory context do not change greatly. The new memory context is implemented by using the new data structure. + + By default, the AllocSetContextCreate function is used to create a memory context. Check whether the type of the memory context is specified. By default, the type is not specified. The STANDARD\_CONTEXT identifier is used to create a general memory context. The memory context is used only in a single thread. As the thread exits or the job is reset, the memory context needs to be cleared to prevent memory accumulation. The root node of the memory context in a thread is TopMemoryContext \(that is, t\_thrd.top\_mem\_cxt in the code\). Generally, memory application from TopMemoryContext is forbidden in the code. Subnodes are created from the corresponding memory context node based on the memory scope. Both the parent and child nodes are general memory contexts. + + Because openGauss is a multi-thread architecture, it usually uses shared memory to store key information for multi-thread access and update. When creating a memory context, you need to specify the SHARED\_CONTEXT identifier and ensure that the parent node is a shared memory context. The root node of the shared memory context is ProcessMemory \(that is, g\_instance.instance\_context in the code\). By default, no memory is allocated from the memory context. Generally, the memory that can be allocated from the shared memory context is limited. Because the memory is mainly used during job execution, developers need to limit the size of memory that can be allocated from the shared memory context \(by limiting the number of members or using the elimination mechanism\). It is recommended that the size be less than or equal to 200 MB. The operations of allocating or releasing memory in the shared memory context do not require extra locks. You can directly invoke palloc or pfree. However, you need to determine whether lock protection is required for subsequent operations of the pointer returned after the memory is allocated based on the invoking logic. + + The implementation mechanism of the stack memory context is simple. Different from the traditional memory context, the buddy algorithm is not used for alignment to the power of 2. Therefore, only 8-byte alignment is required during memory allocation, which saves a large amount of memory space. The stack memory context applies to the scenario where only palloc is called to allocate memory and the pfree operation is not required. When the memory context is not used, MemoryContextDelete or MemoryContextReset is performed for one time. For details, see the logic of using the memory by the hashjoin operator. The aligned memory context is used to align memory pages and applies to the ADIO scenario. It is seldom used in the current code. + + In addition to the scenario where the memory context is created by specifying MemoryContextCreate, the memory context can also be created implicitly when the hash\_create function is used to create a hash table. Therefore, hash tables created by hash\_create are classified into common hash tables \(used in a single thread\) and shared hash tables \(shared by the entire process\). When creating a shared hash table, you need to specify the **HASH\_SHRCTX** parameter, and the parent memory context specified by the parameter must be the shared memory context. 
+ + The preceding describes the basic methods of creating and using the memory context. The requirements for allocating and releasing memory contexts are as follows: + + Memory contexts are classified into thread-level contexts \(such as TopMemoryContext\), session-level contexts \(such as MessageMemoryContext\), job-level contexts \(such as ExecutorState\), and operator-level contexts \(such as HashJoin\). Memory cannot be allocated from higher-level memory contexts during job execution. + + Do not frequently allocate and release the same memory context. Even for temporary memory contexts, ensure that each operator allocates and releases the memory context only once. + + Release unused memory and memory contexts in a timely manner. After an operator finishes execution, release its memory context promptly. + + In principle, an operator other than the known memory-intensive operators \(hashjoin/hashagg/setop/material/windowagg\) must not consume more than 10 MB of memory. If more memory is required, evaluation criteria must be provided. + + The total size of the shared memory contexts must be controlled. In principle, the memory usage cannot exceed 200 MB; if it does, evaluation is required. + + Set a global variable pointer to null after its memory is released, that is, invoke the pfree\_ext function to release the memory and set the pointer to null. + + When array memory is allocated in one batch and the memory corresponding to an array subscript is accessed and written, apply an Assert check to the subscript to prevent out-of-bounds access. + + +- **2. Locating openGauss Memory Faults** + - 1 \> The error message "memory is temporarily unavailable" is displayed. + + Check whether the log contains "reaching the database memory limitation". If yes, the fault is caused by the logical memory management mechanism of the database, and you need to analyze the database views. Check whether the log contains "reaching the OS memory limitation". If yes, the fault is caused by a memory allocation failure of the operating system, and you need to check the parameter configuration of the operating system and the memory hardware. + + To analyze the logical memory protection of the database, check the following views: + + - Run the **pg\_total\_memory\_detail** command to check the memory usage of the internal modules of the database. When the value of **dynamic\_used\_memory** is greater than that of **max\_dynamic\_memory**, a message is displayed indicating that the memory is insufficient. If the value of **dynamic\_used\_memory** is smaller than that of **max\_dynamic\_memory** but the value of **dynamic\_peak\_memory** is greater than that of **max\_dynamic\_memory**, the memory was insufficient at some point in the past. If the value of **other\_used\_memory** is large, switch to the debug version to further locate the fault. The SQL statement used is **Select \* from pg\_total\_memory\_detail**. + + - If the value of **dynamic\_used\_shrctx** is large, query the **gs\_shared\_memory\_detail** view to check which memory context uses much memory. The SQL statement used is **Select \* from gs\_shared\_memory\_detail**. + + - If the value of **dynamic\_used\_shrctx** is not large, query the **gs\_session\_memory\_detail** view to check which memory context uses much memory. The SQL statement used is **Select \* from gs\_session\_memory\_detail order by totalsize desc limit 20;**. 
+ + - If a suspicious memory context is found but it is difficult to locate the problem, use **memory\_tracking\_mode** in the debug version to further locate the file and line number. + + - If no fault is found in the memory contexts, check whether the number of threads is too large; the CacheMemoryContext accumulated by many threads is a possible cause. + + - In the debug version, run the **gdb** script to print the allocation information of the memory context. + + - 2 \> The RES of the database node is high or the node breaks down, and the message "Out of Memory" is displayed. + + Read the information in **/var/log/messages** to check which process caused the fault; generally, it is the GaussDB process. If the fault is caused by the GaussDB process memory, check whether the **max\_process\_memory** parameter is correctly configured. + + If the configuration is proper, check whether the memory usage of **Other** in the **pg\_total\_memory\_detail** view is too high. + + If the memory usage increases rapidly and is mainly consumed by memory contexts, you can use jemalloc profiling to quickly locate where the memory is allocated. + + High **Other** memory usage may be caused by memory allocated with malloc by a third-party component or libpq. In this case, use the ASan tool to further locate the fault. If the fault still cannot be located, disable parameters \(such as **ssl** and **llvm**\) one by one to narrow it down. + + +- **3 Appendix** + - 1 \> Usage of jemalloc: + + In the debug version, run the following command to set environment variables: + + In **export MALLOC\_CONF=prof:true,prof\_final:false,prof\_gdump:true,lg\_prof\_sample:20**, the last **20** indicates that a heap file is generated every 2^20 bytes \(1 MB\). The value can be changed; however, a larger value produces fewer heap files but loses some memory allocation information. + + Run the **source** command to apply the environment variables and start the cluster. + + Use **jeprof** to process the heap files and generate PDF files. You can obtain **jeprof** from the open-source third-party binary directory **binarylibs/**_$\{platForm\}_**/jemalloc/debug/bin**. To use it, you need to run the **yum install graphviz** command to install graphviz. + + To generate a PDF file, run the following command: + + Full: jeprof --show\_bytes --pdf gaussdb \*.heap \> out.pdf + + Incremental: jeprof --pdf gaussdb --base=start.heap end.heap \> out.pdf + + - 2 \> Usage of ASan: + + Check the operating system configuration: **ulimit -v** is set to **unlimited**, and **vm.overcommit\_memory** is not **0**. + + Stop the cluster and add the following environment variable to the .bashrc file in standalone deployment: **export ASAN\_OPTIONS=halt\_on\_error=0:alloc\_dealloc\_mismatch=0:log\_path=/tmp/memcheck/memcheck**. In the environment variable, **log\_path** specifies the error information output location: the directory is **/tmp/memcheck/**, and the file name prefix is **memcheck**. 
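+ + For quick reference, the memory views discussed in section 2 can be queried as follows \(a consolidated sketch using the view names from this article\): + + ``` + -- Overall memory usage of the internal database modules. + Select * from pg_total_memory_detail; + -- If dynamic_used_shrctx is large: usage of each shared memory context. + Select * from gs_shared_memory_detail; + -- Otherwise: the 20 session memory contexts with the largest total size. + Select * from gs_session_memory_detail order by totalsize desc limit 20; + ``` + 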
diff --git a/content/en/post/2022/Dynamic-Data-Masking-of-openGauss.md b/content/en/post/2022/Dynamic-Data-Masking-of-openGauss.md new file mode 100644 index 0000000000000000000000000000000000000000..ca166a15e5fd00f2ad83f556374b9a8b0c4fce79 --- /dev/null +++ b/content/en/post/2022/Dynamic-Data-Masking-of-openGauss.md @@ -0,0 +1,156 @@ ++++ + +title = "Dynamic Data Masking of openGauss" + +date = "2021-03-24" + +tags = [ "Dynamic Data Masking of openGauss"] + +archives = "2021-03" + +author = "Meiting Xu" + +summary = "Dynamic Data Masking of openGauss" + +img = "/en/post/2022/title/img11.png" + +times = "12:30" + ++++ + +# Dynamic Data Masking of openGauss + +## 1 Background and Introduction + +- 1.1 Background of Data Masking + + With the rapid development and popularization of emerging technologies, such as Internet big data and cloud services, different data application modes, such as service cloudification, centralized data storage, and data sharing, have become the trend of future development. Cloud databases and cloud computing services are featured with easy deployment, low cost, high efficiency, and high reliability. As such, more and more consumers tend to store their personal data in the cloud rather than in personal portable hard disks. Actually, as data application scenarios become more complex, the risk of personal privacy data leakage and theft is increasing. In recent years, several major database information leakage events have occurred, which poses higher requirements for database security. + + Data masking is to process sensitive data by transforming or shielding the data or by other ways, aiming to protect privacy data and prevent data leakage and malicious snooping. When an enterprise or organization collects sensitive data, such as personal identity data, mobile phone numbers, and bank card numbers, and then exports the data \(in a non-production environment\) or directly queries the data \(in a production environment\), the data must be masked according to privacy protection laws and regulations. + +- 1.2 Introduction to Data Masking + + Data masking is classified into static data masking and dynamic data masking. In static data masking, data is masked before being distributed. Generally, data in the production environment is copied to the test environment or development library. Then, the exported data replaces the original data. In this way, the masked data becomes the source data for test and development. Dynamic data masking is closely related to the production environment and is mainly used in scenarios where production data is directly accessed. In dynamic data masking, sensitive data is masked in real time when it is accessed, and the consistency and validity of the source data can be guaranteed. + + **Figure 1-1** Static and dynamic data masking + + ![](../figures/110.png) + + Dynamic data masking and static data masking are applicable to different scenarios. You can select one based on the application scenario. The latest openGauss officially supports dynamic data masking. The following sections describe the dynamic data masking mechanism of openGauss. + + Currently, the mainstream dynamic data masking technology is achieved by two paths, that is, result set parsing and statement rewriting. + + - Result set parsing: The statements sent to the database are not rewritten and the data table structure needs to be obtained in advance. 
After the database returns the result, the data to be masked in the result set is identified based on the table structure, and the result records are modified one by one. + - Statement rewriting: The query statements that contain sensitive columns are rewritten. Nested outer functions are used to rewrite the sensitive columns involved in the query so that the database returns result sets that contain no sensitive data when running the query statements. + + In terms of performance, result set parsing requires that, after the database returns the result set, columns be parsed, rules be matched, and data be masked, and each row of the result set must be modified one by one. Therefore, the masking time is linearly related to the result set size, and the overall performance loss is large. In contrast, with statement rewriting, only the query statement itself needs to be parsed and rewritten, embedding a masking function around the sensitive columns in the statement. When executing the command, the database automatically executes the masking function, and the returned result set already contains masked data. In this method, only one query statement is rewritten and the result set is not parsed, so the performance loss is greatly reduced. openGauss adopts the statement rewriting method; the performance loss of masking 100,000 sensitive data records is less than 5%. + + In addition, complex commands generally contain many columns with the same name, table aliases, and nested queries. With result set parsing, you need to map the result set back to the actual query columns to identify whether a column needs to be masked; the more complex the query, the more difficult the identification and the lower the matching accuracy. In contrast, with statement rewriting, masking functions can be accurately nested around the columns involved in complex queries. + + Based on the preceding analysis, data masking based on statement rewriting is a desirable solution in terms of both performance and accuracy. Using statement rewriting, openGauss identifies the target nodes of the query tree based on the user-defined masking policy after the query tree is obtained through parsing, rewrites the nodes to be masked, constructs the masked query tree, and sends it to the database kernel for execution. The masked data is then returned. + +## 2 Dynamic Data Masking Solution of openGauss + +In the industry, the dynamic data masking function is usually loaded as a middleware plug-in or a data masking system, which masks data by intercepting the commands or result sets exchanged between the client and the database server. openGauss has a built-in dynamic data masking feature, so the database can mask data without external plug-ins, effectively reducing the risk of sensitive data leakage during transmission. + +openGauss has provided a complete built-in security policy model since version 1.1.0. Based on the model, users can define resource labels to identify sensitive data and define security policy mechanisms for different resource label types and content. Dynamic data masking is one of these security policy mechanisms. + +- 2.1 Built-in Security Policy + + The built-in security policy model identifies and protects user behaviors by configuring a series of security policies, providing the capability of protecting sensitive user data. 
+ + Resource labels are the basis of the security policy model. It is a collection of database resources in essence. To manage database resources in a unified manner, a data manager can add multiple database resources to the same resource label and configure policies for the resource label to manage database resources in batches. + + For example, if multiple data tables contain sensitive data columns such as **creditcard** which indicates a bank card number, these columns can be classified into the **creditcard\_label** resource label in a unified manner, and then the administrator can configure data masking policies for the **creditcard\_label** resource label to implement batch configurations for all related sensitive columns. + + Dynamic data masking is a security policy supported by the security policy model. After identifying sensitive data in user tables \(sensitive data discovery and identification are not within this scope\), the data controller configures data masking policies for resource labels that contain sensitive columns and restrict users' data access and information extraction behaviors based on different application scenarios to protect sensitive data. + + In general, resource labels are used to classify database resources and put these resources into various security policies for management. The dynamic data masking feature uses resource labels to identify sensitive data and matches masking policies to mask sensitive data. + +- 2.2 Core Idea of Dynamic Data Masking + + The dynamic data masking feature of openGauss is deployed together with the database as a built-in security plug-in. No additional adaptation is required for services. The security policy module of openGauss is responsible for parsing SQL statements and matching masking policies, and the service takes effect after masking policies are configured. + + **Configuring Masking Policies** + + The configuration of a masking policy involves masking functions, resource labels, and masking filters. + + - Masking functions indicate the methods used by a masking policy to mask target columns. Currently, openGauss provides seven masking functions, namely, **creditcardmasking**, **basicemailmasking**, **fullemailmasking**, **alldigitsmasking**, **shufflemasking**, **randommasking**, and **maskall**. They are applicable to different masking scenarios. + - Resource labels are a set of labels on which a masking policy takes effect. If a target column in a query exists in a label, sensitive data of the column will be masked based on the masking policy. Please note that the dynamic data masking feature of openGauss can mask labels that contain only data columns. + - Masking filters specify the user scenarios where a masking policy takes effect and mainly involve usernames, login clients, and IP addresses of users. The data masking policy takes effect only when a query user meets the threshold specified by a masking filter. + + The following example shows how to create a dynamic data masking policy. + + **Data Preparation** + + Check whether the built-in security policy is enabled. + + ![](../figures/zh-cn_image_0000001206967370.png) + + Prepare two tables containing the sensitive columns **creditcard** and **customername**. + + ![](../figures/111.png) + + **Policy Configuration** + + Log in to the database as a policy administrator \(with the **poladmin** permission\) and add the sensitive columns in the two data tables to the resource labels **creditcard\_label** and **customer\_label** for management. 
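+ + In SQL terms, the label configuration just described, together with the two masking policies defined next, can be sketched as follows \(table names such as tb_card and tb_order are illustrative assumptions, and the exact filter-clause syntax may vary across openGauss versions; the actual commands are shown in the figures below\): + + ``` + -- Group the sensitive columns into resource labels. + CREATE RESOURCE LABEL creditcard_label ADD COLUMN(tb_card.creditcard); + CREATE RESOURCE LABEL customer_label ADD COLUMN(tb_order.customername); + -- mask_card_pol: creditcardmasking, only for user1 via gsql from 10.11.12.13. + CREATE MASKING POLICY mask_card_pol creditcardmasking ON LABEL(creditcard_label) FILTER ON ROLES(user1), APP(gsql), IP('10.11.12.13'); + -- mask_name_pol: maskall for all query users by default. + CREATE MASKING POLICY mask_name_pol maskall ON LABEL(customer_label); + ``` + 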
+
+    ![](../figures/zh-cn_image_0000001252127325.png)
+
+    Create the following two masking policies:
+
+    -   **mask\_card\_pol**: Columns in the **creditcard\_label** label are masked by using the **creditcardmasking** function only when the **user1** user uses gsql to access the tables from the IP address 10.11.12.13.
+    -   **mask\_name\_pol**: By default, columns in the **customer\_label** label are masked by using the **maskall** function for all query users.
+
+    ![](../figures/zh-cn_image_0000001206807380.png)
+
+    **Triggering Data Masking Policies**
+
+    When the system receives a query command, **security\_plugin** intercepts the query tree generated by semantic analysis in the parser and selects the masking policy applicable to the user scenario based on the user login information \(username, client, and IP address\). Because masking policies are configured on resource labels \(containing only table columns\), the system identifies whether a target node of the query tree belongs to a resource label, matches the identified resource label with a masking policy, and rewrites the target node of the query tree based on the policy content. Then, the query tree is returned to the parser.
+
+    Because the query tree is rewritten inside the **security\_plugin** module, data visitors are unaware of the rewriting performed by the built-in security policy. They access data in the same way as executing a common query, with data privacy protected.
+
+    **Figure 2-1** Dynamic data masking architecture of openGauss
+
+    ![](../figures/112.png)
+
+    Based on the cases described in section "Configuring Masking Policies", you can query the data tables to trigger the masking policies.
+
+    **Trigger the data masking policy.**
+
+    If the **user1** user uses gsql to log in to the database and queries sensitive data in a way that matches the **mask\_card\_pol** policy, the system returns the masked data. The **user2** user does not match the policy, so the data queried by this user is not masked.
+
+    ![](../figures/113.png)
+
+    When the **user1** user or the **user2** user queries the **order** table, the **mask\_name\_pol** masking policy is triggered, so the **customername** column is masked.
+
+    ![](../figures/zh-cn_image_0000001251847329.png)
+
+
+## 3 Advantages of openGauss Dynamic Data Masking
+
+The dynamic data masking feature of openGauss focuses on identifying the users who access data. Masking filters are configured based on a specified user, client tool, and login IP address. The policy administrator can flexibly formulate different masking policies for different services and user scenarios, so that different users are granted different levels of access to sensitive data, adapting to various complex production environments. For example, in the finance and healthcare industries, counter service personnel can view only part of the information about ID cards and bank cards, while O&M administrators can query and maintain all user information. While ensuring the diversity of masking scenarios, the system performs a strict mutual exclusion check when a masking filter is specified. This prevents ambiguity in policy selection when a user matches multiple masking filters at the same time.
+
+**Figure 3-1** Data masking of openGauss based on filter criteria
+
+![](../figures/114.png)
+
+The dynamic data masking feature of openGauss focuses more on batch management of database resources.
In the security policy model, database resources to be managed and controlled are classified into labels. Operations on labels are operations on a specified cluster of resources, which greatly simplifies the management process and improves management efficiency. The dynamic data masking feature of other databases is based on a single column or table. The masking policy corresponds to the database resource. Even if you want to use the same masking function, you need to configure multiple masking policies for different database resources, which increases the policy configuration cost as well as the difficulty in subsequent O&M and batch resource policy management. Therefore, allocating database resources to be managed in batches to resource labels is the basis and one of the advantages of the dynamic data masking feature of openGauss. + +**Figure 3-2** Batch policy configuration for openGauss resource labels + +![](../figures/115.png) + +The openGauss kernel is equipped with dynamic data masking, which ensures the security of data transmission paths to some extent. However, external plug-ins may be bypassed. After an external plug-in rewrites the SQL statements sent from the client or the result set returned by the server, attackers can bypass the plug-in to directly send SQL statements to the database or intercept the source data result set returned by the database. As a result, the masking plug-in becomes invalid. Therefore, compared with masking by using external plug-ins, the dynamic data masking feature of openGauss can reduce the risk of sensitive data leakage on the transmission path to some extent. + +Combined with the production environment, the purpose of dynamic data masking is to mask sensitive data in the result set. The service side provides query interfaces, and then the interfaces trigger data masking. To ensure the security of sensitive data, openGauss is adapted to masking policies in most scenarios, including addition, deletion, and modification operations with returned values, MERGE INTO statements, common table expressions \(CTEs\), and subqueries. In this way, the interfaces for the service side to perform operations on sensitive data are enriched, instead of providing only data query interfaces. + +To improve usability, openGauss provides a set of simple policy configuration syntaxes, covering the addition, deletion, and modification of resource labels and masking policies. You can use the definition syntax to easily configure masking policies, simplifying the operation process for administrators. + +## 4 Prospect of openGauss Dynamic Data Masking + +The dynamic data masking feature of openGauss provides a simple and flexible policy configuration solution to prevent user privacy data from being disclosed to some extent. It is an indispensable part of the multi-layer security defense architecture of openGauss. + +In the future, the dynamic data masking feature of openGauss will provide more flexible policy configuration methods, such as user-defined function \(UDF\) masking and conditional masking, to support more flexible and rich privacy protection scenarios. 
+
diff --git a/content/en/post/2022/Everything-You-Want-to-Know-About-the-openGauss-Ledger-Database.md b/content/en/post/2022/Everything-You-Want-to-Know-About-the-openGauss-Ledger-Database.md
new file mode 100644
index 0000000000000000000000000000000000000000..b2b84e9b168e25a1e63413b7082e6d4b8c296509
--- /dev/null
+++ b/content/en/post/2022/Everything-You-Want-to-Know-About-the-openGauss-Ledger-Database.md
@@ -0,0 +1,133 @@
++++
+
+title = "Everything You Want to Know About the openGauss Ledger Database"
+
+date = "2021-10-22"
+
+tags = [ "Everything You Want to Know About the openGauss Ledger Databases"]
+
+archives = "2021-10"
+
+author = "Rui He"
+
+summary = "Everything You Want to Know About the openGauss Ledger Database"
+
+img = "/en/post/2022/title/img16.png"
+
+times = "12:30"
+
++++
+
+# Everything You Want to Know About the openGauss Ledger Database
+
+## 1 What Is a Ledger Database?
+
+Coins such as bitcoin, ethereum, and dogecoin, as synonyms of the blockchain, not only affect the profit and loss of some people's accounts, but also affect the prices of graphics cards and hard disks. However, as database-related technical personnel or enthusiasts, we are more concerned about the core technology.
+
+As a distributed ledger technology, blockchain overcomes the disadvantages of traditional centralized ledgers, such as low storage efficiency, low reliability, and vulnerability to single-point attacks, and technically ensures that the blockchain features distributed sharing, multi-party consensus, tamper-proofing, and traceability.
+
+Then, can we use blockchains to replace databases? The answer is no. The blockchain has notable disadvantages, such as low transaction performance and inconvenient queries. The bitcoin system can process only seven transactions per second; if it were used to process major bank transactions, the efficiency would be very low. The openGauss database features high efficiency, high reliability, and high security. Starting from openGauss and integrating blockchain technologies such as cryptographic tamper-proofing and multi-party consensus can improve the tamper-proofing and traceability capabilities of the database. This is how the idea of the ledger database came into being.
+
+A blockchain is usually divided into seven layers in terms of the architecture model: application layer, query layer, contract layer, actuator layer, consensus layer, network layer, and data layer. The following figure shows the technical points of each layer.
+
+![](../figures/311.png)
+
+Figure 1 Blockchain infrastructure model
+
+For a database to absorb the tamper-proof capability of the blockchain, the natural starting point is the bottom of the blockchain stack. At the data layer, the database provides the capability of recording data verification information and checking for data tampering. This ensures that the database can faithfully record the data changes caused by each transaction when processing sensitive information, forming a faithful and complete data change ledger. The openGauss ledger database introduced here records data change operations when data is modified in the openGauss kernel, ensuring that the entire data link can be queried and traced, and it provides an efficient tampering check API for the upper-layer application system or multiple parties to verify data consistency. The next chapter introduces the implementation principle of the ledger database and the corresponding changes to openGauss.
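+
+The following is a rough usage sketch based on the conventions of the openGauss ledger feature; the schema, table, and data are hypothetical, and the exact names of the history table and verification function may vary by version:
+
+```
+-- Create a tamper-proof (blockchain) schema; tables created in it are tamper-proof tables.
+CREATE SCHEMA ledgernsp WITH BLOCKCHAIN;
+
+-- Rows in this table automatically carry a hash digest maintained by the kernel.
+CREATE TABLE ledgernsp.usertable (id INT, name TEXT);
+INSERT INTO ledgernsp.usertable VALUES (1, 'alice');
+
+-- Every change is also recorded in the corresponding user history table.
+SELECT * FROM blockchain.ledgernsp_usertable_hist;
+
+-- Verify consistency between the user table and its history table.
+SELECT pg_catalog.ledger_hist_check('ledgernsp', 'usertable');
+```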
+
+## 2 Principles of the openGauss Ledger Database
+
+![](../figures/312.png)
+
+Figure 2 New modules in the ledger database
+
+When a client sends an SQL statement that modifies data in the database, the communication module receives the SQL statement, the parsing module converts it into a parse tree, and the tree is then optimized to generate an execution plan. After obtaining the execution plan, the execution module calls the storage layer API to modify the data. As shown in the preceding figure, verification information about the modification is recorded while the data is modified, and a modification verification module is provided so that users can call APIs to check for tampering. Both the recording and the checking of tampering information are based on the check information designed for database insert, delete, and update operations. The following describes this new tampering check information.
+
+-   2.1 Tamper-proof User Table
+
+    ![](../figures/zh-cn_image_0000001207772870.png)
+
+    Figure 3 Structure of the tamper-proof user table
+
+    The ledger database feature adopts schema-level isolation between tamper-proof tables and common tables. Tables in a tamper-proof schema carry verification information and record each data change operation \(insert, delete, and update\); such tables are called tamper-proof tables. A table in a common schema is called a common table.
+
+    A tamper-proof table has the structure shown in Figure 3. When a tamper-proof table is created, the system adds a hash column to it. When data is inserted or modified, the digest of the row data is calculated in real time and stored in this column. Data and digest are stored in the same tuple and are inseparable. Based on the one-way property of the hash function, the digest of each row serves as the logical representation of the row data in the digest space.
+
+-   2.2 User History Table
+
+    ![](../figures/zh-cn_image_0000001252412855.png)
+
+    Figure 4 Structure of the user history table
+
+    As shown in the preceding figure, the user history table contains four columns: xid, hash\_ins, hash\_del, and pre\_hash. Each row in the user history table corresponds to one row-level data change in the user table. **xid** records the XID at the time of the change, indicating the logical time sequence of operations. **hash\_ins** records the hash values of data rows inserted by INSERT or UPDATE. **hash\_del** records the hash values of data rows deleted by DELETE or UPDATE. Whether **hash\_ins** and **hash\_del** are empty therefore distinguishes the INSERT, DELETE, and UPDATE operations, as listed in the following table.
+
+    | Operation | hash\_ins | hash\_del |
+    | --------- | --------- | --------- |
+    | Insert | √ \(hash of the inserted data\) | -- |
+    | Delete | -- | √ \(hash of the deleted data\) |
+    | Update | √ \(hash of the new data\) | √ \(hash of the previous data\) |
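+
+    As an illustration with schematic values \(h\(·\) stands for the row hash; the XIDs are hypothetical\), inserting a row, then updating it, and finally deleting it would leave three entries in the user history table:
+
+    | xid | hash\_ins | hash\_del |
+    | --- | --------- | --------- |
+    | 1001 | h\(row v1\) |  |
+    | 1002 | h\(row v2\) | h\(row v1\) |
+    | 1003 |  | h\(row v2\) |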
+
+    **pre\_hash** combines the data of the current row with the pre\_hash value of the previous row in the history table to generate a digest of the user history table up to the current row. The calculation formula is as follows:
+
+    ![](../figures/zh-cn_image_0000001252700965.gif)
+
+    Here, **i** indicates the _i_th row of the user history table, and **rowdata\_i** indicates the concatenation **xid || hash\_ins || hash\_del** of the _i_th row.
+
+    When verifying the integrity of a user history table, the system recomputes the **pre\_hash** values from the row data in sequence and compares them with the **pre\_hash** values stored in the table. If they are inconsistent, the integrity of the user history table has been damaged.
+
+-   2.3 Structure of the Global Blockchain Table
+
+    ![](../figures/313.png)
+
+    Figure 5 Structure of the global blockchain table
+
+    The preceding figure shows the structure of the global blockchain table. Each row in the table corresponds to one modification of a tamper-proof table and is saved as a block. The global blockchain table mainly includes three parts. The block information stores bookkeeping data related to a block, including the block number and a timestamp. The operation information describes the operation performed by a user on a tamper-proof data table, including identification information such as the database name, username, and table name, as well as the corresponding SQL statement. The verification information stores the hashes used for consistency and integrity verification, including the table-level hash \(rel\_hash\) and the global hash \(global\_hash\).
+
+-   2.4 Tampering Check Algorithm
+
+    ![](../figures/314.png)
+
+    Figure 6 Generation of tamper-proof user table verification information
+
+    When a user calls the tampering check API, the system can concurrently generate table-level verification information from the tamper-proof user table and generate the overall verification information of the change records from the records in the corresponding user history table. The two pieces of verification information are then compared to determine whether the data is consistent with the recorded operations. If they are inconsistent, some data modification has bypassed the system records; that is, tampering has occurred.
+
+    Figure 6 shows how table-level verification information is generated from the row-level verification information of a tamper-proof user table. During verification, the system scans the data in the table, obtains the verification information of each row, and uses it to verify the row data. While scanning the row verification information, the overall verification information of the scanned data is continuously accumulated by a built-in exchangeable \(commutative\) aggregation algorithm for verification information. Because the aggregation algorithm is commutative, this process can be executed fully in parallel.
+
+    Figure 7 shows how the overall verification information of the change records is generated from the user history table. According to the structure of the user history table, the non-null elements in the **hash\_ins** column represent the verification information added by all operations, and the non-null elements in the **hash\_del** column represent the verification information removed. Taking the difference set of the two columns yields the set of remaining verification information.
Then, the exchangeable verification information aggregation algorithm is used to obtain the overall verification information of the change records in the user history table. In this process, thanks to the commutativity of the aggregation algorithm, **hash\_ins – hash\_del** may be computed for each row first, and the information can then be accumulated continuously during scanning. Hence, generation of the overall verification information of the change records can also be fully parallel.
+
+    ![](../figures/315.png)
+
+    Figure 7 Generating the verification information of the user history table
+
+
+## 3 Development Prospect of the openGauss Ledger Database
+
+The ledger database is the basis of openGauss tamper-proof data. Currently, it records verification information inside the database and provides a high-performance verification API, covering part of the storage-layer functionality of the blockchain technology stack. To complete the tamper-proof capability, high-performance remote execution capabilities between multiple databases and pluggable high-performance multi-party consensus protocols still need to be added. With these, the tamper-proof capability of openGauss will be complete and trusted by multiple parties. In the convergence of databases and blockchains, openGauss will continuously evolve to provide more easy-to-use and efficient tamper-proof databases.
+
diff --git a/content/en/post/2022/Full-encryption-Upgrade-and-Unaware-Encryption-Decryption-Principle-Analysis.md b/content/en/post/2022/Full-encryption-Upgrade-and-Unaware-Encryption-Decryption-Principle-Analysis.md
new file mode 100644
index 0000000000000000000000000000000000000000..32e21f38084479bf96ae6eb41de179e6cb0257e8
--- /dev/null
+++ b/content/en/post/2022/Full-encryption-Upgrade-and-Unaware-Encryption-Decryption-Principle-Analysis.md
@@ -0,0 +1,107 @@
++++
+
+title = "Full-encryption Upgrade and Unaware Encryption/Decryption Principle Analysis"
+
+date = "2021-10-13"
+
+tags = [ "Full-encryption Upgrade and Unaware Encryption/Decryption Principle Analysis"]
+
+archives = "2021-10"
+
+author = "Jinxiang Xiao"
+
+summary = "Full-encryption Upgrade and Unaware Encryption/Decryption Principle Analysis"
+
+img = "/en/post/2022/title/img10.png"
+
+times = "12:30"
+
++++
+
+# Full-encryption Upgrade and Unaware Encryption/Decryption Principle Analysis
+
+To implement encryption and decryption on the client, a large number of maintenance and management operations would normally have to be performed there, including data key management, sensitive data encryption, and SQL statement parsing and modification. openGauss encapsulates these complex operations in the client encryption driver to automatically encrypt and replace sensitive information. All encryption-related metadata is stored in the database so that the database can identify and process encrypted data, and the parameters related to sensitive information in SQL statements are encrypted so that query tasks do not disclose users' query intent. This reduces the complexity of security management and operations on the client and keeps application development unaffected. openGauss also provides a series of configuration APIs to meet users' requirements for encrypted fields, encryption algorithms, and secure key storage. The transparency of the openGauss fully-encrypted database makes task migration very convenient for users.
+ +The most secure protection for data confidentiality and personal privacy is encryption. The full-encryption technology can encrypt and decrypt data in specific applications and process data in the encrypted state in the database to implement full-lifecycle data protection. However, mainstream application encryption and decryption technologies in the industry generally involve a large number of operations such as key management, algorithm selection, SQL statement change, and data type conversion. Therefore, when data needs to be encrypted, a large amount of adaptation and migration work is required, and risks may be caused due to human negligence. + +The core of openGauss full-encryption is to parse all input and output statements of users on the client, identify defined sensitive data, and perform automatic encryption and decryption. The whole process of using a fully-encrypted database is as follows: A user inputs the syntax. A client sends the input to a server. The server executes and returns the result to the client. During the process, the only two steps that the user can perceive are inputting syntax and obtaining the result. The technical core of the fully-encrypted database is divided into several modules such as key management, parsing layer, encryption and decryption driver, implicit conversion layer, and data cache. Figure 1 shows the architecture of the fully-encrypted database. The following describes the modules related to user perception. + +![](../figures/28.png) + +Figure 1 Fully-encrypted database architecture + +## Automatic Syntax Parsing + +In the openGauss fully-encrypted database, a lightweight parser is added to the client. The lightweight parser reuses the original parser on the server. After a user enters the syntax, such as INSERT and SELECT statements, the client parser parses the lexicon and syntax to obtain the plaintext value and its location. The encryption and decryption driver automatically replaces the plaintext with the encrypted ciphertext and sends the query statement to the server. In this case, the data transmitted over the network and stored in the database is encrypted. After the server returns the ciphertext execution result to the client, the client encryption and decryption driver automatically decrypts the returned ciphertext data and returns it to the user. The only two steps that the user can perceive are inputting the syntax and obtaining the result. The user is not aware of the entire encryption process. In addition, the syntax is the same as that for non-encrypted data. + +In the upgraded openGauss fully-encrypted database, the client parses the syntax in functions and provides an API for decrypting the record data returned by functions. When functions are created, after the syntax of function bodies is parsed at the parsing layer, values to be encrypted in the function bodies are encrypted in the function processors by using the encryption driver. When functions are executed, after the syntax is parsed at the parsing layer, the syntax enters different processors according to the called functions, and parameters are encrypted by using an encryption driver. Users are unaware of the entire parsing and encryption process. The entire process is fully automated, and users do not need to perform other operations. 
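+
+To make the process concrete, the following is a rough sketch of how encrypted columns are declared from the client \(connected with the encryption switch enabled, for example `gsql -C`\); the key path, key names, and table definition are hypothetical, and exact option values may vary by openGauss version:
+
+```
+-- Client master key (CMK), kept outside the database (here in a local key store).
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = localkms, KEY_PATH = "key_path_value", ALGORITHM = RSA_2048);
+
+-- Column encryption key (CEK), stored in the database encrypted by the CMK.
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA_256);
+
+-- The sensitive column is declared as encrypted; the client driver encrypts
+-- parameters and decrypts results for it transparently.
+CREATE TABLE creditcard_info (
+    id_number   INT,
+    name        TEXT,
+    credit_card VARCHAR(19) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC)
+);
+```
+
+The SM2 and SM4\_SM3 algorithms described in the SCA section below can take the place of RSA\_2048 and the AES-based suites in the same DDL.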
+
+![](../figures/282.png)
+
+Figure 2 Creating a function/procedure by using a function or stored procedure in an encrypted equality query
+
+![](../figures/283.png)
+
+Figure 3 Executing a function/procedure by using a function or stored procedure in an encrypted equality query
+
+## Implicit Data Conversion
+
+The encrypted columns and their original data types are stored in the database, and the data type displayed to users is the original data type. Although the ciphertext stored in the database is in binary format, users are unaware of data encryption, decryption, and type conversion. The openGauss fully-encrypted database checks whether the returned data is encrypted; if so, the database calls the encryption driver to decrypt the data according to the original data type.
+
+In the upgraded openGauss fully-encrypted database, when a user creates an encrypted function, the server verifies the parameter types in the function parsing module. If the data or the column is encrypted, the server converts the input, output, and return parameters in the optimization module: the parameter types of the function are converted to the encrypted column type \(binary type\), and the original data types of the function parameters are saved in the database. Users are unaware of the parsing, encryption, and implicit data type conversion in the entire process; they do not even need to modify the function syntax.
+
+![](../figures/284.png)
+
+Figure 4 New modules for supporting functions in a fully-encrypted database
+
+## Data Caching
+
+Performance has always been a challenge for fully-encrypted databases. In the openGauss fully-encrypted database, the main performance loss lies in obtaining keys, encrypting and decrypting keys, obtaining encrypted column information, and encrypting and decrypting data. If the waiting time during statement execution is too long, user experience deteriorates. Therefore, the data cache module caches the client key information and encrypted column information on the client. When a user initializes a connection, the client master key path, column encryption key ciphertext, and encrypted column information are automatically obtained from the database; the key is automatically cached when the client is used for the first time. After the user disconnects, the key and encrypted column information are automatically destroyed. Caching this data improves query performance without affecting user experience.
+
+## JDBC API
+
+Considering the migration of tasks between different databases, more users prefer unified access APIs such as JDBC. In earlier versions, openGauss allowed only gsql to use a fully-encrypted database; JDBC is supported in the upgraded openGauss fully-encrypted database. The JDBC client reuses the original encryption driver through the Java Native Interface \(JNI\). The encryption driver has been restructured so that it can obtain data from the server through a set of interfaces compatible with libpq/JDBC. The encryption and decryption process when users call JDBC is the same as that of gsql, and users are unaware of the encryption and decryption performed in the driver during queries.
+
+![](../figures/285.png)
+
+Figure 5 New JDBC modules supported by the encrypted equality query
+
+## State Cryptography Administration \(SCA\) Algorithms
+
+Chinese cryptographic algorithms are algorithms issued by the Office of Security Commercial Code Administration \(OSCCA\) of the State Cryptography Administration.
Common algorithms include SM1, SM2, SM3, and SM4. SM1 is a symmetric encryption algorithm whose implementation is not open to the public, and it is not supported. SM2 is an asymmetric encryption algorithm based on ECC. SM3 is a message digest algorithm. SM4 is a standard block cipher algorithm for WLANs, that is, symmetric encryption. Chinese cryptographic algorithms are used in many user scenarios in China. To provide users with seamless migration and expand the application scenarios of fully-encrypted databases, the client master key \(CMK\) of the upgraded openGauss supports the asymmetric algorithm SM2 for encrypting a column encryption key \(CEK\). When the CEK is used to encrypt or decrypt user data, the symmetric algorithm SM4 is used, with SM3 for the integrity check. Currently, the openGauss fully-encrypted database supports the algorithms listed in Table 1.
+
+| Fully-encrypted Database | KeyStore | Key Encryption Algorithm | Data Encryption Algorithm |
+| --- | --- | --- | --- |
+| openGauss | localkms | RSA\_2048 | AEAD\_AES\_256\_CBC\_HMAC\_SHA\_256, AEAD\_AES\_128\_CBC\_HMAC\_SHA\_256 |
+| openGauss | localkms | SM2 | SM4\_SM3 |
+ +Table 1 Encryption algorithms supported by a fully-encrypted database + +Currently, the openGauss fully-encrypted database supports only the encrypted equality query. It provides a pure software solution in the full-encryption technology and has advantages of high security and high performance. In the future, more scenario capabilities will be opened, such as range query and fuzzy query. In addition, the TEE software and hardware integration solution will be used to form complete encrypted query and computing capabilities. In the fully-encrypted database field, openGauss will continuously evolve to provide more secure, easy-to-use, and efficient fully-encrypted databases. + diff --git a/content/en/post/2022/Guide-to-Adapting-HAProxy-to-openGauss.md b/content/en/post/2022/Guide-to-Adapting-HAProxy-to-openGauss.md new file mode 100644 index 0000000000000000000000000000000000000000..e47ab2c6201fc19f5c61d35727e8010f6459a52a --- /dev/null +++ b/content/en/post/2022/Guide-to-Adapting-HAProxy-to-openGauss.md @@ -0,0 +1,160 @@ ++++ + +title = "Guide to Adapting HAProxy to openGauss" + +date = "2021-09-18" + +tags = [ "Guide to Adapting HAProxy to openGauss"] + +archives = "2021-09" + +author = "Xin Dou" + +summary = "Guide to Adapting HAProxy to openGauss" + +img = "/en/post/2022/title/img5.png" + +times = "12:30" + ++++ + +# Guide to Adapting HAProxy to openGauss + +## 1. Introduction to HAProxy + +HAProxy is an open-source project and its code is hosted on GitHub. + +Code link: https://github.com/haproxy/haproxy + +HAProxy is a free, quick and reliable proxy, which provides HA, load balancing, and TCP- and HTTP-based proxy services. It supports virtual hosts. + +HAProxy implements an event-driven, single-process model that supports a large number of concurrent connections. + +## 2. Read/Write Isolation and Load Balancing Based on HAProxy + +HAProxy implements read/write isolation and load balancing for the openGauss cluster. The prerequisite is that Patroni manages the openGauss database cluster. The key is the configuration file. + +The HAProxy configuration consists of five parts: + +- **global**: sets global configuration parameters, which are related to processes and the operating system. + +- **defaults**: sets default parameters. These parameters can be used by the frontend, backend, and listen components. + +- **frontend**: frontend virtual node that receives requests. The frontend can specify the backend to be used based on the ACL rule. + +- **backend**: backend service cluster, which is a real server. One backend corresponds to one or more entity servers. + +- **listen**: combination of the frontend and backend. + +In the HAProxy configuration file, two listen modules are defined: **opengauss** and **opengauss\_balance**, which correspond to the write operations on the primary node and the read operations and load balancing on the standby node, respectively. In the listen modules, you can use the **server** keyword to set the backend server, that is, set the IP address and port number of each database node in the openGauss cluster managed by Patroni. Then, the database node information can be added to the HAProxy management. + +- 2.1 Write Configuration for the Primary Node + + ``` + listen opengauss # Used for monitoring the primary node. + bind *:5000 # One of the open ports, used to connect to the primary node. + option httpchk + # Enable health check for backend servers, supporting health monitoring [check]. 
http-check expect status 200
+    default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
+    # Monitoring interval [inter 3s], number of monitoring failures after which the backend server is considered unavailable [fall 3], number of monitoring successes after which the backend server is considered available [rise 2]; when the server is marked as down, close the connections between HAProxy and the backend server [on-marked-down shutdown-sessions].
+    server opengauss_ip1_port1 ip1:port1 maxconn 100 check port 8008
+    server opengauss_ip2_port2 ip2:port2 maxconn 100 check port 8008
+    server opengauss_ip3_port3 ip3:port3 maxconn 100 check port 8008
+    server opengauss_ip4_port4 ip4:port4 maxconn 100 check port 8008
+    # Use the server keyword to set the backend servers; opengauss_ipN_portN is the internal name of each backend server.
+    ```
+
+    **Theoretical analysis:**
+
+    HAProxy invokes the health monitoring representational state transfer \(REST\) application programming interface \(API\) endpoints exposed by Patroni to learn which nodes in the cluster are primary and which are standby.
+
+    Patroni provides a rich set of REST APIs. REST is a best practice for separating the frontend from the backend and a widely used set of development standards and specifications. Its features are summarized as follows:
+
+    - \(1\) Each uniform resource identifier \(URI\) represents a resource.
+    - \(2\) A presentation layer exists between the client and the server for transferring resources.
+    - \(3\) The client uses four HTTP verbs to perform operations on server resources to implement REST.
+
+    In the HTTP protocol, the four verbs indicating operation modes are GET, POST, PUT, and DELETE, which correspond to four basic operations: GET obtains resources, POST creates or updates resources, PUT updates resources, and DELETE deletes resources.
+
+    REST APIs in Patroni are used in the following scenarios:
+
+    Reference: [https://patroni.readthedocs.io/en/latest/rest\_api.html](https://patroni.readthedocs.io/en/latest/rest_api.html)
+
+    - \(1\) Used by Patroni for leader election.
+    - \(2\) Used by the patronictl tool to perform failover, switchover, reinitialization, restart, and reloading.
+    - \(3\) Used by HAProxy or other load balancers to perform HTTP health checks, and for monitoring.
+
+    In this document, HAProxy uses the Patroni REST API to monitor the health status of the primary and standby nodes in the cluster.
+
+    For GET requests in health monitoring, Patroni returns a JSON document containing the node status together with an HTTP status code. If the full JSON document is not needed and only some key information is required, OPTIONS can be used in place of GET.
+
+    For the following requests, the Patroni REST API returns HTTP status code 200 when the Patroni node holds the leader lock and is running as the primary node:
+
+    _\(1\) GET /_
+
+    _\(2\) GET /master_
+
+    _\(3\) GET /primary_
+
+    _\(4\) GET /read-write_
+
+    In the preceding configuration, **option httpchk** is equivalent to invoking the GET / request, and **http-check expect status 200** keeps only the nodes that return status code 200 during health monitoring. Because only the node currently running as the primary returns status code 200 for this check, it is the node selected. In this way, the IP address of HAProxy and port 5000 can be used as the proxy of the primary node in the cluster.
In the openGauss cluster, you can run the **gsql** command to connect to the primary node in the cluster.
+
+    ```
+    gsql -d postgres -h HAProxy_ip -p 5000 -U user -W password
+    ```
+
+-   2.2 Read and Load Balancing Configuration for the Standby Node
+
+    ```
+    listen opengauss_balance # Used for monitoring the standby node.
+    bind *:5001 # One of the open ports, used to connect to the standby node.
+    mode tcp
+    option tcplog
+    balance roundrobin # balance defines the load balancing algorithm. roundrobin indicates that polling is performed based on weights. This is the most balanced and fair algorithm when the processing time of servers is evenly distributed. This algorithm is dynamic, which means that a weight can be adjusted at run time.
+    option httpchk OPTIONS /replica
+    http-check expect status 200
+    default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
+    server opengauss_ip1_port1 ip1:port1 maxconn 100 check port 8008 inter 5000 rise 2 fall 2
+    server opengauss_ip2_port2 ip2:port2 maxconn 100 check port 8008 inter 5000 rise 2 fall 2
+    server opengauss_ip3_port3 ip3:port3 maxconn 100 check port 8008 inter 5000 rise 2 fall 2
+    server opengauss_ip4_port4 ip4:port4 maxconn 100 check port 8008 inter 5000 rise 2 fall 2
+    ```
+
+    **Theoretical analysis:**
+
+    **For the GET /replica request, HTTP status code 200 is returned when the Patroni node is in the running state, its role is replica, and the noloadbalance tag is not set.**
+
+    **In the preceding configuration, option httpchk OPTIONS /replica invokes the OPTIONS /replica request, replacing GET with OPTIONS to simplify the returned information; http-check expect status 200 keeps only the nodes that return status code 200 during health monitoring. Every running standby node in the cluster returns status code 200 for this check and is therefore selected as a standby node. balance roundrobin configures the load balancing algorithm, so that read requests are sent to each running standby node in polling mode. In this way, the IP address of HAProxy and port 5001 can be used as the proxy of the standby nodes in the cluster to implement load balancing.**
+
+    In the openGauss cluster, you can run the **gsql** command to connect to a standby node in the cluster.
+
+    ```
+    gsql -d postgres -h HAProxy_ip -p 5001 -U user -W password
+    ```
+
+-   2.3 Monitoring Page
+
+    In addition, an HAProxy monitoring page is configured. You can access this page to view the status of each node in the cluster.
+
+    ```
+    listen stats # Define a part named stats.
+    mode http
+    # Set the mode to HTTP.
+    bind *:7000 # One of the open ports, used for monitoring.
+    # Define the listening socket.
+    stats enable
+    # stats is the socket of a statistics page of HAProxy.
+    stats uri /
+    # Set the URI of the statistics page to /.
+    ```
+
+    In the preceding configuration, you can access http://ip:7000/ to view the monitoring page, where _ip_ indicates the IP address of the host on which HAProxy is deployed. The following figure shows the page information.
+
+    ![](../figures/zh-cn_image_0000001252065761.gif)
+
+    In the preceding figure, the cluster consists of one primary node and three standby nodes. The first module, **opengauss**, corresponds to write operations, and its green entry indicates the primary node in the cluster. The second module, **opengauss\_balance**, corresponds to read operations, and its green entries indicate the standby nodes in the cluster.
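+
+The Patroni health check endpoints that the preceding configurations rely on can also be queried manually, which is handy for troubleshooting. The following shell sketch \(the host name is hypothetical; port 8008 is the Patroni REST API port used in the configurations above\) mirrors the checks HAProxy performs:
+
+```
+# Returns HTTP 200 only on the node currently holding the leader lock (the primary).
+curl -i http://patroni_node1:8008/primary
+
+# Returns HTTP 200 only on a running replica without the noloadbalance tag.
+curl -i http://patroni_node1:8008/replica
+```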
+ + +In this way, HAProxy implements read/write isolation and load balancing for the openGauss cluster through Patroni. diff --git a/content/en/post/2022/Internal-Mechanism-of-the-openGauss-DB4AI-Framework.md b/content/en/post/2022/Internal-Mechanism-of-the-openGauss-DB4AI-Framework.md new file mode 100644 index 0000000000000000000000000000000000000000..4d50de71f7b575f785da35f9500ba6f1991327f3 --- /dev/null +++ b/content/en/post/2022/Internal-Mechanism-of-the-openGauss-DB4AI-Framework.md @@ -0,0 +1,100 @@ ++++ + +title = "Internal Mechanism of the openGauss DB4AI Framework" + +date = "2021-09-26" + +tags = [ "Internal Mechanism of the openGauss DB4AI Framework"] + +archives = "2021-09" + +author = "Wen Nie" + +summary = "Internal Mechanism of the openGauss DB4AI Framework" + +img = "/en/post/2022/title/img6.png" + +times = "12:30" + ++++ + +# Internal Mechanism of the openGauss DB4AI Framework + + + +## 1. Features of the openGauss AI Framework + +In the DB4AI direction, the database integrates AI capabilities to avoid problems caused by data migration during AI computing. Different from other DB4AI frameworks, the open-source inherent framework of openGauss completes AI computing in the database by adding AI operators. + +In addition to avoiding the problems caused by data migration, the AI framework of openGauss has the following advantages: + +- 1\) Extremely low learning threshold + + Currently, most mainstream computing frameworks, such as TensorFlow, PyTorch, and Keras, rely on Python as the script language. Although it is easy to learn Python, it still requires a certain learning cost. The current framework provides the CREATE MODEL and PREDICT BY syntax to complete AI training and inference tasks. Compared with Python, this syntax is more similar to the natural language and complies with people's intuition. + + ``` + CREATE MODEL point_kmeans USING kmeans FEATURES position FROM kmeans_2d WITH num_centroids=3; + SELECT id, PREDICT BY point_kmeans (FEATURES position) as pos FROM (select * from kmeans_2d_test limit 10); + ``` + +- 2\) Simplified data version management + + The snapshot function is added to the DB4AI feature. The database uses snapshots to fix data in a dataset at a specific time point. It can also save processed and filtered data. Data can be saved in full or incremental mode. In incremental mode, only data changes are stored each time. Therefore, the space occupied by snapshots is greatly reduced. You can directly obtain the corresponding data by using the snapshots of different versions. + +- 3\) Excellent performance experience + + Compared with many AIinDB projects, openGauss embeds model computing into the database by adding AI operators. Taking algorithm training as an example, data reading, model calculation and update, and final model storage are completed in an executor of the database. In this way, the computing capability of the database is fully utilized and released. The technical roadmap deep into the kernel makes our features faster than other higher-level invocation methods. + + ![](../figures/24.png) + + +Figure 1 Performance comparison with MADlib + +## Technical Principles and Advantages + +- 1\) DB4AI-Snapshot + + ![](../figures/zh-cn_image_0000001207516746.png) + + The DB4AI.snapshot feature requires users to specify the data to be filled in the SQL query statement for operating data storage, so as to create a snapshot. 
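+    For illustration, the following is a minimal sketch of the snapshot lifecycle described next, assuming hypothetical table and snapshot names; the exact DDL may vary by openGauss version:
+
+    ```
+    -- Create an initial snapshot from a query over the source data.
+    CREATE SNAPSHOT salary_data@1.0 COMMENT IS 'initial copy' AS
+        SELECT * FROM employee_salaries;
+
+    -- Derive a curated version from the initial one, for example dropping bad rows.
+    CREATE SNAPSHOT salary_data@2.0 FROM @1.0 USING (
+        DELETE WHERE salary IS NULL
+    );
+
+    -- Publish the curated version so it can be used for training; archive it later.
+    PUBLISH SNAPSHOT salary_data@2.0;
+    ARCHIVE SNAPSHOT salary_data@1.0;
+    ```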
The initial snapshot is always created as a real and reusable copy of the operated data, making the specific state of the data immutable. Therefore, the initial snapshot serves as the starting point for subsequent data collation, but it always allows backtracking to the exact state of the original data at the time when the initial snapshot was created. + + Because a created snapshot cannot be changed, you must prepare the snapshot before starting data collation. The prepared snapshot data can be modified collaboratively to prepare for model training, especially for data management. In addition, snapshots automatically track all changes by recording each operation as metadata in the DB4AI system directory, providing a complete integration history for the data. + + After the snapshot is prepared, you can publish it. Published snapshots are immutable, and the DB4AI system specifies that only published snapshots can be used for model training. This ensures data consistency among training tasks. + + Outdated snapshots are archived for data backup. In this state, the data remains unchanged but cannot be used to train a new model. At last, clear the snapshot, delete the data tables and views in the schema, and restore the storage space. It is important to note that, for the purpose of strict model source management, dependent snapshots cannot be deleted. + + By using GUC parameters, snapshots can be stored in materialized view mode or incremental mode. In incremental mode, the view and data table corresponding to a new snapshot store only the modified content compared with the parent snapshot, which greatly reduces the storage space. + +- 2\) DB4AI-Query + + The inherent AI framework is deeply embedded in the database kernel. It builds an execution plan that contains AI operators through query optimization and query execution. After the computing is complete, the storage module of the framework saves the model information. The AI framework is divided into three parts: query optimization, computing execution, and model storage. + + - **Query optimization:** + + The lexical rules and syntax rules CREATE MODEL and PREDICT BY are added to the framework as the AI computing entry. During query optimization, this module is responsible for simple input verification, including the validity of attribute names, whether algorithms are supported, and whether model names conflict with each other. After the verification is complete, this module generates a query plan based on the training and inference tasks. + + - **Computing execution:** + + The query execution module adds corresponding AI operators to the execution plan based on the required algorithm type and executes computing, including data reading and model calculation and update. Algorithms are highly cohesive and loosely coupled, have good algorithm scalability, and are friendly for developers to add algorithms. + + - Model storage: + + After the model training is complete, the executor transfers the model data in the form of tuples to the storage module and saves the model to the gs\_model\_warehouse system catalog. + + ![](../figures/241.png) + + The following uses CREATE MODEL as an example to describe how to implement the query statement used for model training. + + ![](../figures/zh-cn_image_0000001253422853.png) + + - Step 1: Perform lexical and syntax analysis \(Lex and Yacc\) on the query. Generate an analysis tree by identifying pattern categories and pattern combinations to check whether syntax errors exist in statements. 
- Step 2: The database performs semantic analysis and rewriting on each obtained analysis tree. When generating a query tree through semantic analysis, in the case of a CreateModelStmt command, the database first checks the algorithm type to determine whether the algorithm belongs to supervised or unsupervised learning. Based on this judgment, the system further checks whether the attributes, hyperparameters, and model names entered in the query statement are valid. After the verification is complete, semantic analysis generates a query tree and transfers it to the database executor.
+    - Step 3: The executor adds the operators of the corresponding algorithm to the execution plan based on the algorithm type and places the AI operator above the scan operator. During the calculation, the scanned data is fed to the algorithm model for computation and update. Finally, operator execution ends according to the iteration conditions set by the hyperparameters.
+    - Step 4: The executor transfers the trained model to the storage engine in the form of tuples. The model structure restored from the tuples is verified and saved to the gs\_model\_warehouse system catalog. You can view model information in the system catalog.
+
+As an original advanced feature of openGauss, DB4AI consolidates the new AI practices of openGauss and further expands its application fields. The out-of-the-box DB4AI function provided by openGauss effectively solves data migration problems in data warehouses and data lakes and improves information security during data migration. In the future, with the multi-mode and parallel computing advantages of openGauss, a unified data management platform will be formed to reduce the O&M and usage difficulties caused by heterogeneous and fragmented data storage. The release of the DB4AI feature is a key step in making openGauss a cutting-edge tool.
+
diff --git a/content/en/post/2022/Introduction-to-Multi-Core-Optimization-of-openGauss-on-Kunpeng-Servers.md b/content/en/post/2022/Introduction-to-Multi-Core-Optimization-of-openGauss-on-Kunpeng-Servers.md
new file mode 100644
index 0000000000000000000000000000000000000000..205e13df93af3a09f501c33b08a2613658f9b2f2
--- /dev/null
+++ b/content/en/post/2022/Introduction-to-Multi-Core-Optimization-of-openGauss-on-Kunpeng-Servers.md
@@ -0,0 +1,96 @@
++++
+
+title = "Introduction to Multi-Core Optimization of openGauss on Kunpeng Servers"
+
+date = "2021-03-03"
+
+tags = [ "Introduction to Multi-Core Optimization of openGauss on Kunpeng Servers"]
+
+archives = "2021-03"
+
+author = "Wengang Tian"
+
+summary = "Introduction to Multi-Core Optimization of openGauss on Kunpeng Servers"
+
+img = "/en/post/2022/title/img10.png"
+
+times = "12:30"
+
++++
+
+# Introduction to Multi-Core Optimization of openGauss on Kunpeng Servers
+
+Since the birth of integrated circuits, CPUs have gone through three development phases, the first of which was to increase the CPU clock frequency. Six years after integrated circuits were invented, Gordon Moore proposed Moore's law, predicting that the number of transistors on a chip would double every two years. Moore's law is not a law of nature, but the development of semiconductor chips has proved his prediction correct. Technology advances in chips bring benefits mainly in two aspects: smaller process nodes and larger silicon area.
However, when it goes to the 7 nm process or smaller, a quantum tunneling effect occurs, and mass production of chips becomes challenging, resulting in a sharp increase in manufacturing costs. + +The second phase is to increase the number of CPU cores. If the frequency of a single-core CPU cannot be increased, the number of CPU cores can be increased to improve computing power. However, the CPU is only a logical computing unit. The programs and data in the memory must be loaded to the CPU for computing. All CPU cores share a northbridge to read memory. As the number of cores increases rapidly, the performance bottleneck of the northbridge in response time becomes more and more obvious. + +The third phase is to achieve non-uniform memory access \(NUMA\) for CPU cores. To resolve the bottleneck of the memory controller that reads memory in the northbridge, the memory may be evenly allocated to each die. However, this causes asymmetric delays when different CPU cores access different memory. The reason is that although the memory is directly attached to the CPU, the response time is short when the CPU accesses the local address corresponding to the attached memory, while to access the memory data attached to other CPUs, which is called remote access, you need to access the memory data through the inter-connect channel, and the response time is relatively long. This is the origin of NUMA. In the NUMA architecture, the physical distance between the processor and the memory block of a NUMA node is called NUMA distance. You can use the numactl tool to query the CPU access distance. A Kunpeng server is used as an example, as shown in the following figure. + +![](../figures/zh-cn_image_0000001206801884.png) + +A NUMA-based CPU brings not only surging computing power to servers, but also great challenges to software development. From the perspective of the entire IT software stack, the first thing to support NUMA is the operating system. Currently, most enterprises use Linux. After NUMA appears, Linux also provides targeted optimization solutions to preferentially attempt to allocate space in the local memory of the CPU where the request thread is located. If the local memory is insufficient, useless pages in the local memory are eliminated first. However, NUMA provided by Linux is not suitable for databases because a database is a data-intensive and high-concurrency application and has many kernel data structures inside. These data structures are accessed by both the local CPU core and the remote CPU core. To improve data access performance, the database has its own shared data buffers, which are randomly accessed by service threads on each CPU core. From the perspective of the IT software stack, databases are the core of enterprise applications, and many applications have a database in the background. The database performance determines the overall throughput of many applications. As such, if the database performance cannot be maximized in NUMA and is not in a linear ratio to the number of cores, no enterprise is willing to pay for NUMA-based CPUs though they provide rich computing power. + +![](../figures/10.png) + +Nevertheless, NUMA is an inevitable trend in CPU development. If an enterprise-level database cannot adapt to hardware development, this database would be eliminated in enterprise database selection. + +openGauss is an open-source relational database management system. 
It optimizes the concurrency control algorithms, kernel data structures, data access paths, and more according to the hardware development trend of NUMA-based CPUs, releasing the multi-core computing power of processors and achieving 1.5 million tpmC on 2-socket 128-core Kunpeng servers. This document describes the NUMA-based multi-core optimization technology of openGauss on Kunpeng servers and provides a reference for other databases optimizing performance on Kunpeng servers. It is intended for database developers working on database performance optimization.
+
+## 1 Introduction to Multi-Core Optimization of openGauss on Kunpeng Servers
+
+![](../figures/zh-cn_image_0000001207121854.png)
+
+A database is a software system with high concurrency and severe data access conflicts. _Staring into the Abyss: An Evaluation of Concurrency Control with One Thousand Cores_, published in 2014 with database-field Turing Award winner Michael Stonebraker among its authors, shows that the transaction processing mechanism of a traditional database cannot effectively use the processing capabilities of dozens to hundreds of cores. A more in-depth analysis of the database shows that the causes lie in both the concurrency control algorithm and the implementation mechanism. To implement concurrency, the database uses many locks internally, such as Clog, WALInsert, WALWrite, ProcArray, and XidGen in openGauss. These locks are performance bottlenecks, and their essence is to protect kernel data structures. Therefore, openGauss needs to adjust and optimize these data structures to cope with multi-core concurrency in the NUMA architecture on Kunpeng servers. The main goals are to access CPU-local data wherever possible, eliminate single-point bottlenecks, and evenly distribute and access shared data.
+
+-   1.1 Binding Threads to Cores to Prevent Thread Migration Between Cores
+
+    ![](../figures/101.png)
+
+    To achieve access close to a CPU core, a thread first needs to be fixed to a specific core. The GUC parameter **numa\_distribute\_mode** in openGauss is used to control CPU core affinity. By setting this parameter, the service processing threads can be bound to specific NUMA nodes. openGauss adopts a client-server structure, and the client and server interact frequently through the network. To prevent network interrupt handling and service processing from interfering with each other, network interrupts are also bound to cores, and the cores bound to network interrupts must be separated from those bound to background service threads.
+
+-   1.2 Reconstructing NUMA-based Data to Reduce Cross-Core Access
+
+    ![](../figures/102.png)
+
+    WALInsertLock protects concurrent WAL insert operations. Multiple WALInsertLocks can be configured, for example, 16. There are two access patterns: \(1\) an Xlog insert, each of which requires one insert lock; \(2\) a traversal of all WALInsertLocks, which is used to check whether unacknowledged information exists during Xlog flushing.
+
+    In the original implementation, all WALInsertLocks were kept in one global array in the shared memory. This results in fierce contention between WALInsertLocks, and there is a high probability that remote memory access is involved, that is, cross-node and cross-package contention among multiple threads. In fact, WALInsertLock has multiple instances, and most operations require only one WALInsertLock at a time, so WALInsertLocks can be allocated by NUMA node.
In the optimized solution, the global WALInsertLock array is divided into multiple subarrays based on the number of NUMA nodes, and memory is allocated by NUMA node. Each transaction thread selects the WALInsertLock corresponding to the NUMA node to which the thread belongs. The WALInsertLock originally referenced an LWLock in the shared memory; to minimize cross-node contention, the LWLock is embedded directly into the WALInsertLock. In this way, the LWLocks are distributed across NUMA nodes, and cache line transfers are reduced.
+
+-   1.3 Partitioning Data to Reduce Thread Access Conflicts
+
+    ![](../figures/zh-cn_image_0000001207121858.png)
+
+    As an auxiliary of the Xlog, the Clog records the final state of transactions and is used to accelerate the determination of transaction states from logs.
+
+    There are four transaction states: **IN\_PROGRESS**, **COMMITTED**, **ABORTED**, and **SUB\_COMMITTED**, so each record occupies 2 bits. The Clog needs to be stored on disks: a page \(8 KB\) can contain 2^15 records, each log file \(segment = 2048 x 8 KB\) contains 2^26 records, and the log ID has 32 bits, so up to 256 Clog files may exist. The Clog files are stored in the **PGDATA/pg\_clog** directory. To accelerate access to these disk files, Clog access goes through a buffer pool; a unified SLRU buffer pool is used in the code.
+
+    Before optimization, the log buffer pool of the Clog was stored in one shared-memory area and was globally unique under the name **CLOG Ctl**; each worker thread used the thread-local variable ClogCtl to point to this resource. In high-concurrency scenarios, contention on this resource became a performance bottleneck. After optimization, logs are evenly distributed to the buffer pools of multiple shared-memory areas based on **PageNo** and are recorded in the thread-local object array ClogCtlData. The buffer pools are named **CLOG Ctl** _i_. Buffer pool objects and the corresponding global locks are added to the shared memory synchronously.
+
+    Similarly, other key internal shared data structures are also partitioned.
+
+    ![](../figures/zh-cn_image_0000001206961884.png)
+
+-   1.4 Adjusting Concurrency Control Algorithms to Reduce Single-Point Bottlenecks
+
+    ![](../figures/zh-cn_image_0000001251841849.png)
+
+    Before optimization, ProcArrayLock was required both for obtaining a transaction snapshot when a transaction starts and for clearing the snapshot state when the transaction ends. As the number of concurrent connections increases, the snapshots obtained by the global transaction manager keep growing.
+
+    After optimization, snapshots are computed from commit sequence numbers: each non-read-only transaction is assigned a transaction ID \(XID\) during running, and when a transaction is committed, the commit sequence number \(CSN\) is advanced and the mapping between the current CSN and the XID of the transaction is saved. The red vertical line indicates the time when the snapshot is captured. Without CSNs, the snapshot set corresponding to the red vertical line is \{2,4,6\}; with CSNs, the single value CSN 3 is used instead. In other words, modifications made by TX2, TX4, TX6, TX7, and TX8, whose CSNs are 4, 5, 6, 7, and 8 respectively, are invisible to the snapshot.
+
+-   1.5 Using ARM Atomic Instructions to Reduce the Computing Overhead
+
+    ![](../figures/zh-cn_image_0000001206801888.png)
+
+    Atomic operations generated by a traditional compiler use the load-linked/store-conditional \(LL/SC\) instruction pair by default.
    To obtain the write permission on shared variables, any core must obtain the ownership of all shared variables in an exclusive manner; that is, the modification can be performed only after the latest data is loaded into the L1 cache of that core. With multiple CPUs, fierce contention over this ownership degrades system performance.

    ARMv8.1 introduces large-system extensions \(LSE\), which provide atomic operations that perform the computation on the storage side, improving computing performance. Theoretically, in a multi-core system, the performance of LSE is better than that of LL/SC. The test result \(with GCC 6.4.0\) shows that the performance of LSE is three to five times that of LL/SC in high-concurrency scenarios.


## 2 Multi-Core Optimization Result of openGauss on Kunpeng Servers

![](../figures/zh-cn_image_0000001206801890.png)

The running of a database system involves multiple resources, including the CPU, memory, network I/O, and disk I/O. The ultimate goal of performance optimization is that the usage of every resource exactly reaches its bottleneck. In practice, however, the environment may consist of different hardware, so the optimization objectives may differ, while the basic objective of system optimization is to fully utilize the CPU capabilities. After the NUMA-based optimization, openGauss reaches 1,500,000 tpmC in the TPC-C test on the Kunpeng 920 processor, with CPU usage close to 95%. The data shows that openGauss fully utilizes the multi-core computing capabilities of the CPUs.

diff --git a/content/en/post/2022/New-Feature-of-openGauss-3-0-0-Parallel-Decoding.md b/content/en/post/2022/New-Feature-of-openGauss-3-0-0-Parallel-Decoding.md
new file mode 100644
index 0000000000000000000000000000000000000000..f98804e114f33d459ee95293bc715bd5eb211cd8
--- /dev/null
+++ b/content/en/post/2022/New-Feature-of-openGauss-3-0-0-Parallel-Decoding.md
@@ -0,0 +1,147 @@
+++

title = "New Feature of openGauss 3.0.0: Parallel Decoding"

date = "2022-03-15"

tags = ["Parallel Decoding"]

archives = "2022-03"

author = "Tianqing Wang"

summary = "New Feature of openGauss 3.0.0: Parallel Decoding"

img = "/en/post/2022/title/img16.png"

times = "17:30"

+++

# New Feature of openGauss 3.0.0: Parallel Decoding

## Introduction

With the rapid development of information technology, various types of databases emerge one after another, and logical replication, with which data can be synchronized between heterogeneous databases, is increasingly important. Currently, the average serial decoding performance of logical replication in openGauss is only 3 to 5 Mbit/s, which cannot meet the requirements of real-time synchronization under heavy service pressure. As a result, logs pile up and services in the production cluster are affected. Therefore, the parallel decoding feature is designed to let multiple threads decode in parallel, improving the decoding performance. In basic scenarios, the decoding performance can reach 100 Mbit/s.

## Design Idea: Why Parallel Decoding Is Considered

In the original serial decoding logic, a single thread reads logs, decodes them, and combines and sends the results. The following figure shows the main process and its time consumption.

![](../figures/zh-cn_image_0000001279474617.png)

As the figure shows, most of the time in the entire process is consumed in the decoding step, which therefore needs to be optimized by multi-thread decoding.
In addition, time consumed in the sending step is obviously the second, which needs to be optimized by batch sending. + +## Working Process: Parallel Decoding Message Sequence Diagram + +As shown in the following figure, in parallel decoding, worker threads on an openGauss DN are classified into three types: + +1. Sender/Collector, which receives decoding requests from a client, collects the results of each decoder, and sends the results to the client. Only one sender/collector is created for each decoding request. +2. Reader/Dispatcher, which reads WALs and distributes them to decoders for decoding. Only one reader/dispatcher is created for a decoding request. +3. Decoder, which is responsible for decoding the logs sent by the reader/dispatcher \(when the thread is decoding the logs, the logs are temporarily stored in the read change queue\) and sending the decoding results \(when the committed logs are not decoded, the results are temporarily stored in the decode change queue\) to the sender/collector. Multiple decoders can be created for a decoding request. + +![](../figures/zh-cn_image_0000001234914846.png) + +The message sequence is described as follows: + +1. A client sends a logical replication request to a primary or standby DN. In the logical replication options, you can set parameters to connect only to the standby node to prevent the primary node from being overloaded. + +2. In addition to the sender that receives requests from a client, DNs need to create a reader/dispatcher and several decoders. + +3. The reader reads and preprocesses Xlogs. If the logs contain TOAST columns, combine the TOAST columns. + +4. The dispatcher dispatches the preprocessed logs to each decoder. + +5. Each decoder performs decoding independently. You can set the decoding format \(.json, .txt, or .bin\) through configuration options. + +6. Each decoder sends the decoding result to the collector. + +7. The collector collects decoding results by transaction. + +8. To reduce the number of sending times and the impact of network I/O on the decoding performance, when the batch sending function is enabled \(that is, **sending-batch** is set to **1**\), the sender accumulates a certain number of logs \(the threshold is set to 1 MB\) and returns the decoding result to the client in batches. + +9. To stop the logical replication process, disconnect the logical replication connection to the DN. + +10. The sender sends the exit signal to the reader/dispatcher and decoders. + +11. After receiving the exit signal, each thread releases the occupied resources, cleans up the environment, and exits. + +## Technical Details 1: Visibility Reconstruction + +In logical decoding, historical logs are parsed. Therefore, it is important to determine the visibility of tuples in logs. In the original serial decoding logic, the active transaction linked list mechanism is used to determine the visibility. However, for parallel decoding, it is costly for each decoder to maintain an active transaction linked list, which adversely affects the decoding performance. Therefore, visibility reconstruction is performed, and the commit sequence number \(CSN\) is used to determine tuple visibility. For each XID, the visibility process is as follows: + +![](../figures/zh-cn_image_0000001279274373.png) + +The main process is as follows: + +1. Obtain a CSN used to determine the visibility based on XID. Ensure that the CSN value can be obtained based on any XID. If the XID is abnormal, a CSN indicating a specific status is returned. 
This CSN can also be used to determine the visibility. + +1. If the CSN has been committed, it is compared with the CSN in the snapshot. If the CSN of the transaction is smaller, the transaction is visible. Otherwise, the transaction is invisible. +2. If the CSN is not committed, the transaction is invisible. + +Based on the foregoing logic, in parallel decoding, logic for determining tuple snapshot visibility is sequentially determining snapshot visibilities of tuple **Xmin** \(XID during insertion\) and **Xmax** \(XID during deletion/update\). The overall idea is that if Xmin is invisible/uncommitted or Xmax is visible, the tuple is invisible; if Xmin is visible and Xmax is invisible/uncommitted, the tuple is visible. Each flag bit in the tuple maintains its original meaning and participates in visibility determination. + +## Technical Details 2: Batch Sending + +After parallel decoding is used, the time occupied by the decoding process is significantly reduced. However, in this case, the sender becomes a bottleneck, and costs of performing a complete sending process for each decoding result are excessively high. Therefore, the batch sending mode is used. The decoding results are collected temporarily and sent to the client when the threshold is exceeded. During batch sending, the length of each decoding result and the specified separator need to be recorded so that users of the parallel decoding function can split the logs to be sent in batches. + +## Usage Mode + +The following optional configuration items are added for parallel decoding: + +1. Decoder concurrency + +Configure **parallel-decode-num** to specify the number of decoders for parallel decoding. The value is an integer ranging from 1 to 20. The value **1** indicates that decoding is performed based on the original serial logic and the code logic of this feature is not used. The default value is **1**. When this item is set to **1**, the decoding format **decode-style** cannot be configured. + +2. Decoding whitelist + +Configure **white-table-list** to specify the table to be decoded. The value is a character string of the text type that contains table names in the whitelist. Different tables are separated by commas \(,\). Example: **select \* from pg\_logical\_slot\_peek\_changes\('slot1', NULL, 4096, 'white-table-list', 'public.t1,public.t2'\);** + +3. Decoding only on the standby node + +Configure the **standby-connection** parameter to specify whether to perform decoding only on the standby node. The value is of the Boolean type. If the value is **true**, only the standby node can be connected for decoding. When the primary node is connected for decoding, an error is reported and the decoding exits. If the value is **false**, there is no restriction. The default value is **false**. + +4. Decoding format + +Configure **decode-style** to specify the decoding format. The value can be **'j'**, **'t'** or **'b'** of the char type, indicating the JSON, text, or binary format, respectively. The default value is **'b'**, indicating binary decoding. + +5. Batch sending + +Configure the **sending-batch** parameter to determine whether to send decoding results in batches. The value is **0** or **1**. The default value **0** indicates that batch sending is disabled. The value **1** indicates that batch sending is enabled when the accumulated size of decoding results reaches or just exceeds 1 MB. + +The following uses JDBC as an example to describe how to perform parallel decoding. 
Perform the following configurations when establishing a connection:

```
PGReplicationStream stream = conn
        .getReplicationAPI()
        .replicationStream()
        .logical()
        .withSlotName(replSlotName)
        .withSlotOption("include-xids", true)
        .withSlotOption("skip-empty-xacts", true)
        .withSlotOption("parallel-decode-num", 10)
        .withSlotOption("white-table-list", "public.t1,public.t2")
        .withSlotOption("standby-connection", true)
        .withSlotOption("decode-style", "t")
        .withSlotOption("sending-batch", 1)
        .start();
```

The added logic is from the sixth line to the second line from the bottom, indicating that 10 concurrent decoding operations are performed, only the **public.t1** and **public.t2** tables are decoded, the standby-node-only connection is enabled, the decoding format is text, and the batch sending function is enabled. If a parameter value is out of range, an error is reported and the allowed value range is displayed.

## Auxiliary Functions: Monitoring Function

During parallel decoding, the **gs\_get\_parallel\_decode\_status\(\)** function is added to help locate the decoding performance bottleneck when the decoding speed is low. For each decoder on the current DN, this function checks the length of the read change queue \(which stores logs that have not been decoded\) and the length of the decode change queue \(which stores decoding results that have not been sent\).

This function has no input parameter. The return result contains four columns: **slot\_name**, **parallel\_decode\_num**, **read\_change\_queue\_length**, and **decode\_change\_queue\_length**.

**slot\_name** indicates the replication slot name and its type is text. **parallel\_decode\_num** indicates the number of parallel decoding threads and its type is integer. **read\_change\_queue\_length** records the read change queue length of each decoder and its type is text. **decode\_change\_queue\_length** records the length of the decode change queue of each decoder and its type is text. The usage is as follows:

![](../figures/zh-cn_image_0000001235074794.png)

If decoding stalls, execute the function on the decoding DN and check the value of **read\_change\_queue\_length** in the query result, which records the length of the log reading queue of each decoder. If this value is too small, log reading is blocked; in this case, check whether the disk I/O is sufficient. Then check the value of **decode\_change\_queue\_length**, which indicates the length of the decoding log queue of each decoder. If this value is too small, decoding is too slow, and you can increase the number of decoders. If both **read\_change\_queue\_length** and **decode\_change\_queue\_length** are large, the sending of decoded logs is blocked; in this case, check the log replay speed of the parallel decoding user in the target database. Generally, a decoding stall is caused by insufficient CPU, I/O, or memory resources; it can be avoided by decoding on the standby node and ensuring sufficient resources.

## Conclusion

Parallel decoding can greatly improve the decoding performance of logical replication, so the extra load it puts on decoding instances is usually a worthwhile trade-off. As a key technology for data replication between heterogeneous databases, parallel decoding plays an important role in openGauss.
+ diff --git a/content/en/post/2022/Permission-Management-Model-of-the-openGauss-Database-(Continued).md b/content/en/post/2022/Permission-Management-Model-of-the-openGauss-Database-(Continued).md new file mode 100644 index 0000000000000000000000000000000000000000..cc80a580d8876c5c76998da66dcc7bb2cb833c63 --- /dev/null +++ b/content/en/post/2022/Permission-Management-Model-of-the-openGauss-Database-(Continued).md @@ -0,0 +1,84 @@ ++++ + +title = "Permission Management Model of the openGauss Database Continued" + +date = "2021-08-02" + +tags = [ "Permission Management Model of the openGauss Database"] + +archives = "2021-08" + +author = "Rongrong Song" + +summary = "Permission Management Model of the openGauss Database (Continued)" + +img = "/en/post/2022/title/img3.png" + +times = "12:30" + ++++ + +# Permission Management Model of the openGauss Database \(Continued\) + +The openGauss database uses a role-based access control model. In addition to the classification of system permissions and object permissions described in the _Permission Management Model of the openGauss Database_, there are some advanced permission management mechanisms to meet customers' service requirements. + +## **1. Separation of Duties** + +Separation of duties is a supplement to the system permission management mechanism. The core idea is to separate the permissions for managing database objects, users, and audit logs to prevent high risks caused by excessive centralized rights of an administrator. You can set the GUC parameter **enableSeparationOfDuty** to **on** to enable the function. + +After separation-of-duty is enabled, the permission scope of **SYSADMIN** is narrowed down. That is, **SYSADMIN** does not have the permissions to create users or roles, or view or delete database audit logs. The **SYSADMIN**, **CREATEROLE**, and **AUDITADMIN** permissions are isolated from each other and do not affect each other. A user can be assigned only one attribute. + +After separation-of-duty is enabled, the permissions are divided as follows: + + + + + + + + + + + + + + + + +

System Permission

+

Permission Description

+

SYSADMIN

+

Allows users to create databases and tablespaces.

+

CREATEROLE

+

Allows users to create users and roles.

+

AUDITADMIN

+

Allows users to view and delete audit logs.

+
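
A hedged sketch of the effect \(the user names and password below are placeholders\): once **enableSeparationOfDuty** is set to **on** and the instance is restarted, user management is expected to be performed by a CREATEROLE user rather than by a SYSADMIN user.

```
-- Run by the initial user: create a dedicated user-management role.
CREATE USER usermgr WITH CREATEROLE PASSWORD 'XXXXXXXXXX';
-- Connected as usermgr, creating users succeeds.
CREATE USER app_user1 WITH PASSWORD 'XXXXXXXXXX';
-- Connected as a SYSADMIN user, the same CREATE USER statement is rejected,
-- because user management now belongs to CREATEROLE only.
```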
+ +## **2. Column-Level Access Control** + +In some service scenarios, some columns in a data table store important information and need to be invisible to users, but data in other columns needs to be viewed or operated by users. In this case, access control needs to be performed on a specific column in the data table to implement column-level access control for users. + +openGauss provides the GRANT and REVOKE statements to grant and revoke permissions on column objects. + +``` +Example 1: Grant the SELECT permission on the first column fir of the tbl table and the UPDATE permission on the second column sec of the tbl table to user1. +openGauss=# GRANT select(fir),update(sec) ON TABLE tbl TO user1; +GRANT +After the permission is granted, user user1 can perform the SELECT operation on the first column of the tbl table and the UPDATE operation on the second column. +Example 2: Revoke the SELECT permission on the first column fir of the tbl table from user1. +openGauss=# REVOKE select(fir) ON tbl FROM user1; +REVOKE +After the revocation, user user1 no longer has the permission to view data in the first column fir of the tbl table. +``` + +## **3. Row-Level Access Control** + +In actual services, users may be allowed to view only rows that meet specific conditions in a data table. In this case, row-level access control is required so that different users can read different results when performing the same SQL query, update, or delete operation. + +You can create a row-level security policy for a data table. The policy defines an expression that takes effect only for specific database users and SQL operations. When a database user accesses the data table, rows that meet the policy conditions are visible to the user, and rows that do not meet the policy conditions are invisible to the user. In this way, row-level access control is implemented for the user. + +![](../figures/zh-cn_image_0000001251894929.jpg) + +openGauss provides the CREATE, ALTER, and DROP ROW LEVEL SECURITY statements to create, modify, and delete row-level access control policies. + diff --git a/content/en/post/2022/Permission-Management-Model-of-the-openGauss-Database.md b/content/en/post/2022/Permission-Management-Model-of-the-openGauss-Database.md new file mode 100644 index 0000000000000000000000000000000000000000..99b6fedde9a7e564a93e5cd644027ebfd7874b35 --- /dev/null +++ b/content/en/post/2022/Permission-Management-Model-of-the-openGauss-Database.md @@ -0,0 +1,328 @@ ++++ + +title = "Permission Management Model of the openGauss Database" + +date = "2021-07-26" + +tags = [ "Permission Management Model of the openGauss Database"] + +archives = "2021-07" + +author = "Rongrong Song" + +summary = "Permission Management Model of the openGauss Database" + +img = "/en/post/2022/title/img3.png" + +times = "12:30" + ++++ + +# Permission Management Model of the openGauss Database + +The database stores a large amount of important data and sensitive information and provides data sharing services for authorized users with different permissions. Therefore, the database must have a complete security defense mechanism to defend against internal and external malicious attacks, to ensure that data is not lost, privacy is not disclosed, and data is not tampered with. Currently, the openGauss database has built an in-depth defense security system to enhance database security in applications. A complete permission management mechanism can effectively block unauthorized operations of malicious users. 
This document focuses on the permission management mechanism in the openGauss database. + +## 1 Common Permission Management Models + +There are three common permission management models: policy-based access control model, role-based access control model, and session- and role-based access control model. The openGauss database inherits the permission management mechanism of PostgreSQL, adopts the role-based access control model, and uses roles to organize and manage permissions, greatly simplifying permission authorization management. With the role mechanism, to grant permissions to a group of users with the same permissions, you only need to grant the permissions to a role and then grant the role to the group of users. You do not need to grant permissions to users one by one. In addition, the separation of roles and permissions can be used to control different permissions of different users and achieve mutual restriction and balance. + +With the development of databases and expansion of service scenarios, higher requirements are proposed for database permission separation and fine-grained permission management. The native permission division of PostgreSQL cannot meet diversified service security requirements; therefore, the openGauss database divides permissions at a finer granularity based on the permission models so that users can flexibly assign and manage user permissions based on actual services. + +## 2 openGauss Database Permission Levels + +In the logical structure of the object layout in the openGauss database system, multiple databases can be created under each instance, multiple schemas can be created under each database, and multiple objects can be created under each schema, such as tables, functions, views, and indexes, and each table can be measured by row and column to form the following logical levels: + +![](../figures/3.png) + +The permission system of the openGauss database is constructed based on the preceding logical distribution, as shown in the following figure. Each layer has its own permission control. + +![](../figures/31.png) + +For example, if a user wants to view data in a row of a data table, the user must have the LOGIN permission for logging in to the database, CONNECT permission for connecting to the database where the table is stored, USAGE permission for using the schema of the table, and SELECT permission for viewing the table; in addition, the row level security requirements for the row of data must be met. For details about permission concepts and classification, see the next section. + +## 3 Classification of openGauss Database Permissions + +In the openGauss database, users and roles are basically the same concepts. The only difference is that a role does not have the LOGIN permission by default when being created, and a schema with the same name as the role is not automatically created. That is, a role with the LOGIN permission can be considered as a user. In the following sections, users are used to connect to and access the database and execute SQL statements, and roles are used to organize and manage permissions. Different permissions are packaged into a role and assigned to a user so that the user can obtain all permissions of the role. In addition, after the permissions of a role are changed, the permissions of all members in the role are automatically changed. + +In the openGauss database system, permissions are classified into system permissions and object permissions. 
+ +- System permissions refer to the permissions of a user to use a database, such as logging in to a database, creating a database, creating a user or role, and creating a security policy. +- Object permissions refer to the permissions to perform special operations on database objects, such as databases, schemas, tables, views, and functions. Different objects are associated with different permissions, such as database connection permissions, permissions to view, update, and insert tables, and permissions to execute functions. It is meaningful to describe object permissions based on specific objects. + +**3.1 System Permissions** + +System permissions are also called user attributes. Users with specific attributes obtain the permissions corresponding to the specified attributes. System permissions cannot be inherited by roles. When creating a user or role, you can run the **CREATE ROLE/USER** SQL statement to specify some attributes for the user or role, or run the **ALTER ROLE/USER** statement to add or cancel user attributes for the user or role. + +The openGauss database supports granting and revoking of the following system permissions: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

System Permission

+

Permission Description

+

SYSADMIN

+

Allows users to create databases and tablespaces.

+

Allows users to create users and roles.

+

Allows users to view and delete audit logs.

+

Allows users to view data of other users.

+

MONADMIN

+

Allows users to view and manage permissions for the dbe_perf schema and monitoring views or functions in this schema.

+

OPRADMIN

+

Allows users to use Roach to back up and restore databases.

+

POLADMIN

+

Allows users to create resource tags, dynamic data masking policies, and unified audit policies.

+

AUDITADMIN

+

Allows users to view and delete audit logs.

+

CREATEDB

+

Allows users to create databases.

+

USEFT

+

Allows users to create foreign tables.

+

CREATEROLE

+

Allows users to create users and roles.

+

INHERIT

+

Allows a user to inherit the permissions of the role of the group to which the user belongs.

+

LOGIN

+

Allow users to log in to the database.

+

REPLICATION

+

Allows users to perform streaming replication operations.

+

VCADMIN

+

Allows users to create resource pools in associated logical clusters and manage permissions on the associated logical clusters.

+
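
openGauss provides the CREATE and ALTER ROLE/USER statements to grant and revoke system permissions. The following is a minimal sketch; the user name and password are placeholders:

```
-- Grant the CREATEDB permission at creation time.
CREATE USER u1 WITH CREATEDB PASSWORD 'XXXXXXXXXX';
-- Grant the AUDITADMIN attribute to the existing user, then revoke it.
ALTER USER u1 WITH AUDITADMIN;
ALTER USER u1 WITH NOAUDITADMIN;
```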
+ +The openGauss provides the CREATE and ALTER ROLE/USER statements to grant and revoke system permissions. The following is an example: + +**3.2 Object Permissions** + +By default, an object owner has all the operation permissions on the object, such as modifying, deleting, and viewing the object, granting object operation permissions to other users, and revoking granted operation permissions. ALTER, DROP, COMMENT, INDEX, VACUUM, and regrantable permissions for objects are inherent permissions of the owner and are implicitly owned by the owner. Object owners can remove their own common permissions, for example, making tables read-only to themselves or others. + +Object permissions can be inherited by roles. In this way, users can package these individual permissions into a role for permission management. The openGauss database supports the following object permissions for each type of database objects: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Object

+

Permission

+

Description

+

TABLESPACE

+

CREATE

+

Allows users to create tables in specified tablespaces.

+

ALTER

+

Allows users to run the ALTER statement to modify the attributes of a specified tablespace.

+

DROP

+

Allows users to delete specified tablespaces.

+

COMMENT

+

Allows users to define or modify comments for a specified tablespace.

+

DATABASE

+

CONNECT

+

Allows users to connect to a specified database.

+

TEMP

+

Allows users to create temporary tables in a specified database.

+

CREATE

+

Allows users to create schemas in a specified database.

+

ALTER

+

Allows users to run the ALTER statement to modify attributes of a specified database.

+

DROP

+

Allows users to delete a specified database.

+

COMMENT

+

Allows users to define or modify comments for a specified database.

+

SCHEMA

+

CREATE

+

Allows users to create new objects in a specified schema.

+

USAGE

+

Allows users to access objects contained in a specified schema.

+

ALTER

+

Allows users to run the ALTER statement to modify attributes of a specified schema.

+

DROP

+

Allows users to delete a specified schema.

+

COMMENT

+

Allows users to define or modify comments for a specified schema.

+

FUNCTION

+

EXECUTE

+

Allows users to use a specified function.

+

ALTER

+

Allows users to run the ALTER statement to modify attributes of a specified function.

+

DROP

+

Allows users to delete a specified function.

+

COMMENT

+

Allows users to define or modify comments for a specified function.

+

TABLE

+

INSERT

+

Allows users to run the INSERT statement to insert data into a specified table.

+

DELETE

+

Allows users to run the DELETE statement to delete data from a specified table.

+

UPDATE

+

Allows users to run the UPDATE statement on a specified table.

+

SELECT

+

Allows users to run the SELECT statement on a specified table.

+

TRUNCATE

+

Allows users to run the TRUNCATE statement on a specified table.

+

REFERENCES

+

Allows users to create a foreign key constraint on a specified table.

+

TRIGGER

+

Allows users to create a trigger on a specified table.

+

ALTER

+

Allows users to run the ALTER statement to modify attributes of a specified table.

+

DROP

+

Allows users to delete a specified table.

+

COMMENT

+

Allows users to define or modify comments for a specified table.

+

INDEX

+

Allows users to create indexes on a specified table and manage the indexes on the specified table.

+

VACUUM

+

Allows users to perform ANALYZE and VACUUM operations on a specified table.

+
+ +openGauss provides the GRANT and REVOKE statements to grant and revoke object permissions. + +**3.3 User Permission Set** + +According to the permission management mechanism of the openGauss database, a user has the union of the following types of permissions: + +![](../figures/32.png) + +In actual service applications, you are advised to configure accounts based on the least privilege principle and assign the minimum permissions to users on the basis that service requirements are met. + +## 4 openGauss Database Permission Evolution + +The openGauss database provides a series of system permissions and object permissions. You can combine permissions into roles based on actual services. However, with the feedback from users in various application scenarios, the openGauss database will provide a series of built-in roles in the future. The permissions that are frequently used in actual applications are packaged into built-in roles. Users can directly use the built-in roles to manage permissions. + diff --git a/content/en/post/2022/SQL-Engine-Source-Parsing.md b/content/en/post/2022/SQL-Engine-Source-Parsing.md new file mode 100644 index 0000000000000000000000000000000000000000..02fbbc49ef65755e431e998b71fdb4b1a08bb09d --- /dev/null +++ b/content/en/post/2022/SQL-Engine-Source-Parsing.md @@ -0,0 +1,2121 @@ ++++ + +title = "SQL Engine Source Parsing" + +date = "2021-08-27" + +tags = [ "SQL Engine Source Parsing"] + +archives = "2021-08" + +author = "Shujie Zhang" + +summary = "SQL Engine Source Parsing" + +img = "/en/post/2022/title/img4.png" + +times = "12:30" + ++++ + +# SQL Engine Source Parsing + ++++ + +## Query Optimization + +The query optimization process of the openGauss database is clear. From the perspective of source code organization, related code is distributed in different directories, as shown in Table 1. + +Table 1 Description of the query optimization module + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Module

+

Directory

+

Description

+

Query rewriting

+

src/gausskernel/optimizer/prep

+

Includes subquery optimization, predicate simplification and regularization, predicate passing closure, and other query rewriting optimization technologies.

+

Statistics

+

src/gausskernel/optimizer/commands/analyze.cpp

+

Generates various types of statistics, which can be used for selectivity estimation, row count estimation, and cost estimation.

+

Cost estimation

+

src/common/backend/utils/adt/selfuncs.cpp

+

src/gausskernel/optimizer/path/costsize.cpp

+

Includes selectivity estimation, row count estimation, and cost estimation.

+

Physical path

+

src/gausskernel/optimizer/path

+

Generates physical paths.

+

Dynamic programming

+

src/gausskernel/optimizer/plan

+

Searches for physical paths using the dynamic programming method.

+

Genetic algorithm

+

src/gausskernel/optimizer/geqo

+

Searches for physical paths using the genetic algorithm.

+
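
Before examining each module, note that their combined effect can be observed directly: EXPLAIN prints the plan that query optimization finally selects. A small sketch \(the table t1 is hypothetical\):

```
CREATE TABLE t1(c1 int, c2 int);
-- The printed plan reflects query rewriting, cost estimation, and path search.
EXPLAIN SELECT c1, count(*) FROM t1 WHERE c1 > 10 GROUP BY c1;
```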
+## Query Rewriting + +As the SQL language is diversified and flexible, the SQL statements written by different developers vary according to their experience. In addition, the SQL statements can be automatically generated by tools. The SQL language is a descriptive language. A database user only describes a desired result, and does not care about a specific data obtaining manner. The SQL language entered into the database is usually not in their optimal forms, and may include some redundant information. Mining this information could improve the exaction efficiency. Query rewriting is to convert SQL statements entered by users into more efficient equivalent SQL statements. It has two basic principles. + +- \(1\) Equivalence: The output of the original statement is the same as that of the rewritten statement. +- \(2\) Efficiency: The rewritten statement is more efficient in execution time and resource usage than the original statement. + +Query rewriting is mainly equivalent transformation based on relational algebra. This transformation usually meets the laws of commutativity, associativity, distributive, and decomposition, as shown in Table 2. + +Table 2 Equivalent transformation based on relational algebra + + + + + + + + + + + + + + + + + + + + +

Equivalent transformation

+

Content

+

Law of commutativity

+

A x B == B x A

+

A ⨝B == B ⨝ A

+

A ⨝F B == B ⨝F A Where, F is the join condition.

+

Π p(σF (B)) == σF (Π p(B)) Where, F∈p

+

Law of associativity

+

(A x B) x C==A x (B x C)

+

(A ⨝ B) ⨝ C==A ⨝ (B ⨝ C)

+

(A ⨝F1 B) ⨝F2 C==A ⨝F1 (B ⨝F2 C) Where, F1 and F2 are join conditions.

+

Law of distributive

+

σF(A x B) == σF(A) x B Where, F ∈ A

+

σF(A x B) == σF1(A) x σF2(B)

+

Where, F = F1 ∪ F2, F1∈A, F2 ∈B

+

σF(A x B) == σFX (σF1(A) x σF2(B))

+

Where, F = F1∪F2∪FX, F1∈A, F2 ∈B

+

Π p,q(A x B) == Π p(A) x Π q(B) Where, p∈A, q∈B

+

σF(A x B) == σF1(A) x σF2(B)

+

Where, F = F1 ∪ F2, F1∈A, F2 ∈B

+

σF(A x B) == σFx (σF1(A) x σF2(B))

+

Where, F = F1∪F2∪Fx, F1∈A, F2 ∈B

+

Law of decomposition

+

Π P=p1,p2,…pn(Π Q=q1,q2,…qn(A)) == Π P=p1,p2,…pn(A) Where, P ⊆ Q

+

σF1(σF2(A)) == σF1∧F2(A)

+
+ + +Query rewriting can achieve optimization based on relational algebra theories, such as predicate pushdown and subquery optimization, or based on heuristic rules, such as outer join elimination and table join elimination. In addition, there are some optimizations related to a specific optimization rule and an actual execution process. For example, based on parallel scanning, consider to execute an aggregation operator by phase. Aggregation is divided into different phases, to improve execution efficiency. + +From another perspective, query rewriting is equivalent transformation based on optimization rules and belongs to logical optimization, which can also be called rule-based optimization. How do we measure the performance improvement of an SQL statement after query rewriting? It is very important to evaluate query rewriting based on costs. Therefore, query rewriting can be not only based on experience, but also based on costs. + +Taking predicate transfer closure and predicate pushdown as examples, predicate pushdown can greatly reduce the calculation workload of upper-layer operators to achieve optimization. If the predicate condition has equivalent operations, then equivalence inference can be implemented by equivalent operations, so as to obtain a new selection condition. + +For example, if two tables t1 and t2 each contain a total of 100 rows of data \[1,2,3, ..100\], the query statement **SELECT t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1=t2.c1 WHERE t1.c1=1** may be optimized by select pushdown and equivalent inference, as shown in Figure 1. + +![](../figures/62.png) + +Figure 1 Comparison before and after query rewriting + +As shown in Figure 1 \(1\), 100 rows of data in tables t1 and t2 are scanned and joined to generate the intermediate result, and then the selection operation is performed. The final result contains only one row of data. If equivalence inference is used, it may be obtained that values in \{t1.c1, t2.c1, 1\} are equivalent to each other. Therefore, a new selection condition of t2.c1 = 1 is deduced, and the condition is pushed down to t2. In this way, the rewritten logical plan in Figure 1 \(4\) is obtained. As shown in the preceding figure, the rewritten logical plan only needs to obtain one piece of data from the base table. During join, there is only one piece of data in the inner and outer tables. In addition, the filter criteria in the final result are not required, greatly improving the performance. + +At the code level, the architecture of query rewriting is roughly shown in Figure 2. + +![](../figures/41.png) + +Figure 2 Architecture of query rewriting + +- \(1\) Pulling up a subquery: When a subquery appears in RangeTableEntry, it stores a subquery tree. If the subquery is not pulled up, a subquery execution plan is formed after query optimization. The upper-layer execution plan and subquery plan perform nested loops to obtain the final result. In this process, the query optimization module does not have so many optimization choices for the subquery. If the subquery is pulled up, it is joined with tables at the upper layer. + +- \(2\) Constant replacement: Because the constant reference speed is faster, the variable can be replaced by the calculated constant. The implementation function is preprocess\_const\_params. + +- \(3\) Replacing common table expressions \(CTEs\) with subqueries: Theoretically, CTEs have the same performance as subqueries. However, subqueries can be further pulled up, rewritten, and optimized. 
Therefore, subqueries are used to replace CTEs. The implementation function is substitute\_ctes\_with\_subqueries.

- \(4\) Replacing multi count \(distinct\) with multiple subqueries: If this type of query occurs, multiple count \(distinct\) queries are replaced with multiple subqueries, each of which contains a count \(distinct\) expression. The implementation function is convert\_multi\_count\_distinct.

- \(5\) Pulling up sublinks: Sublinks appear in constraints such as WHERE and ON, and are usually used together with predicates such as ANY, ALL, IN, EXISTS, and SOME. Although sublinks are clear from the logical level of statements, the efficiency varies. For example, the execution result of a correlated sublink is related to the parent query; that is, each tuple of the parent query corresponds to a re-evaluation of the sublink. In this case, you can pull up the sublink to improve efficiency. In this part, ANY and EXISTS sublinks are pulled up to SemiJoin or Anti-SemiJoin. The implementation function is pull\_up\_sublinks.

- \(6\) Reducing ORDER BY: In the parent query, database records may need to be reordered. Therefore, reducing the number of ORDER BY statements in the subquery can improve efficiency. The implementation function is reduce\_orderby.

- \(7\) Deleting NotNullTest: Deleting redundant non-null tests can improve efficiency. The implementation function is removeNotNullTest.

- \(8\) Lazy Agg rewriting: Lazy aggregation is used to reduce the number of aggregation times. The implementation function is lazyagg\_main.

- \(9\) Pulling up subqueries: A lot of work has been done to optimize the join operation to obtain a better execution plan. The implementation function is pull\_up\_subqueries.

- \(10\) UNION ALL optimization: The UNION ALL operation at the top layer is processed to convert the UNION ALL set operation to the AppendRelInfo operation. The implementation function is flatten\_simple\_union\_all.

- \(11\) Expanding an inherited table: If an inherited table is used during the execution of a query statement, the inherited table exists as a parent table. The parent table needs to be expanded into multiple inherited tables. The implementation function is expand\_inherited\_tables.

- \(12\) Expression preprocessing: This module standardizes expressions in the query tree, including replacing the alias Var generated by links, evaluating constant expressions, leveling constraints, and generating execution plans for sublinks. The implementation function is preprocess\_expression.

- \(13\) Processing the HAVING clause: In the HAVING clause, some constraints can be converted into filter conditions \(corresponding to WHERE\). The constraints in the HAVING clause are split to improve efficiency.

- \(14\) Outer join elimination: The purpose is to convert an outer join to an inner join to simplify the query optimization process. The implementation function is reduce\_outer\_join.

- \(15\) Full join rewriting: Rewrites a full join into a combination of other join types, as shown by the example below. For instance, the statement **SELECT \* FROM t1 FULL JOIN t2 ON TRUE** can be converted to **SELECT \* FROM t1 LEFT JOIN t2 ON TRUE UNION ALL \(SELECT \* FROM t1 RIGHT ANTI FULL JOIN t2 ON TRUE\)**. The implementation function is reduce\_inequality\_fulljoins.

The following uses pulling up sublinks as an example to describe the most important subquery optimization in openGauss. A sublink is a special subquery.
It appears in constraints such as WHERE and ON, and is often accompanied by predicates such as ANY, EXISTS, ALL, IN, and SOME. The openGauss database sets different SUBLINK types for different predicates. The code is as follows:

```
typedef enum SubLinkType {
    EXISTS_SUBLINK,
    ALL_SUBLINK,
    ANY_SUBLINK,
    ROWCOMPARE_SUBLINK,
    EXPR_SUBLINK,
    ARRAY_SUBLINK,
    CTE_SUBLINK
} SubLinkType;
```

The openGauss database defines an independent structure SubLink for sublinks, which describes the sublink type and operators. The code is as follows:

```
typedef struct SubLink {
    Expr xpr;
    SubLinkType subLinkType; /* type of the sublink */
    Node* testexpr;          /* outer-query test for ALL/ANY/ROWCOMPARE */
    List* operName;          /* originally specified operator name */
    Node* subselect;         /* subselect as Query* or raw parse tree */
    int location;            /* token location, or -1 if unknown */
} SubLink;
```

Figure 3 shows the interface functions related to pulling up sublinks.

![](../figures/42.png)

Figure 3 Interface functions related to sublinks

The main process of pulling up sublinks is implemented in the pull\_up\_sublinks function, which invokes pull\_up\_sublinks\_jointree\_recurse to recursively process nodes in Query-\>jointree. Table 3 lists the input parameters of the function.

Table 3 Input parameters of the function

Parameter

+

Type

+

Description

+

root

+

PlannerInfo*

+

Input parameter, which is used to query the context information about the optimization module.

+

jnode

+

Node*

+

Input parameter, which indicates the node to be recursively processed. The value can be RangeTblRef, FromExpr, or JoinExpr.

+

relids

+

Relids*

+

Output parameter, which is a set of tables involved in the jnode parameter.

+

Return value

+

Node*

+

Node after the sublink is pulled up.

+
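
Before walking through the three jnode cases below, a concrete query helps to show what is being pulled up. In the hedged example that follows \(tables t1 and t2 are hypothetical\), the IN condition is an ANY-type sublink that can be converted to a semi join:

```
CREATE TABLE t1(c1 int, c2 int);
CREATE TABLE t2(c1 int, c2 int);
-- Without pull-up, the sublink would be re-evaluated for tuples of t1;
-- after pull-up, the plan joins t1 and t2 with a semi join instead.
EXPLAIN SELECT * FROM t1 WHERE t1.c1 IN (SELECT c1 FROM t2);
```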
+ + + There are three types of jnodes: RangeTblRef, FromExpr, and JoinExpr, which are processed by the pull\_up\_sublinks\_jointree\_recurse function separately. + +- RangeTblRef + + RangeTblRef is a leaf node of Query-\>jointree and is the condition for ending the function recursion. When the program reaches this branch, there are two cases: + + - **\(1\) If the current statement is a single table query and has no join, the recursion proceeds until it ends. Then, the program checks whether the sublink meets other pull-up conditions.** + - \(2\) If the query statement has joins, during the recursion of From-\>fromlist, JoinExpr-\>larg, or JoinExpr-\>rarg, when it traverses the RangeTblRef leaf node, relids \(a set of tables\) of the RangeTblRef node is returned to the upper layer to determine whether the sublink can be pulled up. + +- FromExpr + + - \(1\) Traverse the nodes in From-\>fromlist recursively, and then invoke the pull\_up\_sublinks\_jointree\_recurse function recursively for each node until reaching the RangeTblRef leaf node. + - \(2\) Invoke the pull\_up\_sublinks\_qual\_recurse function to process From-\>qual and process ANY\_SUBLINK or EXISTS\_SUBLINK that may occur. + +- JoinExpr + + **\(1\) Invoke the pull\_up\_sublinks\_jointree\_recurse function to recursively process JoinExpr-\>larg and JoinExpr-\>rarg until reaching the RangeTblRef leaf node. In addition, check whether the sublink can be pulled up based on the join type.** + + \(2\) Invoke the pull\_up\_sublinks\_qual\_recurse function to process JoinExpr-\>quals and process ANY\_SUBLINK or EXISTS\_SUBLINK that may occur. The **available\_rels1** parameter of the pull\_up\_sublinks\_qual\_recurse function varies depending on the join type. + + In addition to ANY\_SUBLINK and EXISTS\_SUBLINK, the pull\_up\_sublinks\_qual\_recurse function also performs query rewriting for OR clauses and EXPR-type sublinks. The code logic of pulling up sublinks of the Expr type is as follows: + + - \(1\) Use the safe\_convert\_EXPR function to check whether the sublink can be pulled up. The code is as follows: + + ``` + //Check whether the current SQL statement meets the condition for pulling up the sublink. + if (subQuery->cteList || + subQuery->hasWindowFuncs || + subQuery->hasModifyingCTE || + subQuery->havingQual || + subQuery->groupingSets || + subQuery->groupClause || + subQuery->limitOffset || + subQuery->rowMarks || + subQuery->distinctClause || + subQuery->windowClause) { + ereport(DEBUG2, + (errmodule(MOD_OPT_REWRITE), + (errmsg("[Expr sublink pull up failure reason]: Subquery includes cte, windowFun, havingQual, group, " + "limitoffset, distinct or rowMark.")))); + return false; + } + ``` + + - \(2\) Use the push\_down\_qual function to extract related conditions from the sublink. The code is as follows: + + ``` + Static Node* push_down_qual(PlannerInfo* root, Node* all_quals, List* pullUpEqualExpr) + { + If (all_quals== NULL) { + Return NULL; + } + + List* pullUpExprList = (List*)copyObject(pullUpEqualExpr); + Node* all_quals_list = (Node*)copyObject(all_quals); + + set_varno_attno(root->parse, (Node*)pullUpExprList, true); + set_varno_attno(root->parse, (Node*)all_quals_list, false); + + Relids varnos = pull_varnos((Node*)pullUpExprList, 1); + push_qual_context qual_list; + SubLink* any_sublink = NULL; + Node* push_quals = NULL; + Int attnum = 0; + + While ((attnum = bms_first_member(varnos)) >= 0) { + RangeTblEntry* r_table = (RangeTblEntry*)rt_fetch(attnum, root->parse->rtable); + + // This table must be a base table. 
                // Otherwise, it cannot be processed.
                if (r_table->rtekind == RTE_RELATION) {
                    qual_list.varno = attnum;
                    qual_list.qual_list = NIL;

                    // Obtain the condition that contains the special varno.
                    get_varnode_qual(all_quals_list, &qual_list);

                    if (qual_list.qual_list != NIL && !contain_volatile_functions((Node*)qual_list.qual_list)) {
                        any_sublink = build_any_sublink(root, qual_list.qual_list, attnum, pullUpExprList);
                        push_quals = make_and_qual(push_quals, (Node*)any_sublink);
                    }

                    list_free_ext(qual_list.qual_list);
                }
            }

            list_free_deep(pullUpExprList);
            pfree_ext(all_quals_list);

            return push_quals;
        }
        ```

    - \(3\) Use the transform\_equal\_expr function to construct the subquery to be pulled up \(add a GROUP BY clause and delete related conditions\). The code is as follows:

        ```
        // Add GROUP BY and windowClause for SubQuery.
        if (isLimit) {
            append_target_and_windowClause(root, subQuery, (Node*)copyObject(node), false);
        } else {
            append_target_and_group(root, subQuery, (Node*)copyObject(node));
        }
        // Delete related conditions.
        subQuery->jointree = (FromExpr*)replace_node_clause((Node*)subQuery->jointree,
            (Node*)pullUpEqualExpr,
            (Node*)constList,
            RNC_RECURSE_AGGREF | RNC_COPY_NON_LEAF_NODES);
        ```

    - \(4\) Construct the conditions that need to be pulled up. The code is as follows:

        ```
        // Construct the conditions to be pulled up.
        joinQual = make_and_qual((Node*)joinQual, (Node*)pullUpExpr);
        ...
        return joinQual;
        ```

    - \(5\) Generate a join expression. The code is as follows:

        ```
        // Generate a join expression.
        if (IsA(*currJoinLink, JoinExpr)) {
            ((JoinExpr*)*currJoinLink)->quals = replace_node_clause(((JoinExpr*)*currJoinLink)->quals,
                tmpExprQual,
                makeBoolConst(true, false),
                RNC_RECURSE_AGGREF | RNC_COPY_NON_LEAF_NODES);

        } else if (IsA(*currJoinLink, FromExpr)) {
            ((FromExpr*)*currJoinLink)->quals = replace_node_clause(((FromExpr*)*currJoinLink)->quals,
                tmpExprQual,
                makeBoolConst(true, false),
                RNC_RECURSE_AGGREF | RNC_COPY_NON_LEAF_NODES);
        }

        rtr = (RangeTblRef*)makeNode(RangeTblRef);
        rtr->rtindex = list_length(root->parse->rtable);

        // Construct the JoinExpr of the left join.
        JoinExpr* result = NULL;
        result = (JoinExpr*)makeNode(JoinExpr);
        result->jointype = JOIN_LEFT;
        result->quals = joinQual;
        result->larg = *currJoinLink;
        result->rarg = (Node*)rtr;

        // Add JoinExpr to the range table entry. In subsequent processing,
        // the left outer join can be converted to an inner join.
        rte = addRangeTableEntryForJoin(NULL,
            NIL,
            result->jointype,
            NIL,
            result->alias,
            true);
        root->parse->rtable = lappend(root->parse->rtable, rte);
        ```

## Statistics and Cost Estimation

Under different data distributions, the execution efficiency of the same query plan may differ significantly. Therefore, the impact of data distribution on the plan should also be considered during plan selection. Unlike common logical optimization, physical optimization builds plan optimization on data and improves performance by minimizing data operation costs. In terms of functions, the physical optimization of openGauss involves the following three key steps:

\(1\) Data distribution generation: mines the data distribution from data tables and stores it.

\(2\) Plan cost estimation: based on the data distribution, a cost model is established to estimate the actual execution time of a plan.
\(3\) Optimal plan selection: based on the cost estimation, the system searches for the plan with the minimum cost among the candidate plans.

First, we introduce the concepts related to data distribution and how it is stored inside the database.

### 1. Data Distribution Storage

The distribution of dataset _D_ consists of the frequencies of the different values in _D_. Assume that _D_ is the projection of the **Grade** column in Table 4. This column has three values 1, 2, and 3. For details about the frequency distribution, see Table 5. Here, the number of distinct **Grade** values is referred to as the number of distinct values \(NDV\).

Table 4 Grade attribute distribution

Sno

+

Name

+

Gender

+

Grade

+

001

+

Xiao Zhang

+

Male

+

1

+

002

+

Xiao Li

+

Male

+

2

+

003

+

Xiao Wang

+

Male

+

3

+

004

+

Xiao Zhou

+

Female

+

1

+

005

+

Xiao Chen

+

Female

+

1

+
+ + +Table 5 Grade frequency distribution + + + + + + + + + + + + + + +

Grade

+

1

+

2

+

3

+

Frequency

+

3

+

1

+

1

+
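
The distribution in Table 5 can be reproduced with a simple aggregate over a student table shaped like Table 4 \(the table is hypothetical\):

```
SELECT grade, count(*) AS frequency
FROM student
GROUP BY grade
ORDER BY grade;
-- grade | frequency
--     1 |         3
--     2 |         1
--     3 |         1
```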
+ + +_D_ may relate to a plurality of attributes, and distribution of the plurality of attributes is referred to as joint distribution. The value space of the joint distribution may be very large. From the perspective of performance, the database does not store the joint distribution of _D_, but stores the attribute distribution of _D_ separately. For example, the database stores the frequency of \{Gender='Male'\} and \{Grade='1'\}, instead of \{Gender='Male', Grade='1'\}. This practice loses much of the information distributed on _D_. As will be seen in the subsequent section on selectivity and data distribution, openGauss will use prediction techniques to infer the joint distribution when the system requires it. Although, in some cases, the results of this speculation may differ significantly from the actual situation. + +The data structure of data distribution is especially critical to understanding how the database stores this information. Generally, a key-value \(KV\) pair is the most commonly used structure for describing distribution, where key indicates a value, and value indicates a frequency. However, when the NDV is large, the expansion of the key value causes low storage and read performance of the KV. To improve efficiency, the openGauss uses the "KV vector + histogram" to indicate the attribute distribution. + +**Logical structure of data distribution**: A high-frequency value frequency is stored by using a KV, and its storage structure is referred to as a most common value \(MCV\). A frequency other than the high-frequency value is described by using an equal-bin-count histogram \(EH\). In the implementation, the openGauss puts the k \(k = 100\) key values with the highest frequency into the MCV, and puts the other key values into the EH. + +It should be noted that the EH combines the frequencies of multiple values, which significantly improves the access efficiency but also blurs the distribution. However, as can be seen in the following sections, the high frequency value is more critical to the estimation of the plan cost than the low frequency value. Therefore, this hybrid strategy, which trades for high performance at the cost of losing the accuracy of low frequency values, is undoubtedly a fairly cost-effective approach. + +**Storage location of data distribution**: In openGauss, information such as the MCV and EH is stored in the PG\_STATISTIC system catalog. Table 6 describes the table definition. + +Table 6 Definitions of the PG\_STATISTIC system catalog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

starelid

+

staattnum

+

stanullfrac

+

stakind1

+

stanumbers1

+

stavalues1

+

Stakind2

+

...

+

0001

+

1

+

0

+

1

+

{0.2851, 0.1345}

+

{1, 2}

+

2

+
  

0001

+

2

+

0

+

1

+

{0.1955, 0.1741}

+

{mathematics, language}

+

2

+
  
+ + +One tuple in Table 6 stores statistics of one attribute. The following describes the attribute meanings of tuples. + +\(1\) The **starelid/staattnum** attribute indicates the table OID and attribute ID. + +\(2\) The attribute **stanullfrac** indicates the percentage of null values in the attribute. The value **0** indicates that the column has no null value. + +\(3\) The attribute group **\{stakind1, stanumbers1, stavalues1\}** forms a slot in the PG\_STATISTIC system catalog and stores information about a data structure type in Table 7. There are five slots in the PG\_STATISTIC system catalog. Generally, the first slot stores the MCV information, and the second slot stores the EH information. Take the MCV slot as an example. The **stakind1** attribute indicates that the slot type is MCV, and **1** is the enumerated value of STATISTIC\_KIND\_MCV. The **stanumbers1** and **stavalues1** attributes record the MCV content. The **stavalues1** attribute records the key value, and the **stanumbers1** attribute records the frequency corresponding to the key. In the preceding example, the frequency ratio for value **1** is 0.2851, and the frequency ratio for value **2** is 0.1345. + +Table 7 Description of PG\_STATISTIC + + + + + + + + + + + + + + + + + + + + + + +

Type

+

Description

+

STATISTIC_KIND_MCV

+

High-frequency values (MCVs): The values that appear most frequently in a column are sorted according to the occurrence frequency, and a corresponding frequency array is generated. In this way, you can know the high-frequency values in a column and the frequencies of these high-frequency values.

+

STATISTIC_KIND_HISTOGRAM

+

Histogram. The openGauss database uses an EH to describe the distribution of data in a column. High-frequency values are not displayed in the histogram, ensuring that the data distribution is relatively flat.

+

STATISTIC_KIND_CORRELATION

+

Correlation coefficient. The correlation coefficient records the correlation between the unsorted data distribution and the sorted data distribution in the current column. This value is used to estimate the cost during index scanning. Assume that the correlation between the unsorted data distribution and sorted data distribution in a column is 0, that is, the data is not correlated. In this case, the cost of index scanning is higher.

+

STATISTIC_KIND_MCELEM

+

High-frequency type values (MCVs), which is used for the array type or other types. The openGauss database provides the ts_typanalyze system function to generate statistics of this type.

+

STATISTIC_KIND_DECHIST

+

Array histogram, which is used to generate histograms for array types. The openGauss database provides the array_typanalyze system function to generate statistics of this type.

+
Note that the data distribution and the content of the PG\_STATISTIC system catalog are not generated automatically when a table is created. They are generated when the ANALYZE operation is performed on the table.

### 2. Data Distribution Extraction

The previous section described the logical structure and storage of data distribution in openGauss. How can the distribution information be obtained from the data? This section describes the distribution extraction process in openGauss. To deepen the understanding of the method, let's begin by analyzing the challenges.

The most direct way to obtain the distribution is to traverse all data and generate the MCV and EH information by direct counting. However, in practice there may be a huge amount of data, and the I/O cost of a full traversal is usually unacceptable. For example, the bill data of a bank may involve hundreds of billions of records and terabytes of storage. Besides the I/O cost, the memory consumed by counting may exceed the available limit, which makes such an algorithm especially hard to implement. A more realistic approach is therefore to reduce the scale of analysis and estimate the overall distribution from a small sample, which makes the quality of the selected sample particularly important.

Currently, sample generation in openGauss is implemented in the acquire\_sample\_rows function, which uses a two-phase sampling algorithm to estimate the data distribution. In the first phase, Algorithm S is used to randomly sample physical pages, generating a sample S1. In the second phase, Algorithm Z \(Vitter\) performs reservoir sampling on the tuples contained in S1, finally generating a sample S2 of 30000 tuples. The two-phase algorithm ensures that S2 is an unbiased sample of the original data, so the original distribution can be inferred by analyzing S2, and the inferred distribution information is recorded in the PG\_STATISTIC system catalog.

openGauss divides sample generation into two steps to improve sampling efficiency. The theoretical basis of the method is the following practical condition: the number _M_ of physical pages occupied by the data can be obtained accurately, but the number _n_ of tuples on each physical page is unknown. Because _M_ is known, Algorithm S can sample pages uniformly with probability 1/_M_ and generate a small sample S1 of the original data. Since a tuple belongs to any physical page with equal probability, S1 is an unbiased sample. Because the total number of tuples in S1 is far smaller than that of the original data, the cost of the second sampling pass over S1 is greatly reduced. The main reason Algorithm S is not used in the second phase is that the total number of tuples _N_ in S1 is unknown \(because _n_ is unknown\), so the sampling probability 1/_N_ required by Algorithm S cannot be computed. Algorithm Z \(Vitter\) is a reservoir sampling algorithm, which guarantees uniform sampling when the total amount of data is unknown. The principle of reservoir sampling is beyond the scope of this section; interested readers can refer to the related literature. A simplified sketch follows.
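The following is a minimal, self-contained sketch of reservoir sampling in the style of the simpler Algorithm R, assuming one uniform pass over the tuples. It only illustrates why a uniform sample can be kept without knowing the total count in advance; the actual openGauss logic lives in acquire\_sample\_rows, and Vitter's Algorithm Z additionally skips tuples in batches for efficiency. All names here are illustrative.

```
#include <stdio.h>
#include <stdlib.h>

#define SAMPLE_SIZE 5

/* Keep a uniform sample of SAMPLE_SIZE items from a stream whose total
 * length does not need to be known in advance: item i replaces a random
 * reservoir slot with probability SAMPLE_SIZE / (i + 1). */
static void reservoir_sample(const int *stream, int stream_len, int *sample)
{
    for (int i = 0; i < stream_len; i++) {
        if (i < SAMPLE_SIZE) {
            sample[i] = stream[i];     /* fill the reservoir first */
        } else {
            int j = rand() % (i + 1);  /* uniform position in [0, i] */
            if (j < SAMPLE_SIZE)
                sample[j] = stream[i]; /* replace a random slot */
        }
    }
}

int main(void)
{
    int stream[20], sample[SAMPLE_SIZE];
    for (int i = 0; i < 20; i++)
        stream[i] = i + 1;             /* 20 "tuples" */
    reservoir_sample(stream, 20, sample);
    for (int i = 0; i < SAMPLE_SIZE; i++)
        printf("%d ", sample[i]);      /* an unbiased sample of 5 tuples */
    printf("\n");
    return 0;
}
```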
### 3. Selectivity and Data Distribution

SQL queries often contain WHERE constraints \(filter conditions\), for example, **SELECT \* FROM student WHERE gender = 'male';** and **SELECT \* FROM student WHERE grade \> '1';**. What is the actual effect of such a constraint on the query result? To measure the effectiveness of constraints, the concept of selectivity is introduced first.

**Selectivity**: Given the query dataset _C_ \(which can be a data table or any intermediate result set\) and a constraint expression _x_, the selectivity of _x_ relative to _C_ is defined as:

selec\(x│C\) = |C|x / |C|

Where |C| indicates the total number of records in _C_, and |C|x indicates the number of records in _C_ that satisfy the constraint _x_. For the dataset shown in Table 8, when _x_ is **"grade = 1"**, the selectivity is 3/5.

Table 8 Selectivity of dataset C

| Sno | Name | Gender | Grade |
| --- | ---- | ------ | ----- |
| 001 | Xiao Zhang | Male | 1 |
| 002 | Xiao Li | Male | 2 |
| 003 | Xiao Wang | Male | 3 |
| 004 | Xiao Zhou | Female | 1 |
| 005 | Xiao Chen | Female | 1 |
The data distribution of _C_ is denoted as π. From the definition, selec\(x│C\) is actually a description of π from the perspective of the semantics of _x_. Knowing the data distribution allows the selectivity to be calculated without traversing the original data. As the cost estimation section will show, selectivity plays a significant role in estimating the cost of a plan.

Based on this idea, this section focuses on selectivity calculation in openGauss. Because selectivity calculation under simple constraints is representative, simple constraints are used as the example. A simple constraint is defined as a non-range constraint that involves only a single attribute of a base table.

For details about how selectivity is calculated under non-simple constraints, read the source code of this chapter.

- Selectivity Calculation Under Simple Constraints

    Assume that _x_ is a simple constraint and that the attribute distribution information related to _x_ already exists in a tuple _r_ of the PG\_STATISTIC system catalog \(see the data distribution storage section\). openGauss calls the clause\_selectivity function to convert the tuple _r_ into a selectivity according to the requirements of _x_.

    The second parameter **clause** of **clause\_selectivity** is the constraint statement _x_. For different SQL queries, clauses of multiple types may be passed to **clause\_selectivity**. Table 9 lists the typical types.

    Table 9 Simple constraint types

    | Simple Constraint Type | Example |
    | ---------------------- | ------- |
    | Var | SELECT * FROM PRODUCT WHERE ISSOLD; |
    | Const | SELECT * FROM PRODUCT WHERE TRUE; |
    | Param | SELECT * FROM PRODUCT WHERE $1; |
    | OpExpr | SELECT * FROM PRODUCT WHERE PRIZE = '100'; |
    | AND | SELECT * FROM PRODUCT WHERE PRIZE = '100' AND TYPE = 'HAT'; |
    | OR | SELECT * FROM PRODUCT WHERE PRIZE = '100' OR TYPE = 'HAT'; |
    | NOT | SELECT * FROM PRODUCT WHERE NOT (TYPE = 'HAT'); |
    | Other |  |
    \{Var, Const, Param, OpExpr\} are basic constraints, and \{AND, OR, NOT\} are SET constraints. Obviously, the \{Var, Const, Param\} constraints can be regarded as special cases of the OpExpr constraint. For example, **SELECT \* FROM PRODUCT WHERE ISSOLD** is equivalent to **SELECT \* FROM PRODUCT WHERE ISSOLD = TRUE**. Due to space limitations, this section describes selectivity calculation based on the OpExpr constraint in detail and only briefly introduces the key logic of selectivity calculation for constraints of the SET type.

    \(1\) Selectivity calculation based on the OpExpr constraint

    The query statement **SELECT \* FROM PRODUCT WHERE PRIZE = '100'** is used as an example. The clause\_selectivity function takes the OpExpr branch based on the type of the clause \(PRIZE = '100'\). It then calls the treat\_as\_join\_clause function to determine whether the clause is a join constraint. If the result is false, the clause is a filter condition \(OP\), and clause\_selectivity calls the restriction\_selectivity function to estimate the selectivity of the **clause** parameter. The code is as follows:

    ```
    Selectivity
    clause_selectivity(PlannerInfo *root,
                       Node *clause,
                       int varRelid,
                       JoinType jointype,
                       SpecialJoinInfo *sjinfo)
    {
        Selectivity s1 = 0.5;    /* default for any unhandled clause type */
        RestrictInfo *rinfo = NULL;

        if (clause == NULL)      /* can this still happen? */
            return s1;
        if (IsA(clause, Var))...
        else if (IsA(clause, Const))...
        else if (IsA(clause, Param))...

        // Processing branch of the NOT clause
        else if (not_clause(clause))
        {
            /* inverse of the selectivity of the underlying clause */
            s1 = 1.0 - clause_selectivity(root,
                                          (Node *) get_notclausearg((Expr *) clause),
                                          varRelid,
                                          jointype,
                                          sjinfo);
        }

        // Processing branch of the AND clause
        else if (and_clause(clause))
        {
            /* share code with clauselist_selectivity() */
            s1 = clauselist_selectivity(root,
                                        ((BoolExpr *) clause)->args,
                                        varRelid,
                                        jointype,
                                        sjinfo);
        }

        // Processing branch of the OR clause
        else if (or_clause(clause))
        {
            ListCell *arg;

            s1 = 0.0;
            foreach(arg, ((BoolExpr *) clause)->args)
            {
                Selectivity s2 = clause_selectivity(root,
                                                    (Node *) lfirst(arg),
                                                    varRelid,
                                                    jointype,
                                                    sjinfo);

                s1 = s1 + s2 - s1 * s2;
            }
        }

        // Processing branch of the join or OP clause
        else if (is_opclause(clause) || IsA(clause, DistinctExpr))
        {
            OpExpr *opclause = (OpExpr *) clause;
            Oid opno = opclause->opno;

            // Process the join clause.
            if (treat_as_join_clause(clause, rinfo, varRelid, sjinfo))
            {
                /* Estimate selectivity for a join clause. */
                s1 = join_selectivity(root, opno,
                                      opclause->args,
                                      opclause->inputcollid,
                                      jointype,
                                      sjinfo);
            }

            // Process the OP clause.
            else
            {
                /* Estimate selectivity for a restriction clause. */
                s1 = restriction_selectivity(root, opno,
                                             opclause->args,
                                             opclause->inputcollid,
                                             varRelid);
            }
        }
        ... ...
        return s1;
    }
    ```

    The restriction\_selectivity function identifies PRIZE = '100' as an equivalence constraint of the form Var = Const. It indirectly calls the var\_eq\_const function through the eqsel function to estimate the selectivity. In this process, var\_eq\_const reads the distribution information of the PRIZE column in the PG\_STATISTIC system catalog and attempts to use the MCV in that information to calculate the selectivity.
    The get\_attstatsslot function is called first to check whether '100' exists in the MCV. There are two cases:

    - Case 1: If it does, the proportion of '100' in the MCV is returned directly as the selectivity.

    - Case 2: If it does not, the total proportion **sumcommon** of the high-frequency values is calculated, and \(1.0 – **sumcommon** – **nullfrac**\)/**otherdistinct** is returned as the selectivity, where **nullfrac** is the proportion of NULL values and **otherdistinct** is the NDV of the low-frequency values.

    If the constraint added to the query is PRIZE \< '100', the restriction\_selectivity function instead calls the inequality-selectivity function that matches the operator type \(scalarltsel for \<, scalargtsel for \>\) and attempts to calculate the selectivity using the information in the PG\_STATISTIC system catalog. Values that satisfy the condition \< '100' may exist in both the MCV and the EH, so qualifying values must be collected from both structures. Compared with the MCV, collecting the qualifying values in the EH is more complex. Based on the order of the keys in the EH, openGauss uses binary search to quickly find the qualifying values and sums up their total proportion, recorded as **selec\_histogram**. Note that the EH does not record the frequency of '100' separately. Instead, it merges '100' and adjacent values into one bucket \(recorded as bucket _B_\) and records only the total frequency _Fb_ of the values in bucket _B_. To solve this problem, openGauss assumes that all elements in a bucket have the same frequency and uses the following formula to estimate the proportion of qualifying values in _B_:

    ![](../figures/zh-cn_image_0000001257142015.gif)

    The specific code of this process is implemented in the ineq\_histogram\_selectivity function. Finally, the selectivity returned by restriction\_selectivity is **selec** = **selec\_mcv** + **selec\_histogram**, where **selec\_mcv** is the proportion of qualifying MCV values \(a simplified sketch of this MCV-plus-histogram combination appears after Table 11\).

- Selectivity calculation based on constraints of the SET type

    For a SET constraint, the clause\_selectivity function recursively calculates the selectivity of its basic constraints, and the final selectivity is then combined according to the semantics of the SET type, as listed in Table 10.

    Table 10 Selectivity of the SET type

    | SET Type | Description |
    | -------- | ----------- |
    | NOT | selec(B) = 1 – selec(A), where B = NOT A |
    | AND | selec(C) = selec(A) x selec(B), where C = A AND B |
    | OR | selec(C) = selec(A) + selec(B) – selec(A) x selec(B), where C = A OR B |
    By referring to the data distribution storage section, you may notice that openGauss does not store the multi-attribute joint distribution. As Table 10 shows, openGauss combines selectivities on the assumption that the values of different columns are independent of each other. In scenarios where the columns are not independent, this prediction often deviates. For example, in the student table, gender is correlated with major, so the number of male students in a computer department cannot be estimated accurately by multiplying the overall proportion of male students by the number of students in that department. In most cases, however, the independence assumption leads to reasonably accurate results.

- Default selectivity parameters

    When the data distribution is unknown, the selectivity cannot be estimated in the conventional way, for example, when the ANALYZE operation has not been performed on the data table, or when the filter condition is an uncertain parameter. To provide a reasonable reference value for the optimizer, openGauss defines a series of empirical selectivity parameters, as shown in Table 11.

    Table 11 Selectivity parameters

    | Variable | Value | Description |
    | -------- | ----- | ----------- |
    | DEFAULT_EQ_SEL | 0.005 | Default selectivity for an equality constraint, for example, A = b. |
    | DEFAULT_INEQ_SEL | 0.3333333333333333 | Default selectivity for an inequality constraint, for example, A < b. |
    | DEFAULT_RANGE_INEQ_SEL | 0.005 | Default selectivity for range constraints involving the same attribute (column), for example, A > b AND A < c. |
    | DEFAULT_MATCH_SEL | 0.005 | Default selectivity for pattern-matching constraints, for example, LIKE. |
    | DEFAULT_NUM_DISTINCT | 200 | Default number of elements in the value range of an attribute after deduplication (distinct). Generally, DEFAULT_EQ_SEL is its reciprocal (1/200 = 0.005). |
    | DEFAULT_UNK_SEL | 0.005 | Default selectivity for BoolTest or NullTest constraints, for example, IS TRUE or IS NULL. |
    | DEFAULT_NOT_UNK_SEL | (1.0 - DEFAULT_UNK_SEL) | Default selectivity for BoolTest or NullTest constraints such as IS NOT TRUE or IS NOT NULL. |
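The following self-contained sketch illustrates the **selec** = **selec\_mcv** + **selec\_histogram** combination described above for a "col \< c" constraint such as PRIZE \< '100'. The AttrStats structure, the field layout, and the numbers are all hypothetical stand-ins; the real logic lives in var\_eq\_const and ineq\_histogram\_selectivity and reads PG\_STATISTIC.

```
#include <stdio.h>

/* Hypothetical per-column statistics; illustrative only. */
typedef struct AttrStats {
    const double *mcv_vals;    /* MCV keys */
    const double *mcv_freqs;   /* frequency fraction of each MCV key */
    int           n_mcv;
    const double *hist_bounds; /* equal-bin-count histogram boundaries, ascending */
    int           n_bounds;    /* boundaries = buckets + 1 */
    double        nullfrac;    /* fraction of NULL values */
} AttrStats;

/* selec = selec_mcv + selec_histogram for a "col < c" constraint. */
static double lt_selectivity(const AttrStats *st, double c)
{
    double selec_mcv = 0.0, mcv_total = 0.0;
    for (int i = 0; i < st->n_mcv; i++) {    /* exact part: scan the MCV */
        mcv_total += st->mcv_freqs[i];
        if (st->mcv_vals[i] < c)
            selec_mcv += st->mcv_freqs[i];
    }

    int nbuckets = st->n_bounds - 1;
    double frac = 0.0;
    for (int b = 0; b < nbuckets; b++) {     /* histogram part */
        double lo = st->hist_bounds[b], hi = st->hist_bounds[b + 1];
        if (c >= hi)
            frac += 1.0;                     /* whole bucket qualifies */
        else if (c > lo) {
            frac += (c - lo) / (hi - lo);    /* same-frequency-in-bucket assumption */
            break;
        } else
            break;
    }
    /* The histogram covers only the non-MCV, non-NULL fraction of the column. */
    double selec_histogram = (frac / nbuckets) * (1.0 - mcv_total - st->nullfrac);
    return selec_mcv + selec_histogram;
}

int main(void)
{
    double mcv_v[] = {150, 80};
    double mcv_f[] = {0.2851, 0.1345};
    double bounds[] = {0, 50, 90, 130, 200}; /* 4 equal-frequency buckets */
    AttrStats st = {mcv_v, mcv_f, 2, bounds, 5, 0.0};
    printf("selectivity of col < 100: %.4f\n", lt_selectivity(&st, 100.0));
    return 0;
}
```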
### 4. Cost Estimation

Query execution costs are classified into I/O costs and CPU costs. Both are positively correlated with the number of tuples processed during the query, so the selectivity makes it possible to estimate the total cost of a query plan fairly accurately. However, because hardware environments differ, the openGauss cost model outputs only a generic indicator for measuring plan quality, not an execution time. To describe the measurement process, the following introduces the I/O and CPU cost estimation methods from the perspective of the cost model parameters.

- \(1\) I/O cost estimation

    On disks, tuples are organized as data pages. Page access modes include sequential read and random read. Restricted by the performance of storage media, sequential reads are much more efficient than random reads; for example, when an HDD serves a large number of random access requests, head seek time dominates the data read time. In openGauss, the I/O costs of the two access modes are as follows:

    ```
    #define DEFAULT_SEQ_PAGE_COST 1.0
    #define DEFAULT_RANDOM_PAGE_COST 4.0
    ```

    By default, the ratio of the sequential read cost to the random read cost of a data page is 1:4.

    This setting is reasonable for HDDs. However, for SSDs with excellent addressing capabilities, the parameters should be adjusted to the actual situation. In practice, database deployments are complex, and one system may contain several different storage media at the same time. To enable the cost model to cope with the I/O performance of different storage media, openGauss lets users set the unit cost of file I/O per tablespace:

    ```
    CREATE TABLESPACE TEST_SPC LOCATION '...' WITH (SEQ_PAGE_COST=2, RANDOM_PAGE_COST=3);
    ```

    With the I/O cost parameters and the selectivity, the I/O overhead of a candidate plan can be estimated easily. The following uses sequential scan \(SeqScan\) and index scan \(IndexScan\) as examples \(a toy comparison of the two appears at the end of this section\):

    - 1. SeqScan: traverses the table data from beginning to end; this is a sequential read. Therefore, the I/O cost of SeqScan is **DEFAULT\_SEQ\_PAGE\_COST** multiplied by the total number of data pages in the table.

    - 2. IndexScan: uses an index to find the table data that satisfies the constraint; this is a random read. Therefore, the I/O cost of IndexScan is _P_ x **DEFAULT\_RANDOM\_PAGE\_COST**, where _P_ is the number of data pages that satisfy the constraint.

    _P_ is positively correlated with _R_ \(the number of tuples that satisfy the constraint\), and _R_ = total number of tuples in the table x selectivity. After openGauss calculates _R_, it invokes the index\_pages\_fetched\(R, ...\) function to estimate _P_. This function is implemented in the **costsize.c** file; for details, see the paper _Index Scans Using a Finite LRU Buffer: A Validated I/O Model_ by L. F. Mackert and G. M. Lohman.

    By observing the cost model, we can find that when the selectivity exceeds a certain threshold, _P_ becomes relatively large and the cost of IndexScan exceeds that of SeqScan. Therefore, IndexScan is not always more efficient than SeqScan.

- \(2\) CPU cost estimation

    The database consumes CPU resources in the data addressing and data processing phases, for example, for tuple projection and selection and for index searches. Obviously, different operations have different CPU costs. openGauss divides the CPU cost into the tuple processing cost and the data operation cost.
    ① Tuple processing cost: the cost of converting a piece of disk data into a tuple. For ordinary table data and index data, the cost parameters are as follows:

    ```
    #define DEFAULT_CPU_TUPLE_COST 0.01
    #define DEFAULT_CPU_INDEX_TUPLE_COST 0.005
    ```

    Among the default parameters, the index cost is lower, because index data typically involves fewer columns than table data and therefore requires fewer CPU resources.

    ② Data operation cost: the cost of projecting a tuple or of evaluating the constraint expression to determine whether a tuple qualifies. The cost parameter is as follows:

    ```
    #define DEFAULT_CPU_OPERATOR_COST 0.0025
    ```

    Given the above parameters, the estimated CPU cost is proportional to the computation scale of the problem, which in turn depends on the selectivity; the relationship is similar to that between the complexity of an algorithm instance and _n_. Due to limited space, this section does not go into further detail.
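To make the SeqScan/IndexScan trade-off concrete, the following toy model compares the two I/O costs as the selectivity grows. The linear mapping from qualifying rows to fetched pages is a deliberate simplification \(the real estimate uses index\_pages\_fetched in **costsize.c**\), and the page count is an assumed value.

```
#include <stdio.h>

#define SEQ_PAGE_COST    1.0
#define RANDOM_PAGE_COST 4.0

int main(void)
{
    double table_pages = 10000.0;                  /* assumed table size in pages */
    double seq_cost = table_pages * SEQ_PAGE_COST; /* SeqScan always reads all pages */

    for (int s = 5; s <= 45; s += 10) {
        double selectivity = s / 100.0;
        /* crude stand-in for index_pages_fetched: fetched pages grow with qualifying rows */
        double fetched_pages = table_pages * selectivity;
        double index_cost = fetched_pages * RANDOM_PAGE_COST;
        printf("selectivity %.2f: SeqScan %.0f vs IndexScan %.0f -> %s\n",
               selectivity, seq_cost, index_cost,
               index_cost < seq_cost ? "IndexScan wins" : "SeqScan wins");
    }
    return 0;
}
```

In this toy model the crossover sits at a selectivity of 0.25; beyond it, random reads make IndexScan the more expensive choice, which matches the observation above.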
## Physical Path

In the database, paths are represented by the Path structure, which is derived from the Node structure. Path is also a base structure, similar to a base class in C++: every concrete path is derived from it. For example, the IndexPath structure used by index scan paths is derived from the Path structure.

```
typedef struct Path
{
    NodeTag type;
    NodeTag pathtype;          /* Path type, such as T_IndexPath and T_NestPath. */
    RelOptInfo *parent;        /* Intermediate result generated after the current path is executed. */
    PathTarget *pathtarget;    /* Projection of the path. The expression cost is also saved here. */
                               /* Pay attention to expression indexes. */
    ParamPathInfo *param_info; /* Parameters used during execution. In the executor, subqueries or some special */
                               /* joins need to obtain the current value of another table in real time. */
    bool parallel_aware;       /* Parallel parameter, which distinguishes parallel and non-parallel. */
    bool parallel_safe;        /* Parallel parameter, determined by the set_rel_consider_parallel function. */
    int parallel_workers;      /* Parallel parameter, indicating the number of parallel threads. */
    double rows;               /* Estimated amount of data in the intermediate result generated by this path. */
    Cost startup_cost;         /* Startup cost, that is, the cost from statement execution to obtaining the first result. */
    Cost total_cost;           /* Overall execution cost of the current path. */
    List *pathkeys;            /* Sort keys of the intermediate result generated by this path; NULL if the result is unordered. */
} Path;
```
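The following standalone sketch illustrates the "base structure" idiom that Path relies on: a derived path embeds the base struct as its first member, so a generic Path pointer can be safely downcast after checking the tag. The types here are simplified stand-ins, not the openGauss definitions.

```
#include <stdio.h>

typedef enum { T_SeqScanPath, T_IndexPath } NodeTag;

typedef struct Path {
    NodeTag pathtype;       /* which concrete path this really is */
    double  total_cost;     /* overall execution cost of the path */
} Path;

typedef struct IndexPath {
    Path path;              /* "inherited" base part; must come first */
    int  indexoid;          /* toy field standing in for index information */
} IndexPath;

static void describe(const Path *p)
{
    if (p->pathtype == T_IndexPath) {
        /* Safe downcast: the base struct is the first member. */
        const IndexPath *ip = (const IndexPath *) p;
        printf("IndexPath on index %d, cost %.2f\n", ip->indexoid, p->total_cost);
    } else {
        printf("SeqScanPath, cost %.2f\n", p->total_cost);
    }
}

int main(void)
{
    IndexPath ip = { { T_IndexPath, 42.5 }, 16384 };
    Path sp = { T_SeqScanPath, 100.0 };
    describe(&ip.path);
    describe(&sp);
    return 0;
}
```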
## Dynamic Programming

At this point, openGauss has completed rule-based query rewriting and logical decomposition and has generated the physical paths of each base table. The physical paths of the base tables are only a small part of the optimizer's planning work; openGauss now enters another important task of the optimizer, generating join paths. openGauss uses bottom-up optimization: for multi-table join paths, it uses dynamic programming and the genetic algorithm. This section mainly introduces dynamic programming, which is the default method and is used in common scenarios. When there are a large number of tables, the genetic algorithm is required, because it avoids the space explosion of the join path search when too many tables are involved.

After logical decomposition and optimization, the tables in the statement are flattened, that is, the original tree structure is turned into a flat array structure, and the join relationships between the tables are recorded in the **SpecialJoinInfo** structures in **root**. These are the basis for planning the joins with dynamic programming.

### 1. Dynamic Programming Method

Dynamic programming applies to optimization problems that contain a large number of repeated sub-problems. By memorizing the optimal solution of each sub-problem, each distinct sub-problem is solved only once, and the recorded solution is reused every time the same sub-problem appears again. For this to work, the optimal solutions of the sub-problems must combine into the optimal solution of the whole problem, that is, the problem must have the optimal substructure property. For statement join optimization, the optimal solution of the whole statement join builds on the optimal solutions of its partial joins; during planning, a local optimal solution is never recalculated, and the one calculated previously is used directly.

![](../figures/zh-cn_image_0000001257142943.jpg)![](../figures/zh-cn_image_0000001211903080.jpg)

Figure 1 Optimal solution of a repeated sub-problem

For example, the join A x B in the two join trees in Figure 1 is a repeated sub-problem: whether the A x B x C x D join path or the A x B x C join path is generated, the A x B join path must be generated first. A path produced by a multi-table join may have hundreds of join orders, and when many layers are stacked, the number of repeated sub-problems among these join trees is large. Because the join trees share repeated sub-problems, each sub-problem can be solved once and reused many times; that is, for the join A x B, the optimal solution needs to be generated only once. A compact sketch of this idea follows.
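The following toy program shows the memoization idea at miniature scale, using bitmasks for table sets and an invented cost model. It is a sketch of the dynamic programming principle only, not of the openGauss code analyzed next: every subset of tables is costed once, and larger joins reuse the stored optimum of their parts.

```
#include <stdio.h>

#define N 4        /* number of base tables */
#define INF 1e18

int main(void)
{
    double rows[1 << N], best[1 << N];
    double base_rows[N] = {100, 200, 50, 400};  /* toy base-table row counts */

    /* Toy cardinality of each table set: product of row counts, damped by
     * an invented 0.01 selectivity per join. */
    for (int s = 1; s < (1 << N); s++) {
        int cnt = 0;
        rows[s] = 1.0;
        for (int i = 0; i < N; i++)
            if (s & (1 << i)) { rows[s] *= base_rows[i]; cnt++; }
        for (int k = 1; k < cnt; k++)
            rows[s] *= 0.01;
        best[s] = (cnt == 1) ? 0.0 : INF;       /* scanning one table: toy cost 0 */
    }

    /* Bottom-up accumulation: every set is split into two disjoint parts, and
     * the optimal solution of each part is computed once and then reused. */
    for (int s = 1; s < (1 << N); s++) {
        for (int left = (s - 1) & s; left > 0; left = (left - 1) & s) {
            int right = s ^ left;
            if (right == 0 || best[left] >= INF || best[right] >= INF)
                continue;
            /* toy join cost: the inputs' costs plus the rows both sides feed in */
            double cost = best[left] + best[right] + rows[left] + rows[right];
            if (cost < best[s])
                best[s] = cost;
        }
    }
    printf("cheapest toy cost of joining all %d tables: %.1f\n", N, best[(1 << N) - 1]);
    return 0;
}
```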
The code of the multi-table join dynamic programming algorithm starts from the make\_rel\_from\_joinlist function, as shown in Figure 2.

![](../figures/zh-cn_image_0000001256862995.jpg)

Figure 2 Multi-table join dynamic programming algorithm

- 1\) make\_rel\_from\_joinlist function

    The main entry of the dynamic programming implementation is the make\_rel\_from\_joinlist function. Its input parameter is the RangeTblRef linked list flattened by the deconstruct\_jointree function; each RangeTblRef represents a table. The RelOptInfo structure of each base table can be found through this linked list, and the found structures form the layer-1 base-table RelOptInfo structures of the dynamic programming algorithm, on which the subsequent "accumulation" is performed. The code is as follows:

    ```
    // Traverse the flattened joinlist, which is a linked list of RangeTblRef nodes.
    foreach(jl, joinlist)
    {
        Node *jlnode = (Node *) lfirst(jl);
        RelOptInfo *thisrel;

        // In most cases, the node is a RangeTblRef. Use the subscript (rtindex) stored in it
        // to look up the corresponding RelOptInfo structure.
        if (IsA(jlnode, RangeTblRef))
        {
            int varno = ((RangeTblRef *) jlnode)->rtindex;
            thisrel = find_base_rel(root, varno);
        }
        // Affected by the from_collapse_limit and join_collapse_limit parameters, some nodes
        // are not flattened. In this case, make_rel_from_joinlist is invoked recursively.
        else if (IsA(jlnode, List))
            thisrel = make_rel_from_joinlist(root, (List *) jlnode);
        else
            ereport (......);

        // Generate the first, initial linked list, that is, the linked list of base tables.
        // This linked list is the basis of the dynamic programming method.
        initial_rels = lappend(initial_rels, thisrel);
    }
    ```

- 2\) standard\_join\_search function

    In the dynamic programming method, one table is added at each layer during the accumulation; when all tables have been added, the final join tree is complete. Therefore, the number of layers equals the number of tables: with _N_ tables, _N_ accumulation rounds are needed. The accumulation at each layer is performed by the join\_search\_one\_level function, while standard\_join\_search mainly prepares for it, including allocating the memory occupied by the RelOptInfos of each layer and keeping the information produced as each layer is accumulated.

    It creates a "join array" similar to \[LIST1, LIST2, LIST3\], where each linked list in the array stores all RelOptInfo structures of one layer of the dynamic programming method. For example, the first linked list in the array stores the linked lists related to all base-table paths. The code is as follows:

    ```
    // Allocate the RelOptInfo linked lists of all layers used during accumulation.
    root->join_rel_level = (List**)palloc0((levels_needed + 1) * sizeof(List*));
    // Initialize layer 1 with the base-table RelOptInfos.
    root->join_rel_level[1] = initial_rels;
    ```

    After completing the initialization, openGauss starts building the RelOptInfos of each layer. The code is as follows:

    ```
    for (lev = 2; lev <= levels_needed; lev++) {
        ListCell* lc = NULL;
        // Generate all RelOptInfo structures of this layer in the join_search_one_level function.
        join_search_one_level(root, lev);

        ...
    }
    ```

- 3\) join\_search\_one\_level function

    The join\_search\_one\_level function generates all RelOptInfos of one layer, as shown in Figure 3. To generate the RelOptInfos of the _N_th layer, there are mainly three manners: attempting to generate left-deep and right-deep trees, attempting to generate bushy trees, and attempting to generate join paths as Cartesian products \(commonly referred to as a traversal attempt\).

    ![](../figures/43.png)

    Figure 3 Manners of generating RelOptInfo of the Nth layer

    - \(1\) Left-deep tree and right-deep tree

        The generation principle of the left-deep tree is the same as that of the right-deep tree; the only difference is that the positions of the two RelOptInfos to be joined are exchanged in the make\_join\_rel function. That is, each RelOptInfo gets a chance to act as the inner table or the outer table, so that more join paths can be created, helping find the optimal path.

        As shown in Figure 4, the two candidate RelOptInfos A x B and C are to be joined to generate A x B x C, and the two tree shapes are obtained by exchanging their positions: with A x B as the outer table a left-deep tree is formed, and with A x B as the inner table a right-deep tree is formed.

        ![](../figures/44.png)

        Figure 4 Schematic diagram of a left-deep tree and a right-deep tree

        The code is as follows:

        ```
        // Traverse the previous layer: for example, to generate the RelOptInfos of layer 4,
        // try to join the layer-3 RelOptInfos with the layer-1 base tables.
        foreach(r, joinrels[level - 1])
        {
            RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
            // If there is a join relationship or a join-order restriction between two RelOptInfos,
            // a join is preferentially generated for them.
            // The has_join_restriction function may misjudge, but finer filtering follows later.
            if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
                has_join_restriction(root, old_rel))
            {
                ListCell *other_rels;
                // To generate the RelOptInfos of layer N, the layer-(N - 1) RelOptInfos are joined
                // with the layer-1 base tables. When layer 2 is generated from the base tables,
                // special handling prevents a table from being joined with itself.
                if (level == 2)
                    other_rels = lnext(r);
                else
                    other_rels = list_head(joinrels[1]);
                // old_rel "may" have join constraints or join-order restrictions with other tables;
                // other_rels "may" contain such tables. make_rels_by_clause_joins decides precisely.
                make_rels_by_clause_joins(root, old_rel, other_rels);
            }
            else
            {
                // Try to generate join paths for tables without join relationships or
                // join-order restrictions.
                make_rels_by_clauseless_joins(root, old_rel, list_head(joinrels[1]));
            }
        }
        ```

    - \(2\) Bushy tree

        To generate the RelOptInfos of the _N_th layer, the left-deep or right-deep approach joins a layer-\(N – 1\) RelOptInfo with a layer-1 base table; in essence, both construct the current layer by referencing base-table RelOptInfos. A bushy tree is generated without using a base table: it attempts to join RelOptInfos of arbitrary layers, for example, a layer-\(_N_ – 2\) RelOptInfo with a layer-2 one, and by analogy \(2, _N_ – 2\), \(3, _N_ – 3\), \(4, _N_ – 4\), and so on. Building a bushy tree requires two conditions: the two RelOptInfos must have a related join constraint or a join-order restriction, and they must not share any table.

        ```
        for (k = 2;; k++)
        {
            int other_level = level - k;
            foreach(r, joinrels[k])
            {
                // There must be join constraints or join-order restrictions.
                if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
                    !has_join_restriction(root, old_rel))
                    continue;
                ...
                for_each_cell(r2, other_rels)
                {
                    RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
                    // No overlap of tables is allowed.
                    if (!bms_overlap(old_rel->relids, new_rel->relids))
                    {
                        // There are related join constraints or join-order restrictions.
                        if (have_relevant_joinclause(root, old_rel, new_rel) ||
                            have_join_order_restriction(root, old_rel, new_rel))
                        {
                            (void) make_join_rel(root, old_rel, new_rel);
                        }
                    }
                }
            }
        }
        ```

    - \(3\) Cartesian product

        After the left-deep, right-deep, and bushy trees have been tried, if no legal join has been generated, a final attempt is made on the RelOptInfos of layer _N_ – 1 and layer 1: each RelOptInfo of the \(_N_ – 1\)th layer is joined with each RelOptInfo of the first layer.

### 2. Path Generation

The previous section covered the dynamic programming method used in path generation and how the RelOptInfos of the current layer are generated during accumulation. Generating the RelOptInfos of the current layer raises two problems: determining whether two RelOptInfos can be joined, and generating the physical join paths. A physical join path currently has three main implementations: NestLoopJoin, HashJoin, and MergeJoin, and building the join paths is a process of repeatedly attempting to generate these three kinds of paths.

- Performing checks

    In the dynamic programming method, each RelOptInfo of layer _N_ – 1 must be joined with each RelOptInfo of layer 1, and the newly joined RelOptInfo is stored at the current layer _N_. The time complexity of this algorithm is about O\(M x N\); if there are many RelOptInfos at layer _N_ – 1 and layer 1, the search space expands greatly. However, some joins can be ruled out early, which is the purpose of the timely checks: detecting and skipping a join between two RelOptInfos in advance saves unnecessary overhead and improves the efficiency of the optimizer.

    - \(1\) Preliminary check

        The following conditions are the main factors measured in the preliminary check:

        - The **joininfo** of a RelOptInfo is not **NULL**, which means this RelOptInfo has related constraints with other RelOptInfos, that is, it may be associated with other tables.

        - The **has\_eclass\_joins** of a RelOptInfo is **true**, which means the current RelOptInfo may have equivalence join conditions with other RelOptInfos recorded in the equivalence classes.

        - The has\_join\_restriction function returns **true**, which means the join order between the current RelOptInfo and other RelOptInfos is restricted.

        The preliminary check uses the information in RelOptInfo to judge the possibilities, that is, whether there are join conditions and join-order restrictions.

        ```
        static bool has_join_restriction(PlannerInfo* root, RelOptInfo* rel)
        {
            ListCell* l = NULL;

            // If the current RelOptInfo involves Lateral semantics, there must be join-order restrictions.
            foreach(l, root->lateral_info_list)
            {
                LateralJoinInfo *ljinfo = (LateralJoinInfo *) lfirst(l);

                if (bms_is_member(ljinfo->lateral_rhs, rel->relids) ||
                    bms_overlap(ljinfo->lateral_lhs, rel->relids))
                    return true;
            }

            // Process only the conditions other than inner joins.
            foreach (l, root->join_info_list) {
                SpecialJoinInfo* sjinfo = (SpecialJoinInfo*)lfirst(l);

                // Skip full joins; other mechanisms ensure their join order.
                if (sjinfo->jointype == JOIN_FULL)
                    continue;

                // If the SpecialJoinInfo is already contained in the RelOptInfo, skip it.
                if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
                    bms_is_subset(sjinfo->min_righthand, rel->relids))
                    continue;

                // If relids overlaps the min_lefthand or min_righthand variable of the
                // SpecialJoinInfo, there may be join-order restrictions.
                if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
                    bms_overlap(sjinfo->min_righthand, rel->relids))
                    return true;
            }

            return false;
        }
        ```

    - \(2\) Precise check

        After the preliminary check, if it is determined that the two RelOptInfos have no join condition or join-order restriction, the make\_rels\_by\_clauseless\_joins function is entered, and all possible paths of the RelOptInfo are joined with the layer-1 RelOptInfos. If the current RelOptInfo may have join constraints or join-order restrictions, the make\_rels\_by\_clause\_joins function is invoked to further check it against the other layer-1 RelOptInfos and determine whether they can be joined.

        The have\_join\_order\_restriction function determines whether two RelOptInfos have join-order restrictions from two aspects: whether the two RelOptInfos have a Lateral semantic order restriction, and whether min\_lefthand and min\_righthand in a SpecialJoinInfo impose a join-order restriction on them.

        The analysis of the have\_join\_order\_restriction source code is as follows:

        ```
        bool have_join_order_restriction(PlannerInfo* root, RelOptInfo* rel1, RelOptInfo* rel2)
        {
            bool result = false;
            ListCell* l = NULL;

            // If a dependency with Lateral semantics exists, the join order must be restricted.
            foreach(l, root->lateral_info_list)
            {
                LateralJoinInfo *ljinfo = (LateralJoinInfo *) lfirst(l);

                if (bms_is_member(ljinfo->lateral_rhs, rel2->relids) &&
                    bms_overlap(ljinfo->lateral_lhs, rel1->relids))
                    return true;
                if (bms_is_member(ljinfo->lateral_rhs, rel1->relids) &&
                    bms_overlap(ljinfo->lateral_lhs, rel2->relids))
                    return true;
            }

            // Traverse all SpecialJoinInfos in root and check whether the two RelOptInfos
            // have join constraints.
            foreach (l, root->join_info_list) {
                SpecialJoinInfo* sjinfo = (SpecialJoinInfo*)lfirst(l);

                if (sjinfo->jointype == JOIN_FULL)
                    continue;

                // The minimum sets are subsets of the two tables; the two tables must be
                // joined in the specified order.
                if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
                    bms_is_subset(sjinfo->min_righthand, rel2->relids)) {
                    result = true;
                    break;
                }
                // Conversely, the same applies with the two tables exchanged.
                if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
                    bms_is_subset(sjinfo->min_righthand, rel1->relids)) {
                    result = true;
                    break;
                }

                // If both tables overlap one end of the minimum set, they should be joined
                // at that end, so let them join first.
                if (bms_overlap(sjinfo->min_righthand, rel1->relids) && bms_overlap(sjinfo->min_righthand, rel2->relids)) {
                    result = true;
                    break;
                }
                // The reverse is the same as above.
                if (bms_overlap(sjinfo->min_lefthand, rel1->relids) && bms_overlap(sjinfo->min_lefthand, rel2->relids)) {
                    result = true;
                    break;
                }
            }

            // If the two tables have join relationships with other tables,
            // they can first be joined with those tables.
            if (result) {
                if (has_legal_joinclause(root, rel1) || has_legal_joinclause(root, rel2))
                    result = false;
            }

            return result;
        }
        ```

    - \(3\) Legal join

        Because the RelOptInfos expand the search space, running the legal-join check on every pair of RelOptInfo structures would take too long; this is why the preliminary and precise checks are performed first, shortening the search time and achieving a pruning effect.

        For legal joins, the main code is in join\_is\_legal, which determines whether two RelOptInfo structures can be joined to generate a physical path; its input parameters are the two RelOptInfo structures. The logical join between the two candidate RelOptInfos may be InnerJoin, LeftJoin, or SemiJoin, or no legal logical join may exist. The determination takes two steps:

        Step 1: Traverse the SpecialJoinInfos in the join\_info\_list linked list in **root** and check whether a legal SpecialJoinInfo can be found. A SpecialJoinInfo is generated for every logical join relationship except InnerJoin, and the legal join order is recorded in it.

        Step 2: Check the Lateral relationships in the RelOptInfo and check whether the found SpecialJoinInfo satisfies the join order required by the Lateral semantics.

- Creating join paths

    So far, pairs of RelOptInfo structures that satisfy the conditions have been filtered out. The next step is to build physical join paths between the paths of the two RelOptInfo structures. The common physical join paths, NestLoop, MergeJoin, and HashJoin, are implemented by the sort\_inner\_and\_outer, match\_unsorted\_outer, and hash\_inner\_and\_outer functions.

    The sort\_inner\_and\_outer function generates MergeJoin paths under the assumption that the paths of the inner and outer tables are unordered, so the paths must be sorted explicitly, and the path with the lowest total cost is selected for each of the inner and outer tables. The match\_unsorted\_outer function assumes the outer table is already sorted, so only the inner table needs sorting to generate a MergeJoin path, a NestLoop path, or a parameterized path. Finally, hash\_inner\_and\_outer builds a HashJoin path, that is, it joins the two tables through a hash table.

    To facilitate the creation of MergeJoin paths, the constraints are processed first: the constraints applicable to MergeJoin are filtered out \(the select\_mergejoin\_clauses function\), so that the mergejoinable join constraints are available to both the sort\_inner\_and\_outer and match\_unsorted\_outer functions. The code is as follows:

    ```
    // Extract the conditions usable for MergeJoin.
    foreach (l, restrictlist) {
        RestrictInfo* restrictinfo = (RestrictInfo*)lfirst(l);

        // If the current join is an outer join and this is a filter condition, ignore it.
        if (isouterjoin && restrictinfo->is_pushed_down)
            continue;

        // Preliminarily determine whether the join constraint can be used for MergeJoin.
        // restrictinfo->can_join and restrictinfo->mergeopfamilies are generated in distribute_qual_to_rels.
        if (!restrictinfo->can_join || restrictinfo->mergeopfamilies == NIL) {
            // Ignore FULL JOIN ON FALSE.
            if (!restrictinfo->clause || !IsA(restrictinfo->clause, Const))
                have_nonmergeable_joinclause = true;
            continue; /* not mergejoinable */
        }

        // Check whether the constraint is of the form "outer op inner" or "inner op outer".
        if (!clause_sides_match_join(restrictinfo, outerrel, innerrel)) {
            have_nonmergeable_joinclause = true;
            continue; /* no good for these input relations */
        }

        // Update and use the final equivalence classes.
        // Normalize the pathkeys so that constraints can match them.
        update_mergeclause_eclasses(root, restrictinfo);

        if (EC_MUST_BE_REDUNDANT(restrictinfo->left_ec) || EC_MUST_BE_REDUNDANT(restrictinfo->right_ec)) {
            have_nonmergeable_joinclause = true;
            continue; /* can't handle redundant eclasses */
        }

        result_list = lappend(result_list, restrictinfo);
    }
    ```

    - \(1\) sort\_inner\_and\_outer function

        The sort\_inner\_and\_outer function generates MergeJoin paths. It sorts the two child RelOptInfo structures explicitly, so only the cheapest\_total\_path of each child RelOptInfo needs to be considered. It generates pathkeys from the mergejoinable join constraints and then keeps adjusting the order of the pathkeys to obtain different variants; the innerkeys of the inner table and the outerkeys of the outer table are then determined from the differently ordered pathkeys. The code is as follows:

        ```
        // Try joins: traverse the candidate paths of the outer and inner tables.
        foreach (lc1, outerrel->cheapest_total_path) {
            Path* outer_path_orig = (Path*)lfirst(lc1);
            Path* outer_path = NULL;
            j = 0;
            foreach (lc2, innerrel->cheapest_total_path) {
                Path* inner_path = (Path*)lfirst(lc2);
                outer_path = outer_path_orig;

                // Parameterized paths cannot be used to generate MergeJoin paths.
                if (PATH_PARAM_BY_REL(outer_path, innerrel) ||
                    PATH_PARAM_BY_REL(inner_path, outerrel))
                    return;

                // The lowest-cost paths of the outer and inner tables must be satisfied.
                if (outer_path != linitial(outerrel->cheapest_total_path) &&
                    inner_path != linitial(innerrel->cheapest_total_path)) {
                    if (!join_used[(i - 1) * num_inner + j - 1]) {
                        j++;
                        continue;
                    }
                }

                // Generate a unique path.
                jointype = save_jointype;
                if (jointype == JOIN_UNIQUE_OUTER) {
                    outer_path = (Path*)create_unique_path(root, outerrel, outer_path, sjinfo);
                    jointype = JOIN_INNER;
                } else if (jointype == JOIN_UNIQUE_INNER) {
                    inner_path = (Path*)create_unique_path(root, innerrel, inner_path, sjinfo);
                    jointype = JOIN_INNER;
                }
                // Determine the set of pathkeys that MergeJoin paths can use, based on the extracted conditions.
                all_pathkeys = select_outer_pathkeys_for_merge(root, mergeclause_list, joinrel);
                // Process each pathkey in the set and try to generate a MergeJoin path.
                foreach (l, all_pathkeys) {
                    ...
                    // Generate the pathkeys of the inner table.
                    innerkeys = make_inner_pathkeys_for_merge(root, cur_mergeclauses, outerkeys);

                    // Generate the pathkeys of the outer table.
                    merge_pathkeys = build_join_pathkeys(root, joinrel, jointype, outerkeys);

                    // Generate the MergeJoin path from the pathkeys and the inner and outer table paths.
                    try_mergejoin_path(root, ......, innerkeys);
                }
                j++;
            }
            i++;
        }
        ```

    - \(2\) match\_unsorted\_outer function

        The overall code of the match\_unsorted\_outer function is similar to that of sort\_inner\_and\_outer, except that sort\_inner\_and\_outer infers the pathkeys of the inner and outer tables from the conditions, whereas match\_unsorted\_outer assumes the outer table path is already ordered.
        It sorts the join constraints according to the pathkeys of the outer table; that is, the pathkeys of the outer table serve directly as the outerkeys, so the function checks which join constraints match the current pathkeys, filters out the matching constraints, and from them generates the innerkeys that require explicit sorting.

    - \(3\) hash\_inner\_and\_outer function

        The hash\_inner\_and\_outer function creates HashJoin paths. The distribute\_restrictinfo\_to\_rels function has already determined whether a constraint is applicable to HashJoin. To build a hash table, HashJoin can be used only when at least one join constraint applicable to HashJoin exists; otherwise, the hash table cannot be built.

- Filtering paths

    So far, the physical join paths HashJoin, NestLoop, and MergeJoin have been generated, and it must be decided, based on the cost calculated during generation, whether each path is worth keeping. Many paths are generated in the join phase, including some obviously poor ones; filtering performs a basic check on them and saves plan generation time, because taking too long to generate a plan is unacceptable even if the resulting plan is "good".

    add\_path is the main function for filtering paths. The code is as follows:

    ```
    switch (costcmp) {
        case COSTS_EQUAL:
            outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
                                          PATH_REQ_OUTER(old_path));
            if (keyscmp == PATHKEYS_BETTER1) {
                if ((outercmp == BMS_EQUAL || outercmp == BMS_SUBSET1) &&
                    new_path->rows <= old_path->rows)
                    // The costs of the new and old paths are similar, but the new path has
                    // longer pathkeys, requires fewer parameters, and returns fewer rows.
                    // Accept the new path and discard the old one.
                    remove_old = true; /* new dominates old */
            } else if (keyscmp == PATHKEYS_BETTER2) {
                if ((outercmp == BMS_EQUAL || outercmp == BMS_SUBSET2) &&
                    new_path->rows >= old_path->rows)
                    // The costs are similar, but the new path has shorter pathkeys, requires
                    // more parameters, and returns more rows. Keep the old path and reject the new one.
                    accept_new = false; /* old dominates new */
            } else {
                if (outercmp == BMS_EQUAL) {
                    // The costs, pathkeys, and path parameters of the new and old paths are
                    // the same or similar.
                    // If the new path returns fewer rows, accept it and discard the old path.
                    if (new_path->rows < old_path->rows)
                        remove_old = true; /* new dominates old */
                    // If the new path returns more rows, keep the old path and reject the new one.
                    else if (new_path->rows > old_path->rows)
                        accept_new = false; /* old dominates new */
                    // The costs, pathkeys, parameters, and row counts are all similar.
                    // Compare the costs within a strict fuzz factor; if the new path wins,
                    // use it and discard the old path.
                    else {
                        small_fuzzy_factor_is_used = true;
                        if (compare_path_costs_fuzzily(new_path, old_path, SMALL_FUZZY_FACTOR) ==
                            COSTS_BETTER1)
                            remove_old = true; /* new dominates old */
                        else
                            accept_new = false; /* old equals or
                                                 * dominates new */
                    }
                // If the costs and pathkeys are similar, compare the row counts and parameters.
                // If the rows and parameters of the new path are better, discard the old path;
                // if those of the old path are better, discard the new path.
                } else if (outercmp == BMS_SUBSET1 &&
                           new_path->rows <= old_path->rows)
                    remove_old = true; /* new dominates old */
                else if (outercmp == BMS_SUBSET2 &&
                         new_path->rows >= old_path->rows)
                    accept_new = false; /* old dominates new */
                /* else different parameterizations, keep both */
            }
            break;
        case COSTS_BETTER1:
            // All comparison results indicate that the new path is better than or equal to
            // the old path. Accept the new path and discard the old one.
            if (keyscmp != PATHKEYS_BETTER2) {
                outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
                                              PATH_REQ_OUTER(old_path));
                if ((outercmp == BMS_EQUAL || outercmp == BMS_SUBSET1) &&
                    new_path->rows <= old_path->rows)
                    remove_old = true; /* new dominates old */
            }
            break;
        case COSTS_BETTER2:
            // All comparison results indicate that the old path is better than the new path.
            // Keep the old path and reject the new one.
            if (keyscmp != PATHKEYS_BETTER1) {
                outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
                                              PATH_REQ_OUTER(old_path));
                if ((outercmp == BMS_EQUAL || outercmp == BMS_SUBSET2) &&
                    new_path->rows >= old_path->rows)
                    accept_new = false; /* old dominates new */
            }
            break;
        default:

            /*
             * can't get here, but keep this case to keep compiler
             * quiet
             */
            break;
    }
    ```

## Genetic Algorithm

As a kind of evolutionary algorithm, the genetic algorithm draws on the natural selection and genetic mechanisms of Darwinian evolution, producing optimal individuals by simulating natural selection and survival of the fittest.

After a certain number of original individuals are generated, new chromosomes are produced by arranging and combining genes, and the next-generation chromosomes are obtained through chromosome crossover and mutation. To select excellent chromosomes, a fitness function is established to compute a fitness value, and chromosomes with low fitness are eliminated. Through continuous inheritance and mutation among individuals, the best individual gradually evolves. Mapping this process onto problem solving, an individual corresponds to a solution of the problem, and intergenerational inheritance makes the solutions converge toward the optimal solution.

Unlike dynamic programming, which decomposes a problem into several independent sub-problems, the genetic algorithm is a selection process: it enlarges the solution space by constructing new chromosomes through crossover and keeps screening the space with the fitness function, promoting good genes and eliminating bad ones. As a result, the solution obtained by the genetic algorithm is not necessarily the global optimum, as it is with dynamic programming, but improved crossover and mutation can bring it as close as possible to the global optimum.

Thanks to its efficiency advantage in multi-table joins, the genetic algorithm is a useful supplement to the dynamic programming method in the openGauss database.
The genetic algorithm is used only when the **Enable\_geqo** parameter is enabled and the number of RelOptInfo structures to be joined exceeds **Geqo\_threshold** \(12 by default\).

The genetic algorithm is implemented in the following five steps:

\(1\) Pool initialization: Genes are encoded, and multiple chromosomes are generated by randomly arranging and combining the genes. These chromosomes form a new pool. The fitness of each chromosome is also calculated during chromosome generation.

\(2\) Chromosome selection: The chromosomes used for crossover and mutation are selected through random selection \(actually, a probability-based random number generation algorithm, so that excellent chromosomes are more likely to be selected\).

\(3\) Crossover: Chromosomes are crossed over to generate new chromosomes, which are added to the pool.

\(4\) Mutation: Mutation operations are performed on chromosomes to generate new chromosomes, which are added to the pool.

\(5\) Fitness calculation: Bad chromosomes are eliminated.

For example, if the genetic algorithm is used to solve a travelling salesman problem \(TSP\), the cities are the genes, a path travelling through every city is a chromosome, the total length of the path is the fitness, and the fitness function screens out relatively long paths and retains relatively short ones. The algorithm procedure is as follows:

\(1\) Pool initialization: The cities are numbered and arranged into groups by number, generating multiple new paths \(chromosomes\). The overall path length \(fitness\) is then calculated from the distances between cities, and the new paths form a pool.

\(2\) Chromosome selection: Two paths are selected for crossover \(note that a city must not appear twice in the new chromosome generated by crossover\), and the path length is calculated for the new path generated by the crossover operation.

\(3\) Mutation: A chromosome is randomly selected for mutation \(a common method is to exchange the positions of two cities in the path\), and the path length is calculated for the new path obtained after the mutation operation.

\(4\) Fitness calculation: All paths in the pool are sorted in ascending order of path length, and the paths ranked at the bottom are eliminated.

The genetic algorithm of the openGauss database imitates this solution to the TSP: RelOptInfo is the gene, the finally generated join tree is the chromosome, the total cost of the join tree is the fitness, and the fitness function filters by path cost. However, the join path search in openGauss differs slightly from the TSP path search. In the TSP there is no connectivity problem: any two cities are connected, and the distance between any two cities can always be calculated. In the database, join constraints may make two tables unjoinable, so a join tree may fail to be generated. Note also that the implementation in the openGauss database differs slightly from a common genetic algorithm: it has no mutation process and generates new chromosomes only by crossover. A toy sketch of these steps follows.
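The following compact sketch strings the steps together for permutations of four "tables", with a toy fitness function and a simple order-preserving crossover. Everything here is illustrative: the real openGauss implementation spans geqo\_pool.cpp, geqo\_selection.cpp, and the crossover files, and uses the cost of the generated join tree as the fitness.

```
#include <stdio.h>
#include <stdlib.h>

#define NTABLES 4
#define POOL_SIZE 8
#define GENERATIONS 50

typedef struct { int genes[NTABLES]; double worth; } Chrom;

/* Toy fitness: pretend neighboring "tables" join cheaply, so the cost is
 * the sum of gaps between adjacent gene values (lower is better). */
static double fitness(const int *g)
{
    double cost = 0.0;
    for (int i = 0; i + 1 < NTABLES; i++)
        cost += abs(g[i + 1] - g[i]);
    return cost;
}

/* Order-preserving crossover: copy a prefix of parent 1, then fill the
 * remaining positions with parent 2's genes in their original order. */
static void crossover(const int *p1, const int *p2, int *child)
{
    int used[NTABLES + 1] = {0};
    int k = NTABLES / 2, pos = k;
    for (int i = 0; i < k; i++) { child[i] = p1[i]; used[p1[i]] = 1; }
    for (int i = 0; i < NTABLES; i++)
        if (!used[p2[i]])
            child[pos++] = p2[i];
}

static int cmp_worth(const void *a, const void *b)
{
    double d = ((const Chrom *)a)->worth - ((const Chrom *)b)->worth;
    return (d > 0) - (d < 0);
}

int main(void)
{
    Chrom pool[POOL_SIZE];

    /* (1) Pool initialization: simple rotations stand in for the random
     * shuffling described in the Pool Initialization section. */
    for (int c = 0; c < POOL_SIZE; c++) {
        for (int i = 0; i < NTABLES; i++)
            pool[c].genes[i] = ((i + c) % NTABLES) + 1;
        pool[c].worth = fitness(pool[c].genes);
    }
    qsort(pool, POOL_SIZE, sizeof(Chrom), cmp_worth);

    for (int gen = 0; gen < GENERATIONS; gen++) {
        /* (2) Selection: bias toward the better half of the pool. */
        const Chrom *p1 = &pool[rand() % (POOL_SIZE / 2)];
        const Chrom *p2 = &pool[rand() % (POOL_SIZE / 2)];
        /* (3) Crossover: produce a new chromosome (no mutation, as in openGauss). */
        Chrom child;
        crossover(p1->genes, p2->genes, child.genes);
        /* (5) Fitness: keep the child only if it beats the worst individual. */
        child.worth = fitness(child.genes);
        if (child.worth < pool[POOL_SIZE - 1].worth) {
            pool[POOL_SIZE - 1] = child;
            qsort(pool, POOL_SIZE, sizeof(Chrom), cmp_worth);
        }
    }
    printf("best join order:");
    for (int i = 0; i < NTABLES; i++)
        printf(" t%d", pool[0].genes[i]);
    printf("  (toy cost %.0f)\n", pool[0].worth);
    return 0;
}
```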
The input parameters are **root** \(querying the optimized context information\), **number\_of\_rels** \(number of RelOptInfo structures to be joined\), and **initial\_rels** \(all base tables\). + +### File Directory Structure + +As a relatively independent optimizer module, genetic algorithm has its own file directory structure, as shown in Table 6-17. + +Table 6-17 Optimizer file directory structure + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| File | Function |
| --- | --- |
| geqo_copy.cpp | Gene copy function, that is, the geqo_copy function. |
| geqo_cx.cpp | Cycle crossover (CX) algorithm function. |
| geqo_erx.cpp | Implements edge recombination crossover (ERX) and provides the gimme_edge_table function. |
| geqo_eval.cpp | Calculates the fitness and calls the make_one_rel function to generate the join relationship. |
| geqo_main.cpp | Genetic algorithm entry, that is, the main function geqo. |
| geqo_misc.cpp | Genetic algorithm information printing function, which is an auxiliary function. |
| geqo_mutation.cpp | Gene mutation function, that is, the geqo_mutation function, which is called when the cx function fails. |
| geqo_ox1.cpp | Order crossover algorithm mode 1 (OX1) function. |
| geqo_ox2.cpp | Order crossover algorithm mode 2 (OX2) function. |
| geqo_pmx.cpp | Partially matched crossover (PMX) function. |
| geqo_pool.cpp | Manages the gene pool of the genetic algorithm. A gene pool is the collection of all individuals (chromosomes, including the new chromosomes obtained as multiple tables are joined). |
| geqo_px.cpp | Position crossover (PX) algorithm function. |
| geqo_random.cpp | Random number functions of the genetic algorithm, which are used to randomly generate mutation content. |
| geqo_recombination.cpp | Recombination algorithm of the genetic algorithm, that is, the init_tour function. |
| geqo_selection.cpp | Random individual selection function of the genetic algorithm, that is, the geqo_selection function. |
These files are stored in **src/gausskernel/optimizer/geqo** as modules of the optimizer genetic algorithm. We will interpret the code in these files in later sections.

### 2. Pool Initialization

Before using the genetic algorithm, you can use the value of **Geqo\_threshold** to adjust the triggering condition. To facilitate code interpretation, the threshold condition is reduced to 4 here \(that is, the genetic algorithm is used when the number of RelOptInfo structures, or base tables, is 4\). In the following code interpretation, four tables t1, t2, t3, and t4 are used as an example.

As the gene of the genetic algorithm, RelOptInfo needs to be encoded first. The openGauss database uses real number encoding, that is, \{1,2,3,4\} represents the four tables t1, t2, t3, and t4, respectively.

Then, the size of the pool is obtained by using the gimme\_pool\_size function. The size of the pool is affected by two parameters: **Geqo\_pool\_size** and **Geqo\_effort**. The pool is represented by the Pool structure, and a chromosome is represented by the Chromosome structure. The code is as follows:

```
/* Chromosome structure */
typedef struct Chromosome {
    /*
     * string is an integer array that represents one ordering of the genes
     * and corresponds to a join tree.
     * For example, {1,2,3,4} corresponds to t1 JOIN t2 JOIN t3 JOIN t4.
     * For example, {2,3,1,4} corresponds to t2 JOIN t3 JOIN t1 JOIN t4.
     */
    Gene* string;
    Cost worth; /* Fitness of the chromosome, which is actually a path cost. */
} Chromosome;

/* Pool structure */
typedef struct Pool {
    Chromosome* data;  /* Chromosome array. Each element of the array is a join tree. */
    int size;          /* Number of chromosomes, that is, the number of join trees in data, generated by gimme_pool_size. */
    int string_length; /* Number of genes in each chromosome, which is the same as the number of base tables. */
} Pool;
```

In addition, the number of crossover generations is obtained by using the gimme\_number\_generations function. A larger number of crossover generations means that more new chromosomes are generated and a better solution is more likely to be found, but it also affects performance. You can adjust the number of generations by setting the **Geqo\_generations** parameter.

The variables in the structure are as follows:

\(1\) The number of chromosomes \(Pool.size\), which is determined by gimme\_pool\_size.

\(2\) The number of genes in each chromosome \(Pool.string\_length\), which is the same as the number of base tables.

Then, the chromosomes can be generated. Each chromosome is generated by using the Fisher-Yates shuffle algorithm; finally, Pool.size chromosomes are generated.
The algorithm is implemented as follows:

```
/* Initialize the gene sequence to {1,2,3,4}. */
for (i = 0; i < num_gene; i++)
    tmp[i] = (Gene)(i + 1);

remainder = num_gene - 1; /* Define the number of remaining genes. */

/* Implement the shuffle method: randomly select a gene multiple times as part of the gene encoding. */
for (i = 0; i < num_gene; i++) {
    /* Choose a value between 0 and remainder, inclusive. */
    next = geqo_randint(root, remainder, 0);
    /* Output that element of the tmp array. */
    tour[i] = tmp[next]; /* Gene encoding */
    /* And delete it. */
    tmp[next] = tmp[remainder]; /* Update the remaining gene sequence. */
    remainder--;
}
```

Table 6-18 describes the process of generating a chromosome. It is assumed that the four random results are \{1, 1, 1, 0\}.

Table 6-18 Process of generating a chromosome

| Gene Candidate Set (tmp) | Result Set (tour) | Random Number Range | Random Number | Description |
| --- | --- | --- | --- | --- |
| 1 2 3 4 | 2 | 0–3 | 1 | Assume that the random number is 1. The first gene of the result set is tmp[1], whose value is 2. The candidate set tmp is updated: the unselected tail value is moved to the selected position. |
| 1 4 3 | 2 4 | 0–2 | 1 | Assume that the random number is 1. The second gene of the result set is 4. The candidate set tmp is updated again. |
| 1 3 | 2 4 3 | 0–1 | 1 | Assume that the random number is 1. The third gene of the result set is 3. Because the tail value is selected, the candidate set does not need to be updated. |
| 1 | 2 4 3 1 | 0–0 | 0 | The last gene is 1. |
After a chromosome is randomly generated multiple times, a pool is obtained. It is assumed that there are four chromosomes in total in the pool; the structure of the pool is shown in Figure 6-13.

![](../figures/45.png)

Figure 6-13 Chromosome structure

Then, fitness \(worth\) is calculated for each chromosome. The process of calculating fitness is actually the process of generating a join tree according to the gene encoding sequence of the chromosome and calculating the cost of this join tree.

In the openGauss database, each chromosome uses a left-deep tree by default. Therefore, once the gene encoding of a chromosome is determined, its join tree is determined accordingly. For example, for the chromosome \{2, 4, 3, 1\}, the corresponding join tree is \(\(\(t2, t4\), t3\), t1\), as shown in Figure 6-14.

![](../figures/zh-cn_image_0000001212089804.png)

Figure 6-14 Chromosome join tree

The openGauss database generates fitness by using the geqo\_eval function. The geqo\_eval function first generates a join tree based on the gene encoding of a chromosome, and then calculates the cost of this join tree.

The genetic algorithm uses the gimme\_tree function to generate a join tree; the merge\_clump function is called recursively inside it. The merge\_clump function joins as many tables as possible, generates join subtrees, and records the number of nodes in each join subtree. Then, the join subtrees are recorded in the clumps linked list in descending order of the number of nodes. The code is as follows:

```
/* Traverse all tables cyclically and join as many tables as possible. */
for (rel_count = 0; rel_count < num_gene; rel_count++) {
    int cur_rel_index;
    RelOptInfo* cur_rel = NULL;
    Clump* cur_clump = NULL;

    /* tour represents a chromosome. Here, one gene of the chromosome, that is, a base table, is obtained. */
    cur_rel_index = (int)tour[rel_count];
    cur_rel = (RelOptInfo*)list_nth(private->initial_rels, cur_rel_index - 1);

    /* Generate a clump for the base table. size = 1 indicates that there is only one base table in the current clump. */
    cur_clump = (Clump*)palloc(sizeof(Clump));
    cur_clump->joinrel = cur_rel;
    cur_clump->size = 1;

    /* Attempt to join, perform recursive operations, and record the clump in the clumps linked list. */
    clumps = merge_clump(root, clumps, cur_clump, false);
}
```

The previously generated chromosome \{2, 4, 3, 1\} is used as an example, assuming that:

\(1\) 2 and 4 cannot be joined.

\(2\) 4 and 3 can be joined.

\(3\) 2 and 1 can be joined.

Table 6-19 describes the process of generating a join tree under these conditions.

Table 6-19 Join tree generation process

| Number of Rounds (rel_count) | Join Result Set (clumps) | Description |
| --- | --- | --- |
| Initial | NULL | Create a cur_clump node whose gene is 2 and whose cur_clump.size is 1. |
| 0 | {2} | Because clumps == NULL and cur_clump is not joined with any table, cur_clump is directly added to clumps. |
| 1 | {2}, {4} | Create a cur_clump node whose gene is 4 and whose cur_clump.size is 1, and attempt to join it with the nodes in the clumps linked list. Because 2 and 4 cannot be joined, node 4 is also added to the clumps linked list. |
| 2 | {2} | Create a cur_clump node whose gene is 3 and whose cur_clump.size is 1, traverse the clumps linked list, and attempt to join with 2 and 4 respectively. It is found that the join with 4 can be established. Create a new old_clumps node whose old_clumps.size is 2 based on the join between 3 and 4, and delete node 4 from the clumps linked list. |
|  | {3, 4} {2} | Use the old_clumps node generated by joining 3 and 4 as the parameter to recursively invoke merge_clump, and attempt to join old_clumps with the nodes in the clumps linked list. It is found that they cannot be joined, that is, {3, 4} and {2} cannot be joined. In this case, add old_clumps to clumps. Because old_clumps.size is currently the largest, insert old_clumps at the beginning of clumps. |
| 3 | {3, 4} | Create a cur_clump node whose gene is 1 and whose cur_clump.size is 1. |
|  |  | Traverse the clumps linked list and try to join with {3, 4} and {2} respectively. It is found that the join with 2 can be established. Create a new old_clumps node whose old_clumps.size is 2 based on 1 and 2, and delete node 2 from the clumps linked list. |
|  | {3, 4} {1, 2} | Use the new old_clumps node generated by joining 1 and 2 as the parameter to recursively invoke merge_clump, and attempt to join old_clumps with the nodes in the clumps linked list. If the join fails, add old_clumps to clumps. Because old_clumps.size is 2, insert old_clumps at the end of clumps. |
According to the steps in the example, the process of the merge\_clump function is to continuously attempt to generate a larger clump.

```
/* If a join can be generated, try to generate a join with more nodes through recursion. */
if (joinrel != NULL) {
    ...
    /* Generate a new join node and increase the number of joined nodes. */
    old_clump->size += new_clump->size;
    pfree_ext(new_clump);

    /* Delete the joined node from the clumps linked list. */
    clumps = list_delete_cell(clumps, lc, prev);
    /* Use clumps and the newly generated join node (old_clump) as parameters to continue to generate joins. */
    return merge_clump(root, clumps, old_clump, force);
}
```

According to the example in the preceding table, the **clumps** linked list finally contains two nodes, which are two join subtrees. After **force** is set to **true**, the system attempts to join the two nodes again.

```
/* If there are multiple nodes in clumps, the join tree has not been generated successfully. */
if (list_length(clumps) > 1) {
    ...
    foreach(lc, clumps) {
        Clump* clump = (Clump*)lfirst(lc);
        /* Set the force parameter to true and try to join unconditionally. */
        fclumps = merge_clump(root, fclumps, clump, true);
    }
    clumps = fclumps;
}
```

### 3. Operator Selection

After a pool is generated, intergenerational genetic optimization can be performed. Two chromosomes are randomly selected from the pool and crossed over, generating a new chromosome.

Because the chromosomes in the pool are already sorted by fitness, a chromosome with lower fitness \(lower cost\) is better, and better chromosomes should be more likely to pass on their genes. Therefore, chromosomes with lower fitness are preferred when the father and mother chromosomes are selected. The selection process involves the concept of bias, a fixed value in the operator that can be adjusted through the **Geqo\_selection\_bias** parameter \(**2.0** by default\).

```
/* A father chromosome and a mother chromosome are selected by using the linear_rand function. */
first = linear_rand(root, pool->size, bias);
second = linear_rand(root, pool->size, bias);
```

To generate a random number \(x\) that follows a given probability distribution, you first need to know the probability distribution function or the probability density function \(PDF\). The PDF used by the openGauss database is as follows:

![](../figures/zh-cn_image_0000001257142945.gif)

The following cumulative distribution function \(CDF\) is obtained from the PDF:

![](../figures/zh-cn_image_0000001211903084.gif)

Then, a random number that conforms to this probability distribution can be obtained by using the CDF and the inverse function method.

Function:

![](../figures/zh-cn_image_0000001256862999.gif)

Inverse function:

![](../figures/zh-cn_image_0000001212223058.gif)

This is consistent with the implementation of the linear\_rand function in the source code.

![](../figures/zh-cn_image_0000001256982939.png)

The code of the probability-based random number generation algorithm can be extracted for calculation and verification, to analyze the characteristics of the generated random numbers. It is assumed that bias is 2.0, and the PDF is used to calculate the theoretical probability of each interval. For example, for the range from 0.6 to 0.7, the theoretical probability is calculated as follows:

![](../figures/zh-cn_image_0000001257063005.gif)

Figure 6-15 shows the theoretical probability values in each range.

![](../figures/46.png)

Figure 6-15 Theoretical probability value of random number generation

As Figure 6-15 shows, the theoretical probability values of the ranges decrease sequentially. In other words, when a parent chromosome is selected, a chromosome with lower fitness \(lower cost\) is more likely to be selected. An empirical cross-check of this distribution is sketched below.
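As a quick cross-check of Figure 6-15, the following standalone sketch \(not kernel code\) draws samples with the inverse function above, assuming it takes the usual linear_rand form x = (bias - sqrt(bias^2 - 4(bias - 1)u)) / (2(bias - 1)), and prints the empirical frequency of each 0.1-wide range:

```cpp
#include <cmath>
#include <cstdio>
#include <random>

/* Inverse-CDF sampling: maps a uniform u in [0, 1) to x in [0, 1)
 * with a linearly decreasing density, as in linear_rand. */
static double BiasedRand(double u, double bias)
{
    return (bias - std::sqrt(bias * bias - 4.0 * (bias - 1.0) * u)) / (2.0 * (bias - 1.0));
}

int main()
{
    const double bias = 2.0;     /* Default value of Geqo_selection_bias. */
    const int samples = 1000000; /* Illustrative sample count. */
    int hist[10] = {0};

    std::mt19937 rng(42);
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    for (int i = 0; i < samples; i++) {
        double x = BiasedRand(uniform(rng), bias);
        int bucket = (int)(x * 10.0);
        if (bucket > 9)
            bucket = 9; /* Guard against x == 1.0 from floating-point rounding. */
        hist[bucket]++;
    }
    /* The empirical probability of each range should decrease monotonically,
     * so low pool indexes (better fitness) are selected more often. */
    for (int i = 0; i < 10; i++)
        std::printf("[%.1f, %.1f): %.3f\n", i / 10.0, (i + 1) / 10.0, (double)hist[i] / samples);
    return 0;
}
```

With bias set to 2.0, the printed frequencies decrease from about 0.19 for [0.0, 0.1) down to about 0.01 for [0.9, 1.0), and the [0.6, 0.7) bucket lands near the 0.07 value computed in the worked example above.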
### 4. Crossover Operator

After the parent chromosomes are selected by the selection operator, a crossover operation can be performed on them to generate a new child chromosome.

openGauss provides multiple crossover methods: edge recombination crossover, partially matched crossover, cycle crossover, position crossover, and order crossover. In this source code analysis, the position crossover method is taken as an example.

It is assumed that the gene encoding of the selected father chromosome is \{1, 3, 2, 4\} with a fitness of 100, and that of the selected mother chromosome is \{2, 3, 1, 4\} with a fitness of 200. When the child chromosome has not been generated yet and is in the uninitialized state, the statuses of these chromosomes are shown in Figure 6-16.

![](../figures/47.png)

Figure 6-16 Chromosome status

A random number **num\_positions** is generated for the crossover operation. Its value lies between 1/3 and 2/3 of the total number of genes, and it represents the number of genes that the father chromosome passes, position by position, to the child chromosome. The code is as follows:

```
/* num_positions determines the number of genes inherited from the father chromosome to the child chromosome. */
num_positions = geqo_randint(root, 2 * num_gene / 3, num_gene / 3);

/* Select random positions. */
for (i = 0; i < num_positions; i++) {
    /* A position is randomly generated, and the gene at this position of the father chromosome is inherited by the child chromosome. */
    pos = geqo_randint(root, num_gene - 1, 0);

    offspring[pos] = tour1[pos];
    /* Mark this gene as used. The mother chromosome cannot pass the same gene to the child chromosome. */
    city_table[(int) tour1[pos]].used = 1;
}
```

It is assumed that the father chromosome passes two genes, gene 3 and gene 2, to the child chromosome. In this case, the status of the child chromosome is shown in Figure 6-17.

![](../figures/48.png)

Figure 6-17 Current chromosome status

The child chromosome now has two genes: 3 and 2. After these two genes are excluded from the mother chromosome, two genes remain: 1 and 4. These two genes are written into the child chromosome in their order in the mother chromosome, and a new child chromosome is generated, as shown in Figure 6-18.

![](../figures/49.png)

Figure 6-18 New chromosome status

A compact sketch of this two-phase copy is given below.
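The following standalone sketch reproduces the two-phase copy described above \(inherit from the father at chosen positions, then fill from the mother in order\). The function name and the fixed position list are illustrative assumptions; only the four-gene example comes from the figures.

```cpp
#include <cstdio>
#include <vector>

/* Position crossover: the child inherits the father's genes at the given
 * positions, then fills the remaining slots with the mother's unused genes,
 * preserving their order in the mother chromosome. */
static std::vector<int> PositionCrossover(const std::vector<int>& father,
                                          const std::vector<int>& mother,
                                          const std::vector<int>& positions)
{
    std::vector<int> child(father.size(), 0);        /* 0 means "not yet filled". */
    std::vector<bool> used(father.size() + 1, false);

    for (int pos : positions) {                      /* Phase 1: copy from the father. */
        child[pos] = father[pos];
        used[father[pos]] = true;
    }
    size_t next = 0;
    for (int gene : mother) {                        /* Phase 2: fill from the mother in order. */
        if (used[gene])
            continue;
        while (child[next] != 0)
            next++;                                  /* Skip slots already taken. */
        child[next++] = gene;
    }
    return child;
}

int main()
{
    /* The example from Figures 6-16 to 6-18: father {1,3,2,4}, mother {2,3,1,4},
     * and the father passes down the genes at positions 1 and 2 (genes 3 and 2). */
    std::vector<int> child = PositionCrossover({1, 3, 2, 4}, {2, 3, 1, 4}, {1, 2});
    for (int gene : child)
        std::printf("%d ", gene); /* Prints: 1 3 2 4, the new chromosome in Figure 6-18. */
    std::printf("\n");
    return 0;
}
```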
### 5. Fitness Calculation

After the new child chromosome is generated, its fitness is calculated by using the geqo\_eval function, and the chromosome is then added to the pool by using the spread\_chromo function.

```
/* Fitness calculation */
kid->worth = geqo_eval(root, kid->string, pool->string_length);

/* Diffusion of the chromosome into the pool based on its fitness */
spread_chromo(root, kid, pool);
```

Because the chromosomes in the pool must always remain sorted, the spread\_chromo function uses binary search to traverse the pool, comparing the fitness of the chromosomes in the pool with that of the new chromosome to find the position at which to insert the new one. The chromosomes behind that position each move back by one position, and the last chromosome is eliminated. If the fitness of the new chromosome is the highest \(that is, its cost is the highest\), it is eliminated directly. The code is as follows:

```
/* Use binary search to traverse the chromosomes in the pool. */
top = 0;
mid = pool->size / 2;
bot = pool->size - 1;
index = -1;

/* Chromosome screening */
while (index == -1) {
    /* Moving is required in the following four cases. */
    if (chromo->worth <= pool->data[top].worth) {
        index = top;
    } else if (chromo->worth - pool->data[mid].worth == 0) {
        index = mid;
    } else if (chromo->worth - pool->data[bot].worth == 0) {
        index = bot;
    } else if (bot - top <= 1) {
        index = bot;
    } else if (chromo->worth < pool->data[mid].worth) {
        /* The following two cases narrow the search range, because no insertion position has been found yet. */
        bot = mid;
        mid = top + ((bot - top) / 2);
    } else { /* chromo->worth > pool->data[mid].worth */
        top = mid;
        mid = top + ((bot - top) / 2);
    }
}
```

By repeatedly selecting good chromosomes and performing intergenerational crossover, the genetic algorithm continuously adds new chromosomes to the pool, pushing the solution of the algorithm from a local optimum toward the global optimum.

## Summary

This chapter describes the implementation process of the SQL engine, including SQL parsing, query rewriting, and query optimization. The SQL engine involves a large amount of code, featuring high code coupling and complex implementation logic. For better understanding, you are advised to master the overall code process and key structures first, and then summarize them in practice.

diff --git a/content/en/post/2022/Segment-Page-Feature-of-openGauss-for-Solving-File-Storage-Problems.md b/content/en/post/2022/Segment-Page-Feature-of-openGauss-for-Solving-File-Storage-Problems.md
new file mode 100644
index 0000000000000000000000000000000000000000..959e8c8f8280e4ac093067dceb52f1845126ee12
--- /dev/null
+++ b/content/en/post/2022/Segment-Page-Feature-of-openGauss-for-Solving-File-Storage-Problems.md
@@ -0,0 +1,227 @@

+++

title = "Segment-Page Feature of openGauss for Solving File Storage Problems"

date = "2021-10-20"

tags = [ "Segment-Page Feature of openGauss for Solving File Storage Problems"]

archives = "2021-10"

author = "Peng Bao"

summary = "Segment-Page Feature of openGauss for Solving File Storage Problems"

img = "/en/post/2022/title/img16.png"

times = "12:30"

+++

# Segment-Page Feature of openGauss for Solving File Storage Problems

In modern society, data is growing explosively and service requirements in the industry are complex: the amount of data to be stored and the number of tables to be created keep increasing. Each common data table of openGauss corresponds to one logical large file \(maximum size: 32 TB\).
This logical file is divided into multiple physical files of a fixed size, which are stored in the corresponding database directory. Therefore, as the data volume of each table increases, the number of files required for underlying data storage grows gradually. In addition, openGauss provides features such as hash bucket tables and large partitioned tables, which split each data table into several sub-tables, so the number of files required at the bottom layer increases multiplicatively. This storage management mode therefore has the following problems:

1. It depends heavily on the file system and cannot perform fine-grained control, which limits maintainability.
2. A large amount of data produces too many file handles. Currently, only virtual handles can mitigate this problem, which affects system performance.
3. Too many small files cause random I/O problems in scenarios such as full build and full backup, affecting performance.

To resolve the foregoing problems, openGauss introduces a segment-page storage management mechanism, which is similar to the segment-page memory management of an operating system but differs greatly in its implementation.

## Implementation Principle of the Segment-Page Mechanism

With segment-page storage management, tablespaces and data files are logically organized into segments, extents, and pages/blocks for storage allocation and management, as shown in the following figure. Specifically, a database \(in a tablespace\) has only one segment space. The actual physical storage may be one file or may be split into multiple files. Space is allocated to all tables in the database from this segment space, so the number of tables is irrelevant to the number of physical files. Each table has a logical segment, and all data in the table is stored in that segment. Multiple extents are mounted to each segment, and each extent is a range of contiguous physical pages. Extent sizes can be flexibly adjusted based on service requirements to avoid wasting storage space.

![](../figures/zh-cn_image_0000001207699778.jpg)

Figure 1 Segment-page storage design

Segment-page files can be automatically expanded until the disk space is used up or the limit threshold for the tablespace is reached. Segment-page storage does not automatically reclaim disk space. After some data tables are deleted, the space they occupied in the segment-page file is reserved and the disk space is not released. The reserved space is reused by tables that are expanded or created later. If you do not need to reuse the space, you can manually call system functions to recycle and then release the disk space.

In the internal implementation, each segment corresponds to a physical file that would originally be stored in page mode. For example, each partitioned table and each bucket in a hash bucket table has an independent segment. Multiple extents are mounted to each segment. Each extent is contiguous within a file, but extents may not be contiguous with each other. A segment can be dynamically expanded by adding new extents, but an extent cannot be directly reclaimed. You can reclaim storage space by segment by truncating or clustering the entire table.

Currently, four extent sizes are supported: 64 KB, 1 MB, 8 MB, and 64 MB. For a given segment, the size of the extent added in each expansion follows a fixed rule: the first 16 extents are 64 KB each, the 17th to 143rd extents are 1 MB each, and the same pattern applies to the larger sizes.
The following table lists the extent specifications of each group.

Table 1 Classification of extents stored in a segment

| Group | Extent Size | Extent Page Count | Extent Count Range | Total Page Count | Total Size |
| --- | --- | --- | --- | --- | --- |
| 1 | 64 KB | 8 | [1, 16] | 128 | 1 MB |
| 2 | 1 MB | 128 | [17, 143] | 16K | 128 MB |
| 3 | 8 MB | 1024 | [144, 255] | 128K | 1 GB |
| 4 | 64 MB | 8192 | [256, …] | … | … |
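As a quick sanity check of Table 1, the following small sketch \(not openGauss code\) maps an extent's ordinal number within a segment to its size, following the grouping rule above, and accumulates the running totals:

```cpp
#include <cstdio>

/* Extent size in KB for the n-th extent of a segment (1-based),
 * following the grouping rule described above. */
static long ExtentSizeKB(int n)
{
    if (n <= 16)
        return 64;        /* Group 1: 64 KB extents. */
    if (n <= 143)
        return 1024;      /* Group 2: 1 MB extents. */
    if (n <= 255)
        return 8192;      /* Group 3: 8 MB extents. */
    return 64 * 1024;     /* Group 4: 64 MB extents. */
}

int main()
{
    const long pageKB = 8; /* An 8 KB page, so a 64 KB extent holds 8 pages. */
    long totalKB = 0;

    for (int n = 1; n <= 255; n++) {
        totalKB += ExtentSizeKB(n);
        /* Print the running totals at the last extent of each closed group. */
        if (n == 16 || n == 143 || n == 255) {
            std::printf("after extent %3d: %6ld pages, %4ld MB in total\n",
                        n, totalKB / pageKB, totalKB / 1024);
        }
    }
    return 0;
}
```

The printed totals \(128 pages/1 MB, 16K pages/128 MB, and 128K pages/1 GB\) match the last two columns of Table 1, which are therefore cumulative across groups rather than per group.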
## Guide to Using Segment-Page Tables

When using the CREATE TABLE statement to create a table, you can specify **segment=on** so that a row-store table stores data in segment-page mode. If **hashbucket=on** is specified, **segment=on** is used forcibly by default. Currently, segment-page storage does not support column-store tables. Segment-page tablespaces are created automatically and do not require additional commands.

- Set **segment** to **on** to create a segment-page common table.

  create table t1\(a int, b int, PRIMARY KEY\(a,b\)\) with\(segment=on\);

  ![](../figures/zh-cn_image_0000001207539820.jpg)

- Set **hashbucket** to **on** to create a segment-page hash bucket table.

  create table t1\(a int, b int, PRIMARY KEY\(a,b\)\) with\(hashbucket=on\);

  ![](../figures/zh-cn_image_0000001252579733.jpg)

To help users make better use of the segment-page storage function, openGauss provides two built-in system functions to display the extent usage. Users can use these two views to determine whether to reclaim space and which part of the data to reclaim.

- **pg\_stat\_segment\_space\_info\(Oid tablespace, Oid database\)**: takes the OIDs of a tablespace and a database and displays the usage information about all extent groups in the tablespace.

  Table 2 pg\_stat\_segment\_space\_info view column information

| Name | Description |
| --- | --- |
| extent_size | Extent specification of the extent group, in blocks. |
| total_blocks | Total number of blocks in the physical file. |
| meta_data_blocks | Number of blocks occupied by the metadata managed in the tablespace, including the space header and map pages but excluding the segment head. |
| used_data_blocks | Number of blocks used for storing data, including the segment head. |
| utilization | Percentage of used blocks to total blocks, that is, (used_data_blocks + meta_data_blocks)/total_blocks. |
| high_water_mark | High-water mark, indicating the maximum physical page number among the allocated extents. Blocks above the high-water mark are unused and can be reclaimed directly. |
  ![](../figures/zh-cn_image_0000001207699780.jpg)

- **pg\_stat\_segment\_extent\_usage\(Oid tablespace, Oid database, uint32 extent\_type\)**: returns, for one extent group at a time, the usage information of each allocated extent. **extent\_type** indicates the type of the extent group and is an integer ranging from 1 to 5. If the value is not within this range, an error is reported.

  Table 3 pg\_stat\_segment\_extent\_usage view column information

| Name | Description |
| --- | --- |
| start_block | Start physical page number of the extent. |
| extent_size | Extent size. |
| usage_type | Usage type of the extent, for example, segment head or data extent. |
| owner_location | Location of the owner object that points to this extent. For example, the owner of a data extent is the head of the segment to which the data extent belongs. |
| special_data | Position of the extent within its owner. The value of this column is related to the usage type. For example, the special data of a data extent is its extent ID in the segment to which it belongs. |
- **gs\_spc\_shrink\(Oid tablespace, Oid database, uint32 extent\_type\)**: shrinks one extent group at a time. The target size after shrinking is calculated automatically as the active data volume plus 128 MB, rounded up to a multiple of 128 MB.

## Summary

openGauss provides the segment-page solution to solve the problem of too many underlying file handles when there are a large number of hash bucket tables and large partitioned tables. In the segment-page solution, a table corresponds to a logical segment, and different segments are stored in one underlying physical file, greatly reducing the number of physical file handles at the bottom layer. Even with a large amount of data, the situation in which ordinary tables produce too many file handles is avoided, and system maintainability is improved. In addition, in scenarios such as full build and full backup, the random I/O caused by too many small files is reduced, improving system I/O performance. The parameters of the current segment-page table are fixed. In the future, openGauss can use AI technology to tune the parameters of the segment-page storage mechanism automatically, providing users with more intelligent and better-performing segment-page storage policies.

diff --git a/content/en/post/2022/Setting-up-One-Primary-and-Two-Standby-openGauss-Databases-on-Kubernetes.md b/content/en/post/2022/Setting-up-One-Primary-and-Two-Standby-openGauss-Databases-on-Kubernetes.md
new file mode 100644
index 0000000000000000000000000000000000000000..592999cf6577d10a1ed54e3b9bf42571a90d01c1
--- /dev/null
+++ b/content/en/post/2022/Setting-up-One-Primary-and-Two-Standby-openGauss-Databases-on-Kubernetes.md
@@ -0,0 +1,466 @@

+++

title = "Setting up One Primary and Two Standby openGauss Databases on Kubernetes"

date = "2021-10-09"

tags = [ "Setting up One Primary and Two Standby openGauss Databases on Kubernetes"]

archives = "2021-10"

author = "Bin Zhou"

summary = "Setting up One Primary and Two Standby openGauss Databases on Kubernetes"

img = "/en/post/2022/title/img2.png"

times = "12:30"

+++

# Setting up One Primary and Two Standby openGauss Databases on Kubernetes

Initialize the environment on the following hosts, each acting in the **master** or **node** role.

| IP | Hostname | Role |
| --- | --- | --- |
| 192.168.0.1 | k8smaster | master |
| 192.168.0.2 | k8snode01 | node |
+ +Disable **firewalld**. + +- systemctl stop firewalld +- systemctl disable firewalld + +## 1. Update Docker. + +``` +rpm -qa|grep docker +yum remove docker +curl -fsSL https://get.docker.com/ | sh +systemctl start docker +systemctl enable docker +``` + +## 2. Prepare the Kubernetes source. + +``` +vim /etc/yum.repos.d/kubernetes.repo + +[kubernetes] +name=Kubernetes +baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 +gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg + +yum install -y kubeadm kubectl etcd +``` + +## 3. Check image names required by kubeadm. + +``` +[root@ecs-66cc dockerimages]# kubeadm config images list + +k8s.gcr.io/kube-apiserver:v1.21.1 +k8s.gcr.io/kube-controller-manager:v1.21.1 +k8s.gcr.io/kube-scheduler:v1.21.1 +k8s.gcr.io/kube-proxy:v1.21.1 +k8s.gcr.io/pause:3.4.1 +k8s.gcr.io/etcd:3.4.13-0 +k8s.gcr.io/coredns/coredns:v1.8.0 +``` + +## 4. Install images required for Kubernetes. + +``` +docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.1 +docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.1 +docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.1 +docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.21.1 +docker pull registry.aliyuncs.com/google_containers/pause:3.4.1 +docker pull registry.aliyuncs.com/google_containers/etcd:3.4.13-0 +docker pull coredns/coredns:1.8.0 +``` + +## 5. Modify the Docker tags to match those required by kubeadm. + +- Download images from Chinese sources. + + ``` + docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.1 k8s.gcr.io/kube-apiserver:v1.21.1 + docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.1 k8s.gcr.io/kube-controller-manager:v1.21.1 + docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.1 k8s.gcr.io/kube-scheduler:v1.21.1 + docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.21.1 k8s.gcr.io/kube-proxy:v1.21.1 + docker tag registry.aliyuncs.com/google_containers/pause:3.4.1 k8s.gcr.io/pause:3.4.1 + docker tag registry.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0 + docker tag docker.io/coredns/coredns:1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0 + ``` + +- Delete invalid images. + + ``` + docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.1 + docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.1 + docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.1 + docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.21.1 + docker rmi registry.aliyuncs.com/google_containers/pause:3.4.1 + docker rmi registry.aliyuncs.com/google_containers/etcd:3.4.13-0 + docker rmi coredns/coredns:1.8.0 + ``` + + +## 6. Write Kubernetes initialization configurations and initialize Kubernetes as **master**. + +kubeadm.yaml + +``` +apiVersion: kubeadm.k8s.io/v1beta2 +clusterName: kubernetes +kind: ClusterConfiguration +kubernetesVersion: v1.21.1 +controllerManager: + extraArgs: + horizontal-pod-autoscaler-use-rest-clients: "true" + horizontal-pod-autoscaler-sync-period: "10s" + node-monitor-grace-period: "10s" +apiServer: + extraArgs: + runtime-config: "api/all=true" +``` + +Copy the configuration file to Kubernetes and specify it during initialization. 
+ +``` +cp kubeadm.yaml /etc/kubernetes/manifests/ +kubeadm init --config kubeadm.yaml +``` + +After the operation is successful, retain the following information for later use: + +``` +kubeadm join 192.168.0.35:6443 --token ru2883.u4rhwkx5oqrol9at \ + --discovery-token-ca-cert-hash sha256:f2dbe7ce49b322e8145b6e9b4303e56468ad1352daabecb797f7bd161a64e018 +``` + +Perform initialization. + +``` +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config +``` + +Install the network plugin. + +``` +kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" +``` + +## 7. Join nodes. + +After the node is installed, it does not need to be initialized. Run the **kubeadm join** command to add the node to the primary node. + +``` +kubeadm join 192.168.0.35:6443 --token ru2883.u4rhwkx5oqrol9at \ + --discovery-token-ca-cert-hash sha256:f2dbe7ce49b322e8145b6e9b4303e56468ad1352daabecb797f7bd161a64e018 +``` + +## 8. Import images as **master** and **node**. + +``` +docker load < opengauss.tar.gz +``` + +## 9. Create a service \(SVC\) as **master**. + +Create an SVC for pods:kubectl create -f opengauss-svc.yaml + +The content of the **opengauss-svc.yaml** file is as follows: + +``` +apiVersion: v1 +kind: Service +metadata: + name: opengauss-service-1 +spec: + ports: + - port: 5432 + protocol: TCP + targetPort: 5432 + name: gsql + - port: 5434 + protocol: TCP + targetPort: 5434 + name: localport + - port: 2380 + protocol: TCP + targetPort: 2380 + name: etcd1-service + - port: 2379 + protocol: TCP + targetPort: 2379 + name: etcd1-local + selector: + app: opengauss-1 + clusterIP: None + +--- + +apiVersion: v1 +kind: Service +metadata: + name: opengauss-service-2 +spec: + ports: + - port: 5432 + protocol: TCP + targetPort: 5432 + name: gsql + - port: 5434 + protocol: TCP + targetPort: 5434 + name: localport + - port: 2380 + protocol: TCP + targetPort: 2380 + name: etcd1-service + - port: 2379 + protocol: TCP + targetPort: 2379 + name: etcd1-local + selector: + app: opengauss-2 + clusterIP: None + +--- + +apiVersion: v1 +kind: Service +metadata: + name: opengauss-service-3 +spec: + ports: + - port: 5432 + protocol: TCP + targetPort: 5432 + name: gsql + - port: 5434 + protocol: TCP + targetPort: 5434 + name: localport + - port: 2380 + protocol: TCP + targetPort: 2380 + name: etcd1-service + - port: 2379 + protocol: TCP + targetPort: 2379 + name: etcd1-local + selector: + app: opengauss-3 + clusterIP: None +``` + +## 10. Create a pod as **master**. + +Create the primary and standby pods of openGauss. 
+ +kubectl create -f opengauss-pod.yaml + +The content of the **opengauss-pod.yaml** file is as follows: + +``` +apiVersion: v1 +kind: Pod +metadata: + name: opengauss-1 + labels: + app: opengauss-1 +spec: + restartPolicy: Never + containers: + - name: opengauss-1 + image: opengauss:1.0.5 + imagePullPolicy: Never + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/lib/opengauss/data/ + name: openguass-volume + ports: + - containerPort: 5432 + name: opengauss + env: + - name: HOST_NAME + value: opengauss-1 + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PEER_IPS + value: opengauss-service-2,opengauss-service-3 + - name: PEER_HOST_NAMES + value: opengauss-2,opengauss-3 + - name: PORT + value: "5432" + - name: GS_PASSWORD + value: "Test@56789" + - name: SERVER_MODE + value: primary + - name: db_config + value: + volumes: + - name: openguass-volume + hostPath: + path: /data/opengauss-1/ + type: DirectoryOrCreate + +--- + +apiVersion: v1 +kind: Pod +metadata: + name: opengauss-2 + labels: + app: opengauss-2 +spec: + restartPolicy: Never + containers: + - name: opengauss-2 + image: opengauss:1.0.5 + imagePullPolicy: Never + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/lib/opengauss/data/ + name: openguass-volume + ports: + - containerPort: 5432 + name: opengauss + env: + - name: HOST_NAME + value: opengauss-2 + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PEER_IPS + value: opengauss-service-1,opengauss-service-3 + - name: PEER_HOST_NAMES + value: opengauss-1,opengauss-3 + - name: PORT + value: "5432" + - name: GS_PASSWORD + value: "Test@56789" + - name: SERVER_MODE + value: standby + - name: db_config + value: + volumes: + - name: openguass-volume + hostPath: + path: /data/opengauss-2/ + type: DirectoryOrCreate + +--- + +apiVersion: v1 +kind: Pod +metadata: + name: opengauss-3 + labels: + app: opengauss-3 +spec: + restartPolicy: Never + containers: + - name: opengauss-3 + image: opengauss:1.0.5 + imagePullPolicy: Never + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/lib/opengauss/data/ + name: openguass-volume + ports: + - containerPort: 5432 + name: opengauss + env: + - name: HOST_NAME + value: opengauss-3 + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PEER_IPS + value: opengauss-service-1,opengauss-service-2 + - name: PEER_HOST_NAMES + value: opengauss-1,opengauss-2 + - name: PORT + value: "5432" + - name: GS_PASSWORD + value: "Test@56789" + - name: SERVER_MODE + value: standby + - name: db_config + value: + volumes: + - name: openguass-volume + hostPath: + path: /data/opengauss-3/ + type: DirectoryOrCreate +``` + +## 11. Test the database as **master**. + +``` +1. Access the primary node of the database. + +kubectl exec -it opengauss-1 -- /bin/bash +2. Switch the user. +su omm +3. Access the database. +gsql +``` + +## 12. Common Commands + +All commands are executed as **master**. + +``` +View cluster nodes. +kubectl get node +View cluster pods. +kubectl get pod --all-namespaces +Check the cluster service. +kubectl get svc --all-namespaces +Access the container. +kubectl exec -it Container name (pod name for a single container) -n opengauss -- /bin/bash +Run the following command to view pod or SVC details: +kubectl describe pod/svc pod/SVC name -n pod/SVC namespaces +View the log information. 
kubectl logs Pod name -n Pod namespaces
```

diff --git a/content/en/post/2022/The-Combination-of-openGauss-Database-and-AI.md b/content/en/post/2022/The-Combination-of-openGauss-Database-and-AI.md
new file mode 100644
index 0000000000000000000000000000000000000000..875e5161d59823d3d1c65262cd78453dc6d342da
--- /dev/null
+++ b/content/en/post/2022/The-Combination-of-openGauss-Database-and-AI.md
@@ -0,0 +1,125 @@

+++

title = "The Combination of openGauss Database and AI"

date = "2020-10-10"

tags = [ "The Combination of openGauss Database and AI"]

archives = "2020-10"

author = "Tianqing Wang"

summary = "The Combination of openGauss Database and AI"

img = "/en/post/2022/title/img8.png"

times = "12:30"

+++

# The Combination of openGauss Database and AI

openGauss has powerful computing performance and efficient data processing capabilities. It is also a native AI database that supports self-tuning parameters, SQL diagnosis, fault self-diagnosis, and full lifecycle management. The following describes the AI features of openGauss.

## X-Tuner: database parameter tuning framework.

A database is a very complex system. It contains a large number of configuration parameters that control memory allocation, I/O optimization, query plan cost, parallelism, logging, data recovery, and other behaviors. Database performance varies with these configuration parameters under specific loads, and many database administrators and experts try to adjust them to achieve good performance.

However, database parameter tuning is an NP-hard problem. People generally adjust parameters based on their own experience and their understanding of the current environment, so tuning involves a large number of uncertainties. To address this, database engineers have tried to build intelligent systems that perform automatic tuning, such as postgresqltuner.pl for PostgreSQL and mysqltuner.pl for MySQL. Similarly, with the emergence of AI technologies, AI-based tuning solutions, such as OtterTune, have appeared. Nevertheless, all of these tuning solutions have the following restrictions:

- The database has hundreds of parameters. DBAs cannot adjust so many parameters one by one.
- Traditional machine learning requires a large amount of training data, which is difficult to collect, especially samples with good performance.
- Some methods use reinforcement learning, but they do not consider the relationship between database status and database parameters during tuning.

Considering the preceding restrictions, openGauss developed its own database parameter tuning framework, X-Tuner. Compared with traditional methods, X-Tuner has the following features:

- **1. **Robust and fault-tolerant:

  The X-Tuner framework is designed with a large number of fault tolerance and emergency handling mechanisms. When the system or database is faulty, the algorithm can exit normally without affecting the system.

- **2. **Flexible deployment and easy to use:

  X-Tuner is developed based on Python 3.0+ and supports both Linux and Windows, so users can deploy it easily. X-Tuner supports local and remote connection modes, which covers a variety of user situations.

- **3. **Easy to understand and facilitating secondary development:

  X-Tuner is strictly organized into a benchmark test module, a tuning algorithm module, a connection module, and a log module.
The layered structure of X-Tuner is easy to understand, and users can tune it or develop their own functional modules based on it.

  Tests show that the X-Tuner parameter tuning framework, based on reinforcement learning and heuristic algorithms, can greatly improve system performance with minimal memory usage. Figure 1 shows the parameter tuning process.

  ![](../figures/zh-cn_image_0000001251969031.jpg)

  Figure 1 X-Tuner parameter tuning process

With the X-Tuner technology, HUAWEI CLOUD DAS can intelligently recommend parameters based on the historical loads of user databases. Tests show that overall performance improves by about 20%, greatly saving cloud computing resources and reducing production costs.

## SQLDiag: intelligently identifies SQL statements.

SQLDiag is a framework for estimating SQL statement execution duration in openGauss. Existing prediction techniques are mainly model predictions based on execution plans, but such solutions are suitable mainly for OLAP scenarios and require the complete execution plan of each SQL statement, which greatly limits their use for short queries such as those in OLTP or HTAP workloads.

Different from the preceding solutions, SQLDiag focuses on the historical SQL statements of a database. Because the execution duration of similar SQL statements does not differ greatly over a short period, SQLDiag can find similar SQL statements in historical data and, based on an SQL vectorization technique and a time series prediction algorithm, predict the execution duration of each SQL statement. In this way, potential slow SQL statements can be identified. This framework has the following advantages:

- It does not require SQL execution plans, so it has no impact on database performance.
- SQLDiag is widely applicable: it can even be used for NoSQL after reconstruction, while many other algorithms in the industry target only one scenario, such as OLTP or OLAP.
- The framework is robust and easy to understand. Users can design their own prediction models by simply modifying the framework.

  ![](../figures/zh-cn_image_0000001251839693.png)

Figure 2 shows the SQLDiag prediction result.

Figure 2 SQLDiag prediction result

## Intelligent optimizer: database execution cost prediction.

In the many service scenarios that depend on databases, SQL statements are used for operations ranging from account login and order query to report queries and data mining over hundreds of millions of rows. These operations are abstracted and converted at the service application layer and executed in the form of SQL statements, which the SQL engine in the database kernel further optimizes.

Challenges from service logic: During SQL statement processing, service scenarios become increasingly complex and business intelligence tools are widely used, generating SQL statements of varying quality. Some of these statements may consume a large amount of read/write and computing resources, causing the server to block other service statements. Therefore, the upper-layer service components of the database require an SQL diagnosis capability.

Challenges from SQL statements: During query plan optimization, increasingly complex query statements bring new challenges to query optimization. In the early stage of database system development, query optimization mainly depended on rule-based expert systems, that is, a series of optimization rules applied in a strict order.
Such an expert system can be considered a summary of the experience of database optimization experts in common scenarios. As a result, the rule system always generates the same query plan for the same query statement, regardless of the actual data volume and distribution in the database. If a scenario does not match the experience rules, database efficiency cannot be ensured.

The core problem of SQL diagnosis and plan optimization is the evaluation of resource overhead. In mainstream database products, evaluating query resource overhead mainly depends on sampling and analyzing the full data in the database, building statistical models and cost models, and abstracting the plan execution process. This currently faces the following challenges:

- The data sampling and analysis process occupies a large share of the server's disk read/write and CPU computing resources; however, if the sampling rate is reduced to avoid this overhead, the accuracy of the query resource overhead evaluation decreases.
- As service statements are executed continuously, the data scale and distribution change gradually, which invalidates the original data models.

These two types of problems may cause the server to fail to respond for a long time. Is there a way to maintain the accuracy of resource overhead prediction while encroaching on database resources as little as possible? The answer is yes: openGauss provides a query performance prediction function based on online deep learning for database users.

The intelligent optimizer has the following features:

- **1. **One-click modeling and machine learning.

  For SQL performance evaluation, openGauss integrates data collection, cleaning, preprocessing, encoding, and training monitoring. Users only need to configure a few parameters for the model based on the recommendations and call the model prediction API to obtain the performance prediction result after model training is complete.

- **2. **Fine-grained, easily locating performance bottlenecks.

  It supports fine-grained, fragment-level query performance prediction for query plans, helping users locate performance bottlenecks and rewrite statements more effectively. In the future, intelligent SQL optimization based on plan-segment cost and performance prediction will be supported.

- **3. **Flexible deployment, minimizing the impact on database performance.

  The model computing module can be deployed on the cloud or in other environments isolated from the database, based on user requirements. Historical performance data is used for modeling, so no extra resource overhead is incurred by data sampling.

- **4. **Open APIs, benefiting data scientists.

  The HTTPS protocol is used to connect the database kernel to the deep learning model. Open APIs allow users to build custom machine learning jobs into database functions for one-click calling.

According to actual tests, the prediction accuracy of query performance is improved by 40% compared with the native PostgreSQL model.

![](../figures/zh-cn_image_0000001207089084.gif)

Figure 3 Predicted query performance of openGauss compared with the native PostgreSQL database

According to the distribution of the prediction accuracy, the accuracy at the 95th percentile is improved by 3e5 times, and the accuracy at the 75th percentile is improved by 124 times. The overall prediction reliability is greatly improved.
![](../figures/zh-cn_image_0000001207249058.gif)

Figure 4 Prediction accuracy distribution of openGauss and PostgreSQL

## Summary and Prospect

The AI features of openGauss are a preliminary attempt to combine AI with databases, and there may still be many shortcomings. In addition to the preceding typical open-source AI features, many AI features are still under exploration. As the preceding application scenarios show, AI-based methods can greatly reduce manual effort and improve production efficiency.

Although the road to combining AI with databases is difficult and rough, and even faces doubts from the industry, numerous R&D engineers have never given up their beliefs and are moving forward with the ideal of "all things intelligent." We hope that the open-source openGauss can attract more developers in the industry to integrate AI with databases, further promote the upgrade of database technologies, and stimulate the emergence of more valuable and meaningful AI databases, so that the great vision of intelligent databases can be realized in the future.

diff --git a/content/en/post/2022/Transaction-Mechanism-Source-Code-Analysis.md b/content/en/post/2022/Transaction-Mechanism-Source-Code-Analysis.md
new file mode 100644
index 0000000000000000000000000000000000000000..d18eb05bbd9c7627177d58a6ac19d1c5315c39f2
--- /dev/null
+++ b/content/en/post/2022/Transaction-Mechanism-Source-Code-Analysis.md
@@ -0,0 +1,2249 @@

+++

title = "Transaction Mechanism Source Code Analysis"

date = "2021-07-05"

tags = [ "Transaction Mechanism Source Code Analysis"]

archives = "2021-07"

author = "Jiangjun Jiang"

summary = "Transaction Mechanism Source Code Analysis"

img = "/en/post/2022/title/img17.png"

times = "12:30"

+++

# Transaction Mechanism Source Code Analysis

A transaction is the execution unit of a database operation and must have the basic atomicity, consistency, isolation, and durability \(ACID\) properties.

1. Atomicity: The operations in a transaction are either all executed or none of them is executed.
2. Consistency: Transaction execution cannot damage the integrity and consistency of database data.
3. Isolation: The execution of a transaction cannot be interfered with by other transactions in a concurrent environment.
4. Durability: Once a transaction is committed, its changes to the state of the database are saved permanently in the database.

This chapter describes how the openGauss transaction module implements the basic properties of database transactions to ensure that user data is not lost, is modified correctly, and is queried correctly.

## 5.1 Overall Transaction Architecture and Code Overview

Figure 5-1 shows the overall structure of the transaction module.

![](../figures/171.png)

Figure 5-1 Overall structure of the transaction module

In openGauss, the implementation of transactions is closely related to the implementation of the storage engine. The code is mainly located in the **src/gausskernel/storage/access/transam** and **src/gausskernel/storage/lmgr** directories. Figure 5-1 shows the key files.

1. Transaction manager: It is the core of the transaction system and is implemented as a finite state machine that runs in a loop. It receives commands from external systems and determines the next execution step of a transaction based on the current transaction state.
2. Log manager: It records the transaction execution state and data change process, including transaction commit logs \(Clogs\), commit sequence number logs \(CSNlogs\), and transaction logs \(Xlogs\). Clogs record only transaction execution results. CSNlogs record the sequence in which transactions are committed, for visibility determination. Xlogs are redo logs for data restoration and persistency.
3. Thread management mechanism: The transaction information of all threads is recorded in a shared memory area. Any thread can access this area to obtain the status information of other transactions.
4. Multi-Version Concurrency Control \(MVCC\) mechanism: In openGauss, the MVCC mechanism is used in the transaction read process, based on the commit sequence number \(CSN\) of each committed transaction. In this way, read and write operations on tuples do not block each other. For details about how visibility is determined, see section 5.2 "Transaction Concurrency Control."
5. Lock manager: It controls the write concurrency of the system and uses the lock mechanism to ensure isolation between the write processes of transactions.

## 5.2 Transaction Concurrency Control

The transaction concurrency control mechanism ensures the ACID properties of openGauss when transactions are executed concurrently. The following describes the components of the transaction concurrency control mechanism in more detail.

- **Transaction State Machine**

  openGauss divides the transaction system into two layers: TBlockState \(upper layer\) and TransState \(lower layer\).

  This layered design shields details when upper-layer services are processed, so that the various transaction execution statements on the client \(BEGIN, START TRANSACTION, COMMIT, ROLLBACK, and END\) are supported flexibly.

  1. TBlockState: the state of the query statements sent from the client, which is used to improve the flexibility of data operations. Multiple query statements can be executed in one transaction in the form of a transaction block.
  2. TransState: the state of the entire transaction from the perspective of the kernel.
- **Upper-Layer Transaction State Machines**

  The code of the TBlockState structure is as follows:

  ```
  typedef enum TBlockState
  {
      /* Not-in-transaction-block states: the transaction contains a single SQL statement. */
      TBLOCK_DEFAULT,       /* Default state of the transaction block */
      TBLOCK_STARTED,       /* Execute a single query statement. */

      /* In-transaction-block states: the transaction contains multiple statements. */
      TBLOCK_BEGIN,         /* Execute the BEGIN or START TRANSACTION statement. */
      TBLOCK_INPROGRESS,    /* The transaction block is being processed. */
      TBLOCK_END,           /* Execute the END or COMMIT statement. */
      TBLOCK_ABORT,         /* Wait for the ROLLBACK statement from the client, and then execute it, after an error is reported during execution in the transaction block. */
      TBLOCK_ABORT_END,     /* Receive and then execute the ROLLBACK statement from the client after an error is reported during execution in the transaction block. */
      TBLOCK_ABORT_PENDING, /* Receive and then execute the ROLLBACK statement from the client after execution in the transaction block succeeds. */
      TBLOCK_PREPARE,       /* Execute the PREPARE TRANSACTION statement for a two-phase commit. */

      /* The states of sub-transaction blocks are similar to those of the preceding transaction blocks. */
      TBLOCK_SUBBEGIN,      /* Execute the SAVEPOINT statement. */
      TBLOCK_SUBINPROGRESS, /* The sub-transaction block is being processed. */
      TBLOCK_SUBRELEASE,    /* Execute the RELEASE SAVEPOINT statement. */
      TBLOCK_SUBCOMMIT,     /* Execute the END or COMMIT statement to recursively commit from the lowest-layer sub-transaction up to the top-layer transaction. */
      TBLOCK_SUBABORT,      /* Wait for the ROLLBACK TO or ROLLBACK statement from the client after an error is reported during execution in the sub-transaction block. */
      TBLOCK_SUBABORT_END,  /* Receive and then execute the ROLLBACK TO statement to roll back to an upper-layer sub-transaction, or the ROLLBACK statement, from the client after an error is reported during execution in the sub-transaction block. */
      TBLOCK_SUBABORT_PENDING, /* Receive and then execute the ROLLBACK TO statement to roll back to an upper-layer sub-transaction, or the ROLLBACK statement, from the client after execution in the sub-transaction block succeeds. */
      TBLOCK_SUBRESTART,    /* Receive and then execute the ROLLBACK TO statement to roll back to the current sub-transaction after execution in the sub-transaction block succeeds. */
      TBLOCK_SUBABORT_RESTART /* Receive and then execute the ROLLBACK TO statement to roll back to the current sub-transaction after an error is reported during execution in the sub-transaction block. */
  } TBlockState;
  ```

  For better understanding, the states of sub-transaction blocks are omitted below. The state machine behavior of a sub-transaction block is similar to that of a transaction block. The relationship between a transaction block and its sub-transaction blocks is similar to a stack: a sub-transaction block starts after its transaction block and ends before it.

  Figure 5-2 shows the state machine of an explicit transaction block and the corresponding transition functions.

  ![](../figures/172.png)

  Figure 5-2 State machine of a transaction block

  Table 1 lists the values in the transaction state machine structure corresponding to the transaction block states in Figure 5-2.

  Table 1 Transaction block states

    | Transaction State | Transaction State Machine Structure |
    | ----------------- | ----------------------------------- |
    | Default | TBLOCK\_DEFAULT |
    | Started | TBLOCK\_STARTED |
    | Transaction block started | TBLOCK\_BEGIN |
    | Transaction block in progress | TBLOCK\_INPROGRESS |
    | Transaction block ended | TBLOCK\_END |
    | Rolling back | TBLOCK\_ABORT |
    | Rollback ended | TBLOCK\_ABORT\_END |
    | Waiting for rollback | TBLOCK\_ABORT\_PENDING |
    When no exception occurs, the state machine of a transaction block cycles through the following states, as shown in Figure 5-2: TBLOCK\_DEFAULT -\> TBLOCK\_STARTED -\> TBLOCK\_BEGIN -\> TBLOCK\_INPROGRESS -\> TBLOCK\_END -\> TBLOCK\_DEFAULT. The remaining states are exception-handling branches for the state points in the preceding normal scenario.

    1. Errors that occur before the TBLOCK\_INPROGRESS state is entered: If the transaction fails to start, an error is reported and the transaction is rolled back. The system clears resources and returns to the TBLOCK\_DEFAULT state.
    2. Errors that occur in the TBLOCK\_INPROGRESS state, in two scenarios. Transaction execution failure: TBLOCK\_INPROGRESS -\> TBLOCK\_ABORT -\> TBLOCK\_ABORT\_END -\> TBLOCK\_DEFAULT. Manual rollback of a transaction that has executed successfully: TBLOCK\_INPROGRESS -\> TBLOCK\_ABORT\_PENDING -\> TBLOCK\_DEFAULT.
    3. Errors that occur when a user executes the COMMIT statement: TBLOCK\_END -\> TBLOCK\_DEFAULT. As shown in Figure 5-2, the transaction leaves the TBLOCK\_DEFAULT state after it starts and returns to this state after it ends.
    4. openGauss also supports implicit transaction blocks. When a client executes a single SQL statement, the statement is committed automatically. The corresponding state machine is relatively simple and cycles through the following states: TBLOCK\_DEFAULT -\> TBLOCK\_STARTED -\> TBLOCK\_DEFAULT.

  - **Lower-Layer Transaction States**

    The TransState structure specifies transaction states from the perspective of the kernel. Its code is as follows:

    ```
    typedef enum TransState
    {
        TRANS_DEFAULT,    /* The current state is the default idle state. No transaction has started. */
        TRANS_START,      /* The transaction is being started. */
        TRANS_INPROGRESS, /* The transaction has started and is in progress. */
        TRANS_COMMIT,     /* The transaction is being committed. */
        TRANS_ABORT,      /* The transaction is being rolled back. */
        TRANS_PREPARE     /* A two-phase commit transaction enters the PREPARE TRANSACTION state. */
    } TransState;
    ```

    ![](../figures/173.png)

    Figure 5-3 Lower-layer transaction states

    Figure 5-3 shows the lower-layer states in the kernel. For details about the lower-layer state machine, see the description of TransState.

    1. Before a transaction starts, the transaction state is TRANS\_DEFAULT.
    2. When a transaction starts, the transaction state is TRANS\_START.
    3. After a transaction is started successfully, the transaction state remains TRANS\_INPROGRESS.
    4. When a transaction is committed or rolled back, the transaction state is TRANS\_COMMIT or TRANS\_ABORT, respectively.
    5. After a transaction ends, the transaction state returns to TRANS\_DEFAULT.

  - **Transaction State Machine Running Instance**

    This section provides a running instance of the state machine in SQL to help you better understand how internal transactions work. Execute the following SQL statements on the client:

    ```
    BEGIN;
    SELECT * FROM TABLE1;
    END;
    ```

    1\) Overall execution process

    Figure 5-4 shows the overall execution process. The execution of any statement first enters the transaction-block layer of the transaction processing interface, then calls the underlying transaction functions to process the specific statement, and finally returns to the transaction block.
    **Figure 5-4** Overall execution process

    ![](../figures/174.png)

    2\) Execution process of the BEGIN statement \(Figure 5-5\)

    \(1\) The entry function **exec\_simple\_query** processes the BEGIN statement.

    \(2\) The **start\_xact\_command** function starts a QUERY statement and calls the **StartTransactionCommand** function. At this time, the upper-layer state of the transaction block is TBLOCK\_DEFAULT, so the **StartTransaction** function is called to set the lower-layer state of the transaction to TRANS\_START. After the memory, buffer, and lock resources are initialized, the lower-layer state of the transaction is set to TRANS\_INPROGRESS, and the upper-layer state of the transaction block is set to TBLOCK\_STARTED in the **StartTransactionCommand** function.

    \(3\) The **PortalRun** function processes the BEGIN statement, calls functions downwards, and calls the **BeginTransactionBlock** function to set the upper-layer state of the transaction block to TBLOCK\_BEGIN.

    \(4\) The **finish\_xact\_command** function ends the QUERY statement, calls the **CommitTransactionCommand** function to change the upper-layer state of the transaction block from TBLOCK\_BEGIN to TBLOCK\_INPROGRESS, and waits to read the next statement.

    ![](../figures/175.png)

    Figure 5-5 Execution process of the BEGIN statement

    3\) Execution process of the SELECT statement \(Figure 5-6\)

    \(1\) The entry function **exec\_simple\_query** processes the **SELECT \* FROM table1;** statement.

    \(2\) The **start\_xact\_command** function starts a QUERY statement and calls the **StartTransactionCommand** function. The upper-layer state of the transaction block is TBLOCK\_INPROGRESS, which indicates that execution is already inside the transaction block. Therefore, the function returns directly without changing the upper-layer or lower-layer state of the transaction.

    \(3\) The **PortalRun** function executes the SELECT statement and calls the **ExecutorRun** function downwards to run the query along the optimal path based on the execution plan.

    \(4\) The **finish\_xact\_command** function ends the QUERY statement and calls the **CommitTransactionCommand** function. The upper-layer state of the transaction block is still TBLOCK\_INPROGRESS, so neither the upper-layer nor the lower-layer state of the transaction is changed.

    ![](../figures/176.png)

    Figure 5-6 Execution process of the SELECT statement

    4\) Execution process of the END statement \(Figure 5-7\)

    \(1\) The entry function **exec\_simple\_query** processes the END statement.

    \(2\) The **start\_xact\_command** function starts a QUERY statement and calls the **StartTransactionCommand** function. The upper-layer state of the transaction block is TBLOCK\_INPROGRESS, indicating that the transaction is still in progress. In this case, the upper-layer and lower-layer states of the transaction are not changed.

    \(3\) The **PortalRun** function processes the END statement, calls the **processUtility** function in sequence, and finally calls the **EndTransactionBlock** function to set the upper-layer state of the transaction block to TBLOCK\_END.

    \(4\) The **finish\_xact\_command** function ends the QUERY statement and calls the **CommitTransactionCommand** function. The current state of the transaction block is TBLOCK\_END. The function then calls the **CommitTransaction** function to commit the transaction, sets the lower-layer state of the transaction to TRANS\_COMMIT, commits the transaction, and clears the transaction resources.
    After the cleanup, the lower-layer state of the transaction is set to TRANS\_DEFAULT, and the **CommitTransactionCommand** function returns. The upper-layer state of the transaction block is set to TBLOCK\_DEFAULT, and the entire transaction block ends.

    ![](../figures/177.png)

    Figure 5-7 Execution process of the END statement

  - Functions related to transaction state transition

    1\) Transaction processing subfunctions, which apply for, recycle, and clear transaction resources based on the current upper-layer state machine of the transaction

    For details, see Table 5-2.

    **Table 5-2** Transaction processing subfunctions

    | Subfunction | Description |
    | ----------- | ----------- |
    | StartTransaction | Starts a transaction, initializes the memory and variables, and sets the lower-layer state of the transaction to TRANS\_INPROGRESS. |
    | CommitTransaction | Transitions the lower-layer state from TRANS\_INPROGRESS to TRANS\_COMMIT, makes Clogs and Xlogs persistent locally, clears the corresponding transaction slot information, and then sets the lower-layer state to TRANS\_DEFAULT. |
    | PrepareTransaction | Similar to the CommitTransaction function; transitions the lower-layer state from TRANS\_INPROGRESS to TRANS\_PREPARE, constructs a two-phase GXACT structure, creates a two-phase file, adds dummy slot information, transfers the thread lock information to the dummy slot, releases resources, and finally sets the lower-layer state to TRANS\_DEFAULT. |
    | AbortTransaction | Releases LWLocks, unlocks buffers \(UnlockBuffers\), performs lock error cleanup \(LockErrorCleanup\), transitions the lower-layer state from TRANS\_INPROGRESS to TRANS\_ABORT, records the corresponding Clogs, clears the transaction slot information, and releases various resources. |
    | CleanupTransaction | The lower-layer state is expected to be TRANS\_ABORT. This function is generally called after the AbortTransaction function and clears the remaining resources. |
    | FinishPreparedTransaction | Ends a two-phase commit transaction. |
    | StartSubTransaction | Starts a sub-transaction. |
    | CommitSubTransaction | Commits a sub-transaction. |
    | AbortSubTransaction | Rolls back a sub-transaction. |
    | CleanupSubTransaction | Clears resource information about a sub-transaction, similar to CleanupTransaction. |
    | PushTransaction/PopTransaction | Sub-transactions are organized like a stack. These two functions start and end a sub-transaction, respectively. |
    2\) Processing functions, which call the subfunctions based on the corresponding state machine

    For details, see Table 5-3; a short sketch of this dispatch pattern follows the table.

    **Table 5-3** Transaction execution functions

    | Function | Description |
    | -------- | ----------- |
    | StartTransactionCommand | Calls the corresponding transaction execution subfunction based on the upper-layer state when a transaction statement starts. |
    | CommitTransactionCommand | Calls the corresponding transaction execution subfunction based on the upper-layer state when a transaction statement ends. |
    | AbortCurrentTransaction | When an internal error occurs in a transaction, the longjmp error-handling path calls this function to clear the corresponding resources in advance and set the upper-layer state of the transaction to TBLOCK\_ABORT. |
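    The dispatch that Table 5-3 summarizes is essentially a switch over the upper-layer state machine. The following is a minimal, self-contained sketch of that pattern; the Sketch-suffixed names are invented here for illustration and do not appear in the openGauss source.

    ```
    #include <stdio.h>

    /* Hypothetical mirror of a few upper-layer states used only in this sketch. */
    typedef enum { TBLOCK_DEFAULT, TBLOCK_STARTED, TBLOCK_INPROGRESS, TBLOCK_ABORT } BlockStateSketch;

    static void StartTransactionSketch(void) { printf("StartTransaction: TRANS_START -> TRANS_INPROGRESS\n"); }

    /* Dispatch on the upper-layer state, as Table 5-3 describes for StartTransactionCommand. */
    static void StartTransactionCommandSketch(BlockStateSketch *blockState)
    {
        switch (*blockState) {
        case TBLOCK_DEFAULT:        /* First statement: start the lower-layer transaction. */
            StartTransactionSketch();
            *blockState = TBLOCK_STARTED;
            break;
        case TBLOCK_INPROGRESS:     /* Already inside BEGIN ... END: nothing to do. */
            break;
        case TBLOCK_ABORT:          /* Failed transaction block: keep waiting for ROLLBACK. */
            break;
        default:
            break;
        }
    }

    int main(void)
    {
        BlockStateSketch s = TBLOCK_DEFAULT;
        StartTransactionCommandSketch(&s);   /* Starts the transaction. */
        s = TBLOCK_INPROGRESS;
        StartTransactionCommandSketch(&s);   /* Inside a block: no state change. */
        return 0;
    }
    ```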
    3\) Functions for controlling the upper-layer transaction state machine

    For details, see Table 5-4.

    **Table 5-4** Functions for controlling the upper-layer transaction state machine

    | Function | Description |
    | -------- | ----------- |
    | BeginTransactionBlock | Sets the upper-layer transaction state to TBLOCK\_BEGIN when a transaction starts explicitly. |
    | EndTransactionBlock | Sets the upper-layer transaction state to TBLOCK\_END when a transaction is committed explicitly. |
    | UserAbortTransactionBlock | Sets the upper-layer transaction state to TBLOCK\_ABORT\_PENDING or TBLOCK\_ABORT\_END when a transaction is rolled back explicitly. |
    | PrepareTransactionBlock | Sets the upper-layer transaction state to TBLOCK\_PREPARE when the PREPARE statement is executed explicitly. |
    | DefineSavepoint | Calls the PushTransaction function to set the upper-layer state of the sub-transaction to TBLOCK\_SUBBEGIN when the SAVEPOINT statement is executed. |
    | ReleaseSavepoint | Sets the upper-layer state of the sub-transaction to TBLOCK\_SUBRELEASE when the RELEASE SAVEPOINT statement is executed. |
    | RollbackToSavepoint | Sets the upper-layer state of all sub-transactions to TBLOCK\_SUBABORT\_PENDING or TBLOCK\_SUBABORT\_END and that of the top-layer transaction to TBLOCK\_SUBABORT\_RESTART when the ROLLBACK TO statement is executed. |
- **XID Allocation, Clogs, and CSNlogs**

  To distinguish different transactions in the database, openGauss allocates a unique identifier, the transaction ID \(XID\), to each transaction. An XID is a monotonically increasing number of the uint64 type. After a transaction ends, Clogs record whether the transaction was committed, and CSNlogs record the commit sequence number of the transaction for visibility determination.

  - **64-Bit XID Allocation**

    openGauss assigns a unique XID to each write transaction. When a tuple is inserted, the transaction information is written to the **xmin** field in the tuple header, indicating the XID of the inserting transaction. When a tuple is updated or deleted, the current transaction information is written to the **xmax** field in the tuple header, indicating the XID of the deleting transaction. XIDs are allocated as monotonically increasing uint64 numbers. To save space and remain compatible with earlier versions, the 64-bit values are stored in two parts: the **xmin** and **xmax** fields in the tuple header are uint32 numbers, and the page header stores a 64-bit **xid\_base** field for the page.

    Figure 5-8 shows the tuple structure, and Figure 5-9 shows the page header structure. The 64-bit **xmin** or **xmax** value of a tuple is calculated as follows: 32-bit **xmin**/**xmax** in the tuple header + **xid\_base** in the page header.

    ![](../figures/zh-cn_image_0000001252563289.png)

    Figure 5-8 Tuple structure

    ![](../figures/zh-cn_image_0000001207963344.png)

    Figure 5-9 Page header structure

    When ever-larger XIDs are continuously inserted into a page, an XID may exceed **xid\_base** + 2^32. In this case, the value of **xid\_base** must be adjusted so that the **xmin** and **xmax** values of all tuples on the page can still be derived from **xid\_base** and the 32-bit tuple header values. For details, see 3\) "Key functions" in "Clogs and CSNlogs" below.

    To prevent XIDs from being consumed too quickly, openGauss allocates XIDs only to write transactions and does not allocate extra XIDs to read-only transactions; that is, XIDs are allocated only when they are required. If a transaction has no XID yet when an XID is to be allocated to its sub-transaction, the system allocates an XID to the parent transaction first, to ensure that the XID of the sub-transaction is greater than that of its parent. Theoretically, 64-bit XIDs are sufficient: at 10 million transactions per second \(TPS\), 64-bit XIDs last for about 58,000 years.
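    As a quick illustration of this composition, the following sketch reconstructs a 64-bit XID from the 32-bit tuple header value and the page's **xid\_base**, following the formula above; the helper name is hypothetical, not the openGauss source.

    ```
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t TransactionId;
    typedef uint32_t ShortTransactionId;

    /* Hypothetical helper: 64-bit xmin/xmax = 32-bit tuple-header value + page xid_base. */
    static TransactionId ShortXidToXid(ShortTransactionId shortXid, TransactionId xidBase)
    {
        return (TransactionId)shortXid + xidBase;
    }

    int main(void)
    {
        TransactionId xidBase = 4294967296ULL;   /* Example page xid_base (2^32). */
        ShortTransactionId xminShort = 42;       /* 32-bit value stored in the tuple header. */
        printf("xmin = %llu\n", (unsigned long long)ShortXidToXid(xminShort, xidBase));
        return 0;
    }
    ```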
  - **Clogs and CSNlogs**

    Clogs and CSNlogs maintain the mapping from XIDs to commit states and from XIDs to CSNs, respectively. Because memory resources are limited and long transactions may exist in the system, not all mappings can be kept in memory; they must be written to disk as physical files. Therefore, Clog files \(XID -\> CommitLog map\) and CSNlog files \(XID -\> CommitSeqNoLog map\) are generated. Both CSNlogs and Clogs use the simple least recently used \(SLRU\) mechanism to read files and flush data to disks.

    1\) Clogs record the commit status of XIDs. In openGauss, four bits are used to identify the status of each XID. The code of the Clog status values is as follows:

    ```
    #define CLOG_XID_STATUS_IN_PROGRESS 0x00   /* The transaction has not started or is in progress (a crash may have occurred). */
    #define CLOG_XID_STATUS_COMMITTED 0x01     /* The transaction has been committed. */
    #define CLOG_XID_STATUS_ABORTED 0x02       /* The transaction has been rolled back. */
    #define CLOG_XID_STATUS_SUB_COMMITTED 0x03 /* The sub-transaction has been committed, but the status of its parent transaction is unknown. */
    ```

    Figure 5-10 shows the physical structure of a Clog page.

    ![](../figures/178.png)

    Figure 5-10 Physical structure of a Clog page

    Figure 5-10 shows that transactions 1, 4, and 5 are still in progress, transaction 2 has been committed, and transaction 3 has been rolled back.

    2\) CSNlogs record the commit sequence numbers of transactions. openGauss allocates an 8-byte uint64 CSN to each XID, so an 8-KB page can store the CSNs of about 1,000 transactions. When the CSNlogs reach a certain size, they are divided into file blocks of 256 KB each. Similar to XIDs, several special values are reserved for CSNs. The code of the CSN status values is as follows:

    ```
    #define COMMITSEQNO_INPROGRESS UINT64CONST(0x0)   /* The transaction has not been committed or rolled back. */
    #define COMMITSEQNO_ABORTED UINT64CONST(0x1)      /* The transaction has been rolled back. */
    #define COMMITSEQNO_FROZEN UINT64CONST(0x2)       /* The transaction has been committed and is visible to any snapshot. */
    #define COMMITSEQNO_FIRST_NORMAL UINT64CONST(0x3) /* Start value of normal transaction CSNs. */
    #define COMMITSEQNO_COMMIT_INPROGRESS (UINT64CONST(1) << 62) /* The transaction is being committed. */
    ```

    Similar to the Clogs, the physical structure of the CSNlogs is shown in Figure 5-11.

    ![](../figures/179.png)

    Figure 5-11 Physical structure of CSNlogs

    The CSNs corresponding to the XIDs 2048, 2049, 2050, 2051, 2052, and 2053 are 5, 4, 7, 10, 6, and 8, respectively. That is, the transaction commit sequence is 2049 -\> 2048 -\> 2052 -\> 2050 -\> 2053 -\> 2051.
    3\) Key functions

    Functions for calculating the value of **xid\_base** on a page with 64-bit XIDs are as follows:

    \(1\) **Heap\_page\_prepare\_for\_xid**: This function is called when a write operation is performed on the page, to adjust the value of **xid\_base**:

    ① If the new XID is between **xid\_base** + **FirstNormalxid** and **xid\_base** + **MaxShortxid** \(0xFFFFFFFF\), **xid\_base** does not need to be adjusted.

    ② If the new XID is less than **xid\_base** + **FirstNormalxid**, **xid\_base** needs to be decreased.

    ③ If the new XID is greater than **xid\_base** + **MaxShortxid**, **xid\_base** needs to be increased.

    ④ In special cases, if the XID span of the page is greater than the range that 32 bits can represent, smaller XIDs on the page need to be frozen: the XID of a committed transaction is set to **FrozenTransactionId** \(2\), which is visible to all transactions, and the XID of a rolled-back transaction is set to **InvalidTransactionId** \(0\), which is invisible to all transactions.

    \(2\) **Freeze\_single\_heap\_page**: This function freezes small XIDs on the page:

    ① Calculate the value of **oldestxid**. Transactions with an XID smaller than **oldestxid** will not be accessed anymore, so the XID of a committed transaction can be marked as **FrozenTransactionId** \(visible to all transactions\), and the XID of a rolled-back transaction can be marked as **InvalidTransactionId** \(invisible to all transactions\).

    ② Clear the hot-update chain, redirect the item IDs, and compact the page space.

    ③ Process each tuple based on **oldestxid**.

    \(3\) **Heap\_page\_shift\_base**: This function updates the value of **xid\_base** and adjusts the **xmin** and **xmax** values in each tuple header on the page.

    \(4\) **GetNewTransactionId**: This function obtains a new XID.

- **MVCC Mechanism for Visibility Determination**

  openGauss uses the MVCC mechanism to ensure data consistency. During data scanning, each transaction sees only the data as of the time its snapshot was taken, instead of the latest state of the data. This prevents the inconsistency that updates by other concurrent transactions would otherwise cause. A main advantage of the MVCC mechanism is that a lock request for reading data does not conflict with a lock request for writing data, so read and write operations do not block each other. The following describes the transaction isolation levels and the CSN mechanism that openGauss uses for visibility determination.

  - **Transaction Isolation Levels**

    The SQL standard defines the following isolation levels in terms of the phenomena that must be avoided between concurrently running transactions, as shown in Table 5-5.

    **Table 5-5** Transaction isolation levels

    | Isolation Level | P0 (Dirty Write) | P1 (Dirty Read) | P2 (Fuzzy Read) | P3 (Phantom Read) |
    | --------------- | ---------------- | --------------- | --------------- | ----------------- |
    | Read uncommitted | Impossible | Possible | Possible | Possible |
    | Read committed | Impossible | Impossible | Possible | Possible |
    | Repeatable read | Impossible | Impossible | Impossible | Possible |
    | Serializable | Impossible | Impossible | Impossible | Impossible |
    \(1\) Dirty write: Two transactions write the same data and then commit or roll back separately, so the final result is undetermined; that is, one transaction can undo the committed modification of the other.

    \(2\) Dirty read: A transaction reads data that has been modified but not yet committed by another transaction.

    \(3\) Fuzzy read \(non-repeatable read\): A transaction re-reads data it has previously read and finds that the data has been modified by another transaction.

    \(4\) Phantom read: A transaction repeatedly performs a range query that returns a group of records meeting the conditions, but the number of records in the result set changes because of modifications by other transactions.

    During the implementation of various databases, some new phenomena were observed between concurrent transactions, and the original isolation levels were extended accordingly. For details, see Table 5-6.

    **Table 5-6** Transaction isolation level extensions

    | Isolation Level | P0 (Dirty Write) | P1 (Dirty Read) | P4 (Lost Update) | P2 (Fuzzy Read) | P3 (Phantom Read) | A5A (Read Skew) | A5B (Write Skew) |
    | --------------- | ---------------- | --------------- | ---------------- | --------------- | ----------------- | --------------- | ---------------- |
    | Read uncommitted | Impossible | Possible | Possible | Possible | Possible | Possible | Possible |
    | Read committed | Impossible | Impossible | Possible | Possible | Possible | Possible | Possible |
    | Repeatable read | Impossible | Impossible | Impossible | Impossible | Possible | Impossible | Impossible |
    | Snapshot consistent read | Impossible | Impossible | Impossible | Impossible | Occasional | Impossible | Possible |
    | Serializable | Impossible | Impossible | Impossible | Impossible | Impossible | Impossible | Impossible |
    \(5\) Lost update: After a transaction reads a tuple and before it updates the tuple, another transaction modifies the tuple value, so the earlier modification is lost.

    \(6\) Read skew: Assume that data x and y satisfy an implicit constraint x + y = 100. Transaction 1 reads x = 50. Transaction 2 writes x = 25, updates y = 75 so that the constraint still holds, and commits. Transaction 1 then reads y = 75. As a result, transaction 1 sees x + y = 125, which violates the constraint.

    \(7\) Write skew: Assume that data x and y satisfy an implicit constraint x + y <= 100. Transaction 1 reads x = 50 and writes y = 50. Transaction 2 reads y = 30, writes x = 70, and commits. Transaction 1 then commits. As a result, x = 70 and y = 50, which violates the constraint x + y <= 100.

    openGauss provides the read committed and repeatable read isolation levels. The repeatable read isolation level does not have the phantom read problem but does have the A5B \(write skew\) problem.

  - **CSN Mechanism**

    1\) Working principles of CSNs \(Figure 5-12\)

    ![](../figures/zh-cn_image_0000001208473690.png)

    Figure 5-12 Working principles of CSNs

    Each non-read-only transaction is assigned an XID while it runs. When the transaction is committed, the CSN is advanced, and the mapping between the CSN and the XID is saved \(in the CSNlogs\). In Figure 5-12, the solid vertical line marks the moment the snapshot is taken, at which CSN 4, the next value after the CSN \(3\) of the most recently committed transaction, is obtained. Transactions TX1, TX3, and TX5 have been committed, and their CSNs are 1, 2, and 3, respectively. Transactions TX2, TX4, and TX6 are in progress, and transactions TX7 and TX8 have not started. For this snapshot, the commit results of transactions whose CSN is smaller than 4 are visible; the results of the other transactions are invisible because they were not committed when the snapshot was taken.

    2\) Process for determining visibility by using MVCC snapshots

    When a snapshot is taken, the minimum active XID is recorded as **snapshot.xmin**, the XID of the latest committed transaction \(**latestCompleteXid**\) + 1 is recorded as **snapshot.xmax**, and the CSN of the latest committed transaction + 1 \(**NextCommitSeqNo**\) is recorded as **snapshot.csn**. Figure 5-13 shows the process of determining visibility.

    ![](../figures/1710.png)

    Figure 5-13 Process for determining visibility by using MVCC snapshots

    \(1\) If the XID is greater than or equal to **snapshot.xmax**, the XID is invisible.

    \(2\) If the XID is smaller than **snapshot.xmin**, the transaction ended before the snapshot was taken. In this case, the commit state of the transaction is queried in the Clog, and a corresponding hint bit is set in the tuple header.

    \(3\) If the XID is between **snapshot.xmin** and **snapshot.xmax**, the CSN at which the transaction ended is read from the CSN-XID mapping. If the CSN exists and is smaller than **snapshot.csn**, the transaction is visible; otherwise, it is invisible.
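    The three rules above can be condensed into a few lines of C. The following is a simplified, self-contained sketch, not the openGauss implementation: the Clog lookup in rule \(2\) is reduced to "assume committed", and **LookupCsnSketch** is a toy stand-in for the CSNlog, preloaded with the committed transactions of Figure 5-12.

    ```
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t TransactionId;
    typedef uint64_t CommitSeqNo;

    /* Simplified snapshot with only the fields used by the three rules in Figure 5-13. */
    typedef struct {
        TransactionId xmin;        /* Minimum active XID when the snapshot was taken. */
        TransactionId xmax;        /* latestCompleteXid + 1 when the snapshot was taken. */
        CommitSeqNo   snapshotcsn; /* NextCommitSeqNo when the snapshot was taken. */
    } SnapshotSketch;

    /* Toy CSNlog stand-in: returns the commit CSN of an XID, or 0 if not committed. */
    static CommitSeqNo LookupCsnSketch(TransactionId xid)
    {
        switch (xid) {            /* Mirrors Figure 5-12: TX1, TX3, TX5 committed with CSNs 1, 2, 3. */
        case 1: return 1;
        case 3: return 2;
        case 5: return 3;
        default: return 0;
        }
    }

    /* Visibility rules (1)-(3) as stated in the text; Clog handling in rule (2) is simplified. */
    static bool XidVisibleSketch(TransactionId xid, const SnapshotSketch *snap)
    {
        if (xid >= snap->xmax)
            return false;                        /* (1) Not yet started when the snapshot was taken. */
        if (xid < snap->xmin)
            return true;                         /* (2) Ended before the snapshot; the Clog decides committed vs. rolled back. */
        CommitSeqNo csn = LookupCsnSketch(xid);  /* (3) Otherwise, consult the CSN-XID mapping. */
        return csn != 0 && csn < snap->snapshotcsn;
    }

    int main(void)
    {
        SnapshotSketch snap = { .xmin = 1, .xmax = 7, .snapshotcsn = 4 };
        printf("TX3 visible: %d\n", XidVisibleSketch(3, &snap)); /* 1: CSN 2 < 4 */
        printf("TX4 visible: %d\n", XidVisibleSketch(4, &snap)); /* 0: still in progress */
        return 0;
    }
    ```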
    3\) Commit process

    Figure 5-14 shows the transaction commit process.

    ![](../figures/1711.png)

    Figure 5-14 Commit process

    \(1\) The **commit-in-progress** flag is set in the CSN-XID mapping.

    \(2\) The value of **NextCommitSeqNo** is updated atomically.

    \(3\) Redo logs are generated, and the Clogs and CSNlogs are written.

    \(4\) The PGPROC structure is updated to remove the corresponding transaction information from it. Both **xid** and **xmin** are set to **InvalidTransactionId**.

    4\) Hot backup support

    Xlogs carrying the **commit-in-progress** flag are added between steps \(1\) and \(2\) of the transaction commit process. When reading a snapshot, the standby node obtains the lightweight lock ProcArrayLock and calculates the current snapshot. If, while the snapshot's CSN is being used, the CSN corresponding to an XID carries the **COMMITSEQNO\_COMMIT\_INPROGRESS** flag, the standby node must wait until the corresponding commit Xlog has been replayed and then read the final CSN for visibility determination. To implement this wait, the standby node calls the **XactLockTableInsert** function to acquire the transaction exclusive lock of the corresponding XID when redoing the Xlog that carries the **commit-in-progress** flag. If other read transactions access this XID, they wait on the transaction lock of the XID until the corresponding commit Xlog has been replayed.

  - **Key Data Structures and Functions**

    1\) Snapshots

    Code related to snapshots is as follows:

    ```
    typedef struct SnapshotData {
        SnapshotSatisfiesFunc satisfies;  /* Function for determining visibility. Generally, HeapTupleSatisfiesMVCC is used. */
        TransactionId xmin;  /* Minimum XID of the currently active transactions. Transactions whose XID is smaller than this value have ended. */
        TransactionId xmax;  /* XID of the latest committed transaction (latestCompleteXid) + 1. Transactions whose XID is greater than or equal to this value have not started and are invisible. */
        TransactionId* xip;  /* List of the currently active transactions. The value is invalid in the CSN version. */
        TransactionId* subxip;  /* List of the cached active sub-transactions. The value is invalid in the CSN version. */
        uint32 xcnt;  /* Number of active transactions (entries in the active-transaction XID list). The value is invalid in the CSN version. */
        GTM_Timeline timeline;  /* Invalid in standalone openGauss. */
        uint32 max_xcnt;  /* Maximum number of active-transaction XIDs. The value is invalid in the CSN version. */
        int32 subxcnt;  /* Number of entries in the cached active sub-transaction list. The value is invalid in the CSN version. */
        int32 maxsubxcnt;  /* Maximum number of entries in the cached active sub-transaction list. The value is invalid in the CSN version. */
        bool suboverflowed;  /* Whether the number of active sub-transactions exceeds the upper limit pre-allocated in the shared memory. The value is invalid in the CSN version. */

        CommitSeqNo snapshotcsn;  /* CSN of the snapshot. Generally, the value is the CSN of the latest committed transaction + 1 (NextCommitSeqNo). Transactions whose CSN is smaller than this value are visible. */

        int prepared_array_capacity;  /* Invalid in standalone openGauss. */
        int prepared_count;  /* Invalid in standalone openGauss. */
        TransactionId* prepared_array;  /* Invalid in standalone openGauss. */

        bool takenDuringRecovery;  /* Whether the snapshot was generated during recovery. */
        bool copied;  /* Whether the snapshot is static at the session level or is copied from newly allocated memory. */

        CommandId curcid;  /* Command sequence number within the transaction block. Data inserted by earlier commands of the same transaction is visible to subsequent statements. */
        uint32 active_count;  /* refcount of the ActiveSnapshot stack */
        uint32 regd_count;  /* refcount of the RegisteredSnapshotList */
        void* user_data;  /* Used by the local multi-version snapshot, indicating that the snapshot is used by threads and cannot be released directly. */
        SnapshotType snapshot_type;  /* Invalid in standalone openGauss. */
    } SnapshotData;
    ```
    2\) HeapTupleSatisfiesMVCC

    This function scans tuples for common read transactions based on the CSN logic. The code is as follows:

    ```
    bool HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot, Buffer buffer)
    {
        ……  /* Initialize variables. */

        if (!HeapTupleHeaderXminCommitted(tuple)) {  /* Check the hint bit first. For visibility determination, openGauss needs to know the Clog commit states of the tuple's xmin and xmax. To avoid repeated Clog accesses, openGauss records the transaction state as hint bits in the tuple header, one bit each for committed and rolled back. The hint bits are not updated when a transaction commits or rolls back; instead, if they are not yet set during a visibility check, openGauss reads the state from the Clog and sets them, so the final commit state of a tuple is not fetched repeatedly. If xmin or xmax is found to be committed during a scan, the corresponding flag is set to accelerate later scans; otherwise, visibility determination continues below. */
            if (HeapTupleHeaderXminInvalid(tuple))  /* Also a hint bit: if xmin is marked invalid, the transaction that inserted the tuple was rolled back, so the tuple is invisible. */
                return false;

            if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tuple))) {  /* If the tuple was inserted by the current transaction, compare command IDs (CIDs): results inserted by earlier commands of the same transaction are visible to subsequent queries. */
                ……
            } else {  /* The tuple was inserted by another transaction; determine visibility based on the snapshot. */
                visible = XidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, &hintstatus);  /* Determine visibility through the CSNlogs and return the final commit state of the transaction. */
                if (hintstatus == XID_COMMITTED)  /* If the transaction is committed, set the committed hint bit to accelerate later checks. */
                    SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED, HeapTupleHeaderGetXmin(page, tuple));

                if (hintstatus == XID_ABORTED) {
                    ……  /* If the transaction was rolled back, set the rollback flag. */
                    SetHintBits(tuple, buffer, HEAP_XMIN_INVALID, InvalidTransactionId);
                }
                if (!visible) {  /* If xmin is invisible, the tuple is invisible. Otherwise, the inserting transaction is committed for this snapshot, and the check continues below to determine whether the deleting transaction is also committed for this snapshot. */
                    return false;
                }
            }
        } else {  /* If xmin of the tuple already carries the committed hint bit, use CommittedXidVisibleInSnapshot to determine whether it is visible to this snapshot. */
            /* xmin is committed, but maybe not according to our snapshot. */
            if (!HeapTupleHeaderXminFrozen(tuple) &&
                !CommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot)) {
                return false;
            }
        }
        ……  /* The subsequent visibility check for xmax is similar to that for xmin. If xmax is visible to the current snapshot, the deleting transaction has been committed and the tuple is invisible; otherwise, the tuple is still visible. */
        if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED)) {
            if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(page, tuple))) {
                if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid)
                    return true;   /* The deleting command started after the scan began; the tuple is still visible. */
                else
                    return false;  /* The deleting command started before the scan began; the tuple is invisible. */
            }

            visible = XidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, &hintstatus);
            if (hintstatus == XID_COMMITTED) {
                /* Set the hint bit of xmax. */
                SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED, HeapTupleHeaderGetXmax(page, tuple));
            }
            if (hintstatus == XID_ABORTED) {
                /* Rollback or crash. */
                SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId);
            }
            if (!visible) {
                return true;  /* If the transaction corresponding to xmax is invisible to the snapshot, the deletion is not visible, so the tuple is considered alive. */
            }
        } else {
            /* If xmax is marked committed but the transaction is invisible to the snapshot, the deletion is considered incomplete and the tuple remains visible. */
            if (!CommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot)) {
                return true;  /* The tuple is considered visible. */
            }
        }
        return false;
    }
    ```
    3\) HeapTupleSatisfiesNow

    The logic of this function is similar to that of HeapTupleSatisfiesMVCC. The only difference is that it determines only the states of **xmin** and **xmax** and does not call the **XidVisibleInSnapshot** and **CommittedXidVisibleInSnapshot** functions to determine visibility to snapshots.

    4\) HeapTupleSatisfiesVacuum

    This function returns the tuple state according to the value of **oldestXmin**. A dead tuple \(an invisible tuple of an earlier version in the openGauss MVCC mechanism\) that is no longer accessed by any unfinished transaction \(xmax < **oldestXmin**\) can be cleared by executing the VACUUM statement. The function code is as follows:

    ```
    HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
    {
        ……  /* Initialize variables. */
        if (!HeapTupleHeaderXminCommitted(tuple)) {  /* Use hint bits for acceleration. The logic is the same as that of HeapTupleSatisfiesMVCC. */
            if (HeapTupleHeaderXminInvalid(tuple))  /* xmin was not committed: the tuple is dead and can be cleared. */
                return HEAPTUPLE_DEAD;
            xidstatus = TransactionIdGetStatus(HeapTupleGetRawXmin(htup), false);  /* Obtain the transaction state through the CSNlog. */
            if (xidstatus == XID_INPROGRESS) {
                if (tuple->t_infomask & HEAP_XMAX_INVALID)  /* If xmax is not set, the tuple is not deleted; the insert is still in progress. Otherwise, the delete is in progress. */
                    return HEAPTUPLE_INSERT_IN_PROGRESS;
                return HEAPTUPLE_DELETE_IN_PROGRESS;  /* The deletion is in progress. */
            } else if (xidstatus == XID_COMMITTED) {  /* If xmin is committed, set the hint bit and then check whether xmax is committed. */
                SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED, HeapTupleGetRawXmin(htup));
            } else {
                ……  /* The transaction ended without committing: it was rolled back or crashed. Generally a dead tuple is returned, which can be deleted. In a standalone system, t_thrd.xact_cxt.useLocalSnapshot does not take effect and is always false. */
                SetHintBits(tuple, buffer, HEAP_XMIN_INVALID, InvalidTransactionId);
                return ((!t_thrd.xact_cxt.useLocalSnapshot || IsInitdb) ? HEAPTUPLE_DEAD : HEAPTUPLE_LIVE);
            }
        }
        /* Check xmax. If xmax is not set, the tuple is not deleted; it is alive and cannot be cleared. */
        if (tuple->t_infomask & HEAP_XMAX_INVALID)
            return HEAPTUPLE_LIVE;
        ……
        if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED)) {  /* Determine the state of xmax; if it is committed, set the hint bit. */
            xidstatus = TransactionIdGetStatus(HeapTupleGetRawXmax(htup), false);
            if (xidstatus == XID_INPROGRESS)
                return HEAPTUPLE_DELETE_IN_PROGRESS;
            else if (xidstatus == XID_COMMITTED)
                SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED, HeapTupleGetRawXmax(htup));
            else {
                ……  /* The transaction corresponding to xmax was rolled back or crashed. */
                SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId);
                return HEAPTUPLE_LIVE;
            }
        }

        /* Check whether the tuple can be deleted: if xmax is smaller than oldestXmin, no unfinished transaction accesses the tuple, and it can be deleted. */
        if (!TransactionIdPrecedes(HeapTupleGetRawXmax(htup), OldestXmin))
            return ((!t_thrd.xact_cxt.useLocalSnapshot || IsInitdb) ? HEAPTUPLE_RECENTLY_DEAD : HEAPTUPLE_LIVE);

        /* The tuple is dead, is not accessed by any active transaction, and can be deleted. */
        return ((!t_thrd.xact_cxt.useLocalSnapshot || IsInitdb) ? HEAPTUPLE_DEAD : HEAPTUPLE_LIVE);
    }
    ```
    5\) SetXact2CommitInProgress

    This function sets the **COMMITSEQNO\_COMMIT\_INPROGRESS** flag of the CSNlog corresponding to an XID \(for details, see "XID Allocation, Clogs, and CSNlogs" above\), indicating that the transaction corresponding to the XID is being committed. This operation ensures atomicity during visibility determination, that is, it prevents concurrent read transactions from reading inconsistent data while the CSN is being set.

    6\) CSNLogSetCommitSeqNo

    This function sets the CSNlog for the corresponding XID.

    7\) RecordTransactionCommit

    This function records a transaction commit, including writing the commit Xlog as well as the Clogs and CSNlogs.

- **Intra-Process Multi-Thread Management Mechanism**

  This section briefly describes the data structures of the intra-process multi-thread management mechanism and the multi-version snapshot computing mechanism.

  - **Transaction Information Management**

    When the database is started, a shared memory segment is created and maintained. When a thread is initialized, it obtains a slot from the shared memory and records its thread information in the slot. When a snapshot is taken, the slot information is updated in the shared memory array.
    When a transaction ends, its transaction information is cleared from the slot. During snapshot calculation, the global array is traversed to obtain the transaction information of all concurrent threads and to calculate the snapshot information \(such as **xmin**, **xmax**, and **snapshotcsn**\). The key data structure code for transaction information management is as follows:

    ```
    typedef struct PGXACT {
        GTM_TransactionHandle handle;  /* Invalid in standalone mode. */
        TransactionId xid;             /* XID of the thread. If there is no XID, the value is 0. */
        TransactionId prepare_xid;     /* XID in the preparation phase. */

        TransactionId xmin;  /* Minimum active XID when the current transaction starts. VACUUM does not delete tuples whose XID is greater than or equal to this value. */
        CommitSeqNo csn_min;  /* Minimum active CSN when the current transaction starts. */
        TransactionId next_xid;  /* Invalid in standalone mode. */
        int nxids;  /* Number of sub-transactions. */
        uint8 vacuumFlags;  /* Flags related to the VACUUM operation. */

        bool needToSyncXid;  /* Invalid in standalone mode. */
        bool delayChkpt;  /* True if the thread requires the checkpoint thread to delay and wait. */
    #ifdef __aarch64__
        char padding[PG_CACHE_LINE_SIZE - PGXACT_PAD_OFFSET];  /* Structure alignment for performance. */
    #endif
    } PGXACT;

    struct PGPROC {
        SHM_QUEUE links;  /* Pointer in the linked list. */

        PGSemaphoreData sem;  /* Semaphore to sleep on while waiting. */
        int waitStatus;       /* Waiting status. */

        Latch procLatch;  /* Generic latch of the thread. */

        LocalTransactionId lxid;  /* Local top-layer XID of the current thread. */
        ThreadId pid;             /* Thread ID. */

        ThreadId sessMemorySessionid;
        uint64 sessionid;  /* Current session ID in thread pool mode. */
        int logictid;      /* Logical thread ID. */
        TransactionId gtt_session_frozenxid;  /* Frozen XID of a session-level global temporary table. */

        int pgprocno;
        int nodeno;

        /* The following data structures are zero when the thread starts. */
        BackendId backendId;  /* Backend ID of the thread. */
        Oid databaseId;       /* Object identifier (OID) of the currently accessed database. */
        Oid roleId;           /* OID of the current user. */

        /* Version number, which is used to distinguish old and new versions during an upgrade. */
        uint32 workingVersionNum;

        /* Whether the current transaction has received a conflict signal in hot backup mode. The ProcArray lock is required for setting this field. */
        bool recoveryConflictPending;
        /* Information about the LWLock the thread is waiting for. */
        bool lwWaiting;     /* True when waiting for an LWLock. */
        uint8 lwWaitMode;   /* Lock mode to be acquired. */
        bool lwIsVictim;    /* Forced to abandon the LWLock. */
        dlist_node lwWaitLink;  /* Next waiter for the same LWLock object. */

        /* Information about the regular lock the thread is waiting for. */
        LOCK* waitLock;          /* Regular lock object being waited for. */
        PROCLOCK* waitProcLock;  /* Holder of the regular lock object being waited for. */
        LOCKMODE waitLockMode;   /* Mode of the regular lock object to be acquired. */
        LOCKMASK heldLocks;      /* Bit mask of the lock modes already held by the thread. */

        /* Used to wait for the primary and standby servers to replay logs up to the synchronization point. */
        XLogRecPtr waitLSN;    /* LSN being waited for. */
        int syncRepState;      /* Primary/standby synchronization state. */
        bool syncRepInCompleteQueue;  /* Whether the thread is waiting in the completion queue. */
        SHM_QUEUE syncRepLinks;       /* Pointer to the synchronization queue. */

        DataQueuePtr waitDataSyncPoint;  /* Data synchronization point of data page replication. */
        int dataSyncRepState;            /* Synchronization state of data page replication. */
        SHM_QUEUE dataSyncRepLinks;      /* Pointer to the data page synchronization queue. */

        MemoryContext topmcxt;  /* Top-layer memory context of the thread. */
        char myProgName[64];
        pg_time_t myStartTime;
        syscalllock deleMemContextMutex;

        SHM_QUEUE myProcLocks[NUM_LOCK_PARTITIONS];

        /* The following structures are used to commit XIDs in batches. */
        bool procArrayGroupMember;  /* Whether this thread is a member of an XID batch commit. */
        pg_atomic_uint32 procArrayGroupNext;  /* Next member in the XID batch commit. */
        TransactionId procArrayGroupMemberXid;  /* The larger XID between a transaction and its sub-transactions. */

        /* CSN. */
        CommitSeqNo commitCSN;

        /* The following structures are used to commit Clogs in batches. */
        bool clogGroupMember;  /* Whether this thread is a member of a Clog batch commit. */
        pg_atomic_uint32 clogGroupNext;  /* Next member in the Clog batch commit. */
        TransactionId clogGroupMemberXid;  /* XID committed in the Clog batch commit. */
        CLogXidStatus clogGroupMemberXidStatus;  /* Transaction state in the Clog batch commit. */
        int64 clogGroupMemberPage;  /* Clog page corresponding to the Clog batch commit. */
        XLogRecPtr clogGroupMemberLsn;  /* LSN of the member in the Clog batch commit. */
    #ifdef __aarch64__
        /* The following structures are used to insert replay logs in batches in the ARM architecture. */
        bool xlogGroupMember;
        pg_atomic_uint32 xlogGroupNext;
        XLogRecData* xlogGrouprdata;
        XLogRecPtr xlogGroupfpw_lsn;
        XLogRecPtr* xlogGroupProcLastRecPtr;
        XLogRecPtr* xlogGroupXactLastRecEnd;
        void* xlogGroupCurrentTransactionState;
        XLogRecPtr* xlogGroupRedoRecPtr;
        void* xlogGroupLogwrtResult;
        XLogRecPtr xlogGroupReturntRecPtr;
        TimeLineID xlogGroupTimeLineID;
        bool* xlogGroupDoPageWrites;
        bool xlogGroupIsFPW;
        uint64 snap_refcnt_bitmap;
    #endif

        LWLock* subxidsLock;
        struct XidCache subxids;  /* XIDs of the sub-transactions. */

        LWLock* backendLock;  /* Lightweight lock of each thread, used to protect concurrent access to the following data structures. */

        /* Lock manager data, recording fast-path locks taken by this backend. */
        uint64 fpLockBits;  /* Holding modes of the fast-path locks. */
        FastPathTag fpRelId[FP_LOCK_SLOTS_PER_BACKEND];  /* Slot IDs of the table objects. */
        bool fpVXIDLock;  /* Whether the fast-path lock of the local XID is held. */
        LocalTransactionId fpLocalTransactionId;  /* Local XID. */
    };
    ```

    ![](../figures/1712.png)

    Figure 5-15 Transaction information

    As shown in Figure 5-15, **proc\_base\_all\_procs** and **proc\_base\_all\_xacts** are global shared areas. When a thread starts, it registers a slot in the shared area, and the thread-level pointer variables _t\_thrd.proc_ and _t\_thrd.pgxact_ point to that slot. When a transaction starts in the thread, information such as **xmin** and **xid** of the transaction is filled in the **pgxact** structure. The key functions and interfaces are as follows:

    \(1\) **GetOldestXmin**: Returns the value of **oldestXmin** cached by the current multi-version snapshot. \(For details about the multi-version snapshot mechanism, see the following sections.\)

    \(2\) **ProcArrayAdd**: Registers a slot in the shared area when a thread starts.

    \(3\) **ProcArrayRemove**: Removes the current thread from the ProcArray array.

    \(4\) **TransactionIdIsInProgress**: Checks whether an XID is still in progress.

  - **Multi-Version Snapshot Mechanism**

    openGauss uses a shared memory segment to take snapshots and manage the transaction information of each thread. If a shared lock were held for every snapshot calculation and an exclusive lock for every transaction end, lock contention would be severe. To resolve this issue, openGauss introduces the multi-version snapshot mechanism. Each time a transaction ends, an exclusive lock is held, a new version of the snapshot is calculated, and the version is recorded in an in-memory ring buffer queue. When another thread takes a snapshot, instead of holding the shared lock and recalculating, it fetches the latest snapshot from the top of the ring queue through atomic operations and increases the reference count by 1. After the snapshot information is copied, the reference count is decreased by 1. When the reference count of a slot is 0, the slot can be reused for a new snapshot.

    1\) Data structure of the multi-version snapshot

    The code of the data structure of the multi-version snapshot is as follows:

    ```
    typedef struct _snapxid {
        TransactionId xmin;
        TransactionId xmax;
        CommitSeqNo snapshotcsn;
        TransactionId localxmin;
        bool takenDuringRecovery;
        ref_cnt_t ref_cnt[NREFCNT];  /* Reference count of the snapshot. If the value is 0, the slot can be reused. */
    } snapxid_t;  /* Content of the multi-version snapshot. Because openGauss uses CSNs, only key information such as xmin, xmax, and snapshotcsn needs to be recorded. */
    static snapxid_t* g_snap_buffer = NULL;       /* Pointer to the ring buffer queue memory area. */
    static snapxid_t* g_snap_buffer_copy = NULL;  /* Shallow copy of the ring buffer queue memory. */
    static size_t g_bufsz = 0;
    static bool g_snap_assigned = false;          /* Whether the ring buffer queue of the multi-version snapshot has been initialized. */

    #define SNAP_SZ sizeof(snapxid_t)  /* Size of each multi-version snapshot. */
    #define MaxNumSnapVersion 64       /* Size of the multi-version snapshot queue (64 versions). */

    static volatile snapxid_t* g_snap_current = NULL;  /* Pointer to the current snapshot. */
    static volatile snapxid_t* g_snap_next = NULL;     /* Pointer to the slot for the next snapshot. */
    ```

    2\) Process of creating the ring buffer queue

    When the shared memory is created, its size is calculated as MaxNumSnapVersion x SNAP\_SZ. **g\_snap\_current** is set to offset 0, and **g\_snap\_next** is set to offset 1 x SNAP\_SZ.

    3\) Calculating a multi-version snapshot

    \(1\) Obtain the current **g\_snap\_next**.

    \(2\) With the exclusive lock on the PGPROC array held, calculate the key values such as **xmin**, **xmax**, and the CSN, and save the result to **g\_snap\_next**.

    \(3\) Search for the next reusable slot whose **refcount** is 0, set **g\_snap\_current** to **g\_snap\_next**, and set **g\_snap\_next** to the offset of the reusable slot.

    4\) Obtaining a multi-version snapshot

    \(1\) Obtain the **g\_snap\_current** pointer and add 1 to the reference count of the current snapshot slot to prevent the slot from being reused during concurrent snapshot updates.

    \(2\) Copy the information of the current snapshot into the static snapshot memory of the current connection.

    \(3\) Release the current multi-version snapshot and decrease the reference count of the snapshot slot by 1.

    5\) Key functions

    \(1\) **CreateSharedRingBuffer**: Creates the shared memory for the multi-version snapshots.

    \(2\) **GetNextSnapXid**: Obtains the position of the next multi-version snapshot. The function code is as follows:

    ```
    static inline snapxid_t* GetNextSnapXid()
    {
        return g_snap_buffer ? (snapxid_t*)g_snap_next : NULL;
    }
    ```

    \(3\) **SetNextSnapXid**: Obtains the next available slot and updates the current multi-version snapshot to the latest version. The function code is as follows:

    ```
    static void SetNextSnapXid()
    {
        if (g_snap_buffer != NULL) {
            g_snap_current = g_snap_next;  /* Update the multi-version snapshot to the latest version. */
            pg_write_barrier();  /* Prevent ARM out-of-order execution issues during ring buffer initialization. */
            g_snap_assigned = true;
            snapxid_t* ret = (snapxid_t*)g_snap_current;
            size_t idx = SNAPXID_INDEX(ret);
        loop:  /* Main loop. The overall idea is to keep traversing the multi-version slots to find a reusable slot whose refcount is 0. */
            do {
                ++idx;
                /* Wrap around and search from the beginning if the end is reached. */
                if (idx == g_bufsz)
                    idx = 0;
                ret = SNAPXID_AT(idx);
                if (IsZeroRefCount(ret)) {
                    g_snap_next = ret;
                    return;
                }
            } while (ret != g_snap_next);
            ereport(WARNING, (errmsg("snapshot ring buffer overflow.")));
            /* Currently, the number of multi-version snapshots is 64. Theoretically, all slots may be occupied. If there is no idle slot, traverse the slots again. */
            goto loop;
        }
    }
    ```

    \(4\) **CalculateLocalLatestSnapshot**: Calculates the content of a new multi-version snapshot.
    The function code is as follows:

    ```
    void CalculateLocalLatestSnapshot(bool forceCalc)
    {
        ……  /* Initialize variables. */

        snapxid_t* snapxid = GetNextSnapXid();  /* Get the slot for the next idle multi-version snapshot. */

        /* Initialize xmax to latestCompletedXid + 1. */
        xmax = t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid;
        TransactionIdAdvance(xmax);

        /* The values of xmin and oldestxmin are not recalculated at every transaction commit. They are recalculated only after 1000 transactions have committed or at an interval of 1s. The values of xmin and oldestxmin may therefore lag, but visibility determination is not affected. */
        currentTimeStamp = GetCurrentTimestamp();
        if (forceCalc || ((++snapshotPendingCnt == MAX_PENDING_SNAPSHOT_CNT) ||
                          (TimestampDifferenceExceeds(snapshotTimeStamp, currentTimeStamp, CALC_SNAPSHOT_TIMEOUT)))) {
            snapshotPendingCnt = 0;
            snapshotTimeStamp = currentTimeStamp;

            /* Initialize xmin. */
            globalxmin = xmin = xmax;

            int* pgprocnos = arrayP->pgprocnos;
            int numProcs;

            /*
             * Traverse the PGPROC structures cyclically and calculate the snapshot values.
             */
            numProcs = arrayP->numProcs;
            /* The main process is to traverse proc_base_all_xacts, record the minimum pgxact->xid as xmin, and record the minimum pgxact->xmin as oldestxmin. */
            for (index = 0; index < numProcs; index++) {
                int pgprocno = pgprocnos[index];
                volatile PGXACT* pgxact = &g_instance.proc_base_all_xacts[pgprocno];
                TransactionId xid;

                if (pgxact->vacuumFlags & PROC_IN_LOGICAL_DECODING)
                    continue;

                /* Skip the xmin of autovacuum to prevent long VACUUM operations from blocking dirty tuple recycling. */
                if (pgxact->vacuumFlags & PROC_IN_VACUUM)
                    continue;

                /* Use the minimum xmin to update globalxmin. */
                xid = pgxact->xmin;

                if (TransactionIdIsNormal(xid) && TransactionIdPrecedes(xid, globalxmin))
                    globalxmin = xid;

                xid = pgxact->xid;

                if (!TransactionIdIsNormal(xid))
                    xid = pgxact->next_xid;

                if (!TransactionIdIsNormal(xid) || !TransactionIdPrecedes(xid, xmax))
                    continue;

                if (TransactionIdPrecedes(xid, xmin))
                    xmin = xid;
            }

            if (TransactionIdPrecedes(xmin, globalxmin))
                globalxmin = xmin;

            t_thrd.xact_cxt.ShmemVariableCache->xmin = xmin;
            t_thrd.xact_cxt.ShmemVariableCache->recentLocalXmin = globalxmin;
        }
        /* Fill in the multi-version snapshot information. The values of xmin and oldestxmin may lag because they are not always recalculated, but the xmax and CSN values are accurate. Note that the exclusive lock must be held while the snapshot is calculated. */
        snapxid->xmin = t_thrd.xact_cxt.ShmemVariableCache->xmin;
        snapxid->xmax = xmax;
        snapxid->localxmin = t_thrd.xact_cxt.ShmemVariableCache->recentLocalXmin;
        snapxid->snapshotcsn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo;
        snapxid->takenDuringRecovery = RecoveryInProgress();
        SetNextSnapXid();  /* Publish this snapshot as the current multi-version snapshot. */
    }
    ```

    \(5\) **GetLocalSnapshotData**: Obtains the latest multi-version snapshot for a transaction. The function code is as follows:

    ```
    Snapshot GetLocalSnapshotData(Snapshot snapshot)
    {
        /* Check whether a multi-version snapshot exists. Before the recovery process starts, no multi-version snapshot has been calculated; in this case, NULL is returned. */
        if (!g_snap_assigned || (g_snap_buffer == NULL)) {
            ereport(DEBUG1, (errmsg("Falling back to origin GetSnapshotData: not assigned yet or during shutdown\n")));
            return NULL;
        }
        pg_read_barrier();  /* Prevent ARM out-of-order execution issues during ring buffer initialization. */
        snapxid_t* snapxid = GetCurrentSnapXid();  /* Add 1 to the refcount of the current multi-version snapshot to prevent it from being reused by transactions that concurrently calculate new snapshots. */

        snapshot->user_data = snapxid;

        ……  /* Assign the information in the multi-version snapshot snapxid to the snapshot. Note that this is a deep copy: because the multi-version snapshot contains only a few key variables, the values can be assigned to the snapshot directly, after which the refcount of the multi-version snapshot can be released. */
        u_sess->utils_cxt.RecentXmin = snapxid->xmin;
        snapshot->xmin = snapxid->xmin;
        snapshot->xmax = snapxid->xmax;
        snapshot->snapshotcsn = snapxid->snapshotcsn;
        ……
        ReleaseSnapshotData(snapshot);  /* Release the refcount of the multi-version snapshot so that the slot can be reused. */
        return snapshot;
    }
    ```
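    The pin-copy-unpin pattern used by GetLocalSnapshotData can be sketched with C11 atomics as follows; the structure, names, and sizes here are illustrative, not the openGauss source.

    ```
    #include <stdatomic.h>
    #include <stdio.h>

    #define SNAP_SLOTS 64

    typedef struct { unsigned long long xmin, xmax, snapshotcsn; } SnapSketch;

    typedef struct {
        SnapSketch snap;
        atomic_int ref_cnt;  /* 0 means the slot can be reused by the writer. */
    } SnapSlotSketch;

    static SnapSlotSketch g_slots[SNAP_SLOTS];
    static _Atomic(SnapSlotSketch *) g_current = &g_slots[0];

    /* Reader side: pin the current slot, copy the few key fields, unpin. */
    static SnapSketch ReadSnapshotSketch(void)
    {
        SnapSlotSketch *cur = atomic_load(&g_current);
        atomic_fetch_add(&cur->ref_cnt, 1);  /* Pin: the slot cannot be reused while we copy. */
        SnapSketch copy = cur->snap;         /* Deep copy of xmin, xmax, and snapshotcsn. */
        atomic_fetch_sub(&cur->ref_cnt, 1);  /* Unpin: at 0 the slot becomes reusable. */
        return copy;
    }

    int main(void)
    {
        g_slots[0].snap.xmax = 100;
        SnapSketch s = ReadSnapshotSketch();
        printf("snapshot xmax = %llu\n", s.xmax);
        return 0;
    }
    ```

    The real mechanism must additionally handle the race between loading the current pointer and pinning the slot, and the writer side must skip slots whose reference count is nonzero, as the SetNextSnapXid code above does.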
## 5.3 Lock Mechanism

In a database, concurrent access to public resources is controlled by locks. According to their purposes, locks in openGauss fall into three types: spinlocks, LWLocks \(lightweight locks\), and regular locks. Further encapsulation can be performed on top of these three types. The general procedure of using a lock is: acquire the lock, perform the operations in the critical section, and release the lock. Provided correctness is guaranteed, lock usage and lock contention are key factors that constrain performance. The following briefly describes the three types of locks in openGauss and then focuses on the lock-related performance optimization that openGauss applies on the Kunpeng architecture.

- **Spinlocks**

  A spinlock is generally implemented by using a test-and-set \(TAS\) atomic instruction of the CPU and has only two states: locked and unlocked. A spinlock can be held by only one process. The difference between a spinlock and a semaphore is that, when a process cannot obtain the resource, the semaphore puts the process to sleep and blocks it, whereas the spinlock keeps it busy-waiting. Spinlocks are mainly used in scenarios where the locking duration is very short, for example, modifying or reading a flag field, within dozens of instructions. When writing code, ensure that a spinlock is locked and unlocked within the same function. Deadlocks cannot be detected and must be avoided by the code itself. There is no waiting queue, and busy-waiting consumes CPU resources; if a spinlock is misused and held for a long time, a core dump is triggered. In openGauss, many 32-bit, 64-bit, and 128-bit variables are updated by compare-and-swap \(CAS\) atomic operations to avoid or reduce the use of spinlocks.

  The operations related to spinlocks are as follows:

  \(1\) SpinLockInit: Initializes a spinlock.

  \(2\) SpinLockAcquire: Locks a spinlock.

  \(3\) SpinLockRelease: Releases a spinlock.

  \(4\) SpinLockFree: Destroys a spinlock and clears related resources.
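  For illustration, a minimal TAS spinlock with this Init/Acquire/Release interface can be sketched with C11 atomics as follows \(an illustrative sketch, not the openGauss implementation; the Sketch-suffixed names are invented here\):

  ```
  #include <stdatomic.h>

  typedef struct { atomic_flag locked; } SpinLockSketch;

  static void SpinLockInitSketch(SpinLockSketch *lock) { atomic_flag_clear(&lock->locked); }

  static void SpinLockAcquireSketch(SpinLockSketch *lock)
  {
      while (atomic_flag_test_and_set(&lock->locked)) {
          /* Busy-wait: the caller spins instead of sleeping, so critical sections
           * must stay very short (e.g., setting or reading a flag field). */
      }
  }

  static void SpinLockReleaseSketch(SpinLockSketch *lock) { atomic_flag_clear(&lock->locked); }

  int main(void)
  {
      SpinLockSketch lock;
      int protected_flag = 0;
      SpinLockInitSketch(&lock);
      SpinLockAcquireSketch(&lock);
      protected_flag = 1;             /* Short critical section. */
      SpinLockReleaseSketch(&lock);
      return protected_flag ? 0 : 1;
  }
  ```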
    LWLocks are mainly used in scenarios where the operations in the critical section take a long time. Locking and unlocking operations can cross functions, but a lock must be released as soon as possible after use. Deadlock avoidance must be guaranteed by the code itself; however, because the code is complex and exceptions must be handled in many places, openGauss provides a deadlock detection mechanism to avoid LWLock deadlocks in various exception scenarios.

    Functions related to LWLocks are as follows:

    \(1\) LWLockAssign: Applies for an LWLock.

    \(2\) LWLockAcquire: Locks an LWLock.

    \(3\) LWLockConditionalAcquire: Conditionally locks an LWLock. If the lock is not obtained, **false** is returned and the system does not keep waiting.

    \(4\) LWLockRelease: Releases an LWLock.

    \(5\) LWLockReleaseAll: Releases all LWLocks held by the current thread. If an error occurs during a transaction, all LWLocks are released upon rollback to prevent subsequent operations from being blocked.

    The related structure code is as follows:

    ```
    #define LW_FLAG_HAS_WAITERS ((uint32)1 << 30)
    #define LW_FLAG_RELEASE_OK ((uint32)1 << 29)
    #define LW_FLAG_LOCKED ((uint32)1 << 28)

    #define LW_VAL_EXCLUSIVE ((uint32)1 << 24)
    #define LW_VAL_SHARED 1 /* Marks the state of an LWLock when the lock is obtained or released. */

    typedef struct LWLock {
        uint16 tranche;            /* ID of the LWLock */
        pg_atomic_uint32 state;    /* Lock state */
        dlist_head waiters;        /* Linked list of threads that are waiting for the lock */
    #ifdef LOCK_DEBUG
        pg_atomic_uint32 nwaiters; /* Number of threads waiting for the lock */
        struct PGPROC *owner;      /* Last holder of the exclusive lock */
    #endif
    #ifdef ENABLE_THREAD_CHECK
        pg_atomic_uint32 rwlock;
        pg_atomic_uint32 listlock;
    #endif
    } LWLock;
    ```

- **Regular Locks**

    Regular locks are implemented by using a hash table. Regular locks support multiple lock modes; the semantics of these modes and the conflicts between them are defined by a conflict table. Regular locks are used to lock database objects accessed by services. Locking of a regular lock complies with the two-phase locking protocol of the database: a lock is acquired during access and released when the transaction is committed.

    A regular lock has a waiting queue and provides a deadlock detection mechanism. When a deadlock is detected, one of the transactions is rolled back.

    openGauss provides eight lock levels for controlling the concurrency of different statements: level-1 locks are used for the SELECT statement, level-3 locks for the INSERT, UPDATE, and DELETE statements, level-4 locks for the VACUUM and ANALYZE statements, and level-8 locks for various DDL statements. The specific macro definitions and naming code are as follows:

    ```
    #define AccessShareLock 1 /* SELECT statement */
    #define RowShareLock 2    /* SELECT FOR UPDATE and SELECT FOR SHARE statements */
    #define RowExclusiveLock 3 /* INSERT, UPDATE, and DELETE statements */
    #define ShareUpdateExclusiveLock \
        4 /* VACUUM (non-FULL), ANALYZE, and CREATE INDEX CONCURRENTLY statements */
    #define ShareLock 5 /* CREATE INDEX (without CONCURRENTLY) statement */
    #define ShareRowExclusiveLock \
        6 /* Similar to the exclusive mode, but concurrent access in ROW SHARE mode is allowed. */
    #define ExclusiveLock \
        7 /* Blocks ROW SHARE; statements such as SELECT...FOR UPDATE cannot run concurrently. */
    #define AccessExclusiveLock \
        8 /* ALTER TABLE, DROP TABLE, VACUUM FULL, and LOCK TABLE statements */
    ```

    Table 5-7 describes the conflict and concurrency control among the eight lock levels, where √ indicates that the two lock operations can be performed concurrently and - indicates that they conflict.

    **Table 5-7** Lock conflict and concurrency control

| Lock Level | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 1. ACCESS SHARE | √ | √ | √ | √ | √ | √ | √ | - |
| 2. ROW SHARE | √ | √ | √ | √ | √ | √ | - | - |
| 3. ROW EXCLUSIVE | √ | √ | √ | √ | - | - | - | - |
| 4. SHARE UPDATE EXCLUSIVE | √ | √ | √ | - | - | - | - | - |
| 5. SHARE | √ | √ | - | - | √ | - | - | - |
| 6. SHARE ROW EXCLUSIVE | √ | √ | - | - | - | - | - | - |
| 7. EXCLUSIVE | √ | - | - | - | - | - | - | - |
| 8. ACCESS EXCLUSIVE | - | - | - | - | - | - | - | - |
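Before moving on to the lock data structures, the following minimal C sketch \(not openGauss source code; the names LockConflicts and LocksConflict are hypothetical\) shows how a conflict matrix like Table 5-7 can be encoded as per-mode bit masks and consulted when a lock request arrives. This is also the idea behind the grantMask and waitMask fields of the LOCK structure shown below.

```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCKBIT_ON(mode) (1u << (mode))

/* Conflict masks transcribed from Table 5-7 (1-based lock levels):
 * bit i is set in entry m if lock level m conflicts with level i. */
static const uint32_t LockConflicts[9] = {
    0, /* unused slot 0 */
    /* 1. ACCESS SHARE */ LOCKBIT_ON(8),
    /* 2. ROW SHARE */ LOCKBIT_ON(7) | LOCKBIT_ON(8),
    /* 3. ROW EXCLUSIVE */ LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
    /* 4. SHARE UPDATE EXCLUSIVE */ LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
    /* 5. SHARE */ LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
    /* 6. SHARE ROW EXCLUSIVE */ LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
    /* 7. EXCLUSIVE */ LOCKBIT_ON(2) | LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
    /* 8. ACCESS EXCLUSIVE */ LOCKBIT_ON(1) | LOCKBIT_ON(2) | LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8)
};

/* Returns true if a request for requestMode conflicts with the modes
 * already granted on the lock (grantMask uses the same bit layout). */
static bool LocksConflict(int requestMode, uint32_t grantMask)
{
    return (LockConflicts[requestMode] & grantMask) != 0;
}

int main(void)
{
    uint32_t grantMask = LOCKBIT_ON(3); /* A ROW EXCLUSIVE lock (e.g., an UPDATE) is held. */
    printf("ACCESS SHARE (SELECT) blocked: %d\n", LocksConflict(1, grantMask));         /* prints 0 */
    printf("ACCESS EXCLUSIVE (DROP TABLE) blocked: %d\n", LocksConflict(8, grantMask)); /* prints 1 */
    return 0;
}
```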
The lock object data structure is as follows. Different lock objects are identified by assigning values to field 1 to field 5, and **locktag\_type** identifies the lock object type, such as a table-level relation object, a row-level tuple object, or a transaction object. The corresponding code is as follows:

```
typedef struct LOCKTAG {
    uint32 locktag_field1; /* 32 bits */
    uint32 locktag_field2; /* 32 bits */
    uint32 locktag_field3; /* 32 bits */
    uint32 locktag_field4; /* 32 bits */
    uint16 locktag_field5; /* 16 bits */
    uint8 locktag_type;    /* For details, see LockTagType. */
    uint8 locktag_lockmethodid; /* Lock method type */
} LOCKTAG;

typedef enum LockTagType {
    LOCKTAG_RELATION, /* Table relation */
    /* The ID of LOCKTAG_RELATION consists of the OID of the database and the OID of the table. If the OID of the database is 0, the table is a shared table. The OID is a common object identifier of the openGauss kernel. */
    LOCKTAG_RELATION_EXTEND, /* Priority of the extension table */
    /* ID of LOCKTAG_RELATION_EXTEND */
    LOCKTAG_PARTITION, /* Partition */
    LOCKTAG_PARTITION_SEQUENCE, /* Partition sequence */
    LOCKTAG_PAGE, /* Page in the table */
    /* The ID of LOCKTAG_PAGE is the value of RELATION + the value of BlockNumber (page number). */
    LOCKTAG_TUPLE, /* Physical tuple */
    /* The ID of LOCKTAG_TUPLE is the value of PAGE + the value of OffsetNumber (offset on the page). */
    LOCKTAG_TRANSACTION, /* XID (to wait for the corresponding transaction to end) */
    /* The ID of LOCKTAG_TRANSACTION is the XID. */
    LOCKTAG_VIRTUALTRANSACTION, /* Virtual XID */
    /* The ID of LOCKTAG_VIRTUALTRANSACTION is its virtual XID. */
    LOCKTAG_OBJECT, /* Non-table database object */
    /* The ID of LOCKTAG_OBJECT is database OID + class OID + object OID + sub-ID. */
    LOCKTAG_CSTORE_FREESPACE, /* Free space of column store */
    LOCKTAG_USERLOCK, /* Lock object reserved for user locks */
    LOCKTAG_ADVISORY, /* Advisory lock */
    LOCK_EVENT_NUM
} LockTagType;
```

In the structure of a regular lock, **tag** is the unique identifier of the regular lock object, and the PROCLOCK structure links all threads that hold the lock and all threads that are waiting for it. The corresponding code is as follows:

```
typedef struct LOCK {
    /* Hash key */
    LOCKTAG tag; /* Unique identifier of the lock object */

    /* Data */
    LOCKMASK grantMask;   /* Bit mask of the lock modes already granted */
    LOCKMASK waitMask;    /* Bit mask of the lock modes being waited for */
    SHM_QUEUE procLocks;  /* Linked list of the PROCLOCK structures associated with the lock */
    PROC_QUEUE waitProcs; /* Linked list of the PGPROC structures waiting for the lock */
    int requested[MAX_LOCKMODES]; /* Lock request counts */
    int nRequested;       /* Sum of the requested array */
    int granted[MAX_LOCKMODES];   /* Counts of granted locks */
    int nGranted;         /* Sum of the granted array */
} LOCK;
```

The PROCLOCK structure is used to connect the information about the threads that wait for a lock and about those that hold the lock.
The corresponding code is as follows:

```
typedef struct PROCLOCK {
    /* Identifier */
    PROCLOCKTAG tag; /* Unique identifier of the PROCLOCK structure */

    /* Data */
    LOCKMASK holdMask;    /* Bit mask of the lock types that have been obtained */
    LOCKMASK releaseMask; /* Bit mask of the lock types that have been pre-released */
    SHM_QUEUE lockLink;   /* Pointer to the linked list of the lock object */
    SHM_QUEUE procLink;   /* Pointer to the linked list of the PGPROC structure */
} PROCLOCK;
```

The **waitLock** field in the t\_thrd.proc structure records the lock that the thread is waiting for, and the **procLocks** field in the lock structure associates all holders of and waiters for the lock. Figure 5-16 shows the queue relationship.

![](../figures/zh-cn_image_0000001252803745.png)

Figure 5-16 Queue relationship of the t\_thrd.proc structure

The main functions of a regular lock are as follows:

\(1\) LockAcquire: Locks a lock object.

\(2\) LockRelease: Releases a lock object.

\(3\) LockReleaseAll: Releases all lock resources.

- **Deadlock Detection Mechanism**

    A deadlock occurs when, for example, process B needs to access the resources held by process A, but process A, for various reasons, does not release its locks. As a result, the database is permanently blocked. As shown in Figure 5-17, T1 holds resource R1 and requests resource R2, while T2 holds resource R2 and requests resource R1.

    ![](../figures/1713.png)

    Figure 5-17 Deadlock status

    The necessary condition for a deadlock is that resources are both requested and held: each process can hold one resource while requesting another at the same time. A common way to break a deadlock is to interrupt the execution of one of the transactions and thereby break the waiting loop. openGauss provides deadlock detection mechanisms for both LWLocks and regular locks. The following describes the related principles and code.

    - **Deadlock Detection and Self-Healing of LWLocks**

        openGauss uses an independent monitoring thread to detect, diagnose, and resolve LWLock deadlocks. A worker thread writes a timestamp value before it starts waiting for an LWLock and resets the timestamp to 0 after it successfully obtains the lock. By quickly comparing the timestamp values, the monitoring thread can locate threads that have failed to obtain a lock for a long time. This comparison is fast and lightweight; the full deadlock diagnosis is triggered only when a long lock wait is detected, which prevents frequent diagnosis from affecting service execution. Once a deadlock loop is confirmed, the monitoring thread records the deadlock information in the log and then takes recovery measures: it selects one thread in the deadlock loop to report an error and exit. Figure 5-18 shows the mechanism.

        ![](../figures/1714.png)

        Figure 5-18 Deadlock detection and self-healing of LWLocks

        Deadlock detection and verification are CPU-intensive operations. To avoid affecting database performance and running stability, LWLock detection first uses a lightweight method, a watchdog based on timestamps, to quickly determine whether a deadlock may have occurred. When waiting for a lock, the worker thread writes the timestamp at which the wait starts to global memory. After the lock request succeeds, the timestamp is reset to 0.
        For a deadlocked thread, its lock request stays in the wait state and its timestamp is not reset to 0, so the difference between that timestamp and the current time keeps growing. The GUC parameter **fault\_mon\_timeout** specifies the check interval; the default value is 5 seconds. Deadlock detection for LWLocks is performed at the interval specified by **fault\_mon\_timeout**. If the same thread and lock ID are detected again and the timestamp difference exceeds the detection interval, full deadlock detection is triggered. The functions for time statistics and lightweight detection are as follows:

        \(1\) pgstat\_read\_light\_detect: Reads the timestamps related to threads and lock IDs from the statistics structure and records them to a pointer queue.

        \(2\) lwm\_compare\_light\_detect: Compares the current state with that from several seconds before. If threads and lock IDs that may be deadlocked are found, **true** is returned; otherwise, **false** is returned.

        LWLock deadlock detection is a directed acyclic graph \(DAG\) determination process. Its implementation is similar to that of a regular lock, which is described in detail in the following section. Deadlock detection requires two types of information: lock information \(including request and allocation information\) and thread information \(including waiting and holding information\). The information is recorded in the corresponding global variables and can be accessed and evaluated by the deadlock monitoring thread. The related functions are as follows:

        \(1\) lwm\_heavy\_diagnosis: Detects whether a deadlock occurs.

        \(2\) lwm\_deadlock\_report: Reports detailed deadlock information for fault locating and diagnosis.

        \(3\) lw\_deadlock\_auto\_healing: Heals a deadlock by selecting a thread in the loop to exit.

        The data structures related to the locks and threads used for deadlock detection are as follows:

        \(1\) **lock\_entry\_id** records thread information. **thread\_id** and **st\_sessionid** adapt to the thread pool framework so that the correct information can be found in the statistics. The corresponding code is as follows:

        ```
        typedef struct {
            ThreadId thread_id;
            uint64 st_sessionid;
        } lock_entry_id;
        ```

        \(2\) **lwm\_light\_detect** records the threads that may be deadlocked and uses a linked list to connect all the current information. The corresponding code is as follows:

        ```
        typedef struct {
            /* Thread ID */
            lock_entry_id entry_id;

            /* Reference count of LWLock detection */
            int lw_count;
        } lwm_light_detect;
        ```

        \(3\) **lwm\_lwlocks** records thread-related lock information, including the number of held locks and lock wait information. The corresponding code is as follows:

        ```
        typedef struct {
            lock_entry_id be_tid;         /* Thread ID */
            int be_idx;                   /* Location of the background thread */
            LWLockAddr want_lwlock;       /* Information about the lock that the thread is waiting to obtain */
            int lwlocks_num;              /* Number of LWLocks held by the thread */
            lwlock_id_mode* held_lwlocks; /* Array of LWLocks held by the thread */
        } lwm_lwlocks;
        ```

    - **Deadlock Detection for Regular Locks**

        If no conflict occurs when openGauss acquires a lock, openGauss locks it directly. If a conflict occurs, openGauss sets a timer and waits; after the specified period of time, the timer invokes deadlock detection.
        If process T2 is behind process T1 in the waiting queue of a lock, and the lock that T2 needs to obtain conflicts with the lock that T1 needs to obtain, there is a soft edge from T2 to T1. If the lock request of T2 conflicts with the lock already held by T1, there is a hard edge. The overall idea is to start, by calling functions recursively, from the thread that is currently waiting for a lock and move forward along the waiting edges to check whether a loop exists. If a soft edge exists in the loop, the two processes involved are both waiting for the lock, so the lock waiting queue is re-sorted to try to resolve the deadlock conflict. If there is no soft edge, the only way to resolve the deadlock loop is to terminate the transaction waiting for the current lock. As shown in Figure 5-19, dashed lines indicate soft edges and solid lines indicate hard edges: thread A waits for thread B, thread B waits for thread C, and thread C waits for thread A. Because A waits for B on a soft edge, the wait relationship is adjusted, as shown in Figure 5-19. After the adjustment, thread A waits for thread C and thread C waits for thread A; no soft edge remains, and a deadlock is detected.

        ![](../figures/1715.png)

        Figure 5-19 Deadlock detection for regular locks

        The main functions are as follows:

        \(1\) DeadLockCheck: Detects deadlocks.

        \(2\) DeadLockCheckRecurse: Returns **true** if a deadlock occurs, or returns **false** and resolves the deadlock conflict if a soft edge exists.

        \(3\) check\_stack\_depth: Checks the recursion depth of the deadlock detection stack. \(If the recursion stack is too deep, all LWLock partitions are held for a long time during deadlock detection, blocking services.\)

        \(4\) CheckDeadLockRunningTooLong: Checks how long deadlock detection has been running to prevent it from taking too long; if detection lasts too long, all subsequent services are blocked. The corresponding code is as follows:

        ```
        static void CheckDeadLockRunningTooLong(int depth)
        {
            /* Check every four layers. */
            if (depth > 0 && ((depth % 4) == 0)) {
                TimestampTz now = GetCurrentTimestamp();
                long secs = 0;
                int usecs = 0;

                if (now > t_thrd.storage_cxt.deadlock_checker_start_time) {
                    TimestampDifference(t_thrd.storage_cxt.deadlock_checker_start_time, now, &secs, &usecs);
                    if (secs > 600) { /* An error is reported if deadlock detection lasts for more than 10 minutes. */
        #ifdef USE_ASSERT_CHECKING
                        DumpAllLocks(); /* In the debug version, all lock information is dumped for fault locating. */
        #endif

                        ereport(defence_errlevel(), (errcode(ERRCODE_INTERNAL_ERROR),
                            errmsg("Deadlock checker runs too long and is greater than 10 minutes.")));
                    }
                }
            }
        }
        ```

        \(5\) FindLockCycle: Checks for deadlock loops.

        \(6\) FindLockCycleRecurse: Internal recursive function called during deadlock detection.

        The corresponding data structures are as follows:

        \(1\) The directed edge, the core and most critical data structure in deadlock detection. The corresponding code is as follows:

        ```
        typedef struct EDGE {
            PGPROC *waiter;  /* Waiting thread */
            PGPROC *blocker; /* Blocking thread */
            int pred;        /* Workspace for topological sorting */
            int link;        /* Workspace for topological sorting */
        } EDGE;
        ```

        \(2\) A waiting queue that can be rearranged.
        The corresponding code is as follows:

        ```
        typedef struct WAIT_ORDER {
            LOCK *lock;     /* The lock whose wait queue is described */
            PGPROC **procs; /* Array of PGPROC pointers in the new wait order */
            int nProcs;
        } WAIT_ORDER;
        ```

        \(3\) The information printed at the end of deadlock detection. The corresponding code is as follows:

        ```
        typedef struct DEADLOCK_INFO {
            LOCKTAG locktag;   /* Unique identifier of the lock object that is waited for */
            LOCKMODE lockmode; /* Mode of the lock that is waited for */
            ThreadId pid;      /* ID of the blocked thread */
        } DEADLOCK_INFO;
        ```

- **Lockless Atomic Operation**

    openGauss encapsulates 32-bit, 64-bit, and 128-bit atomic operations, which are used to replace spinlocks and implement atomic updates of simple variables.

    \(1\) gs\_atomic\_add\_32: Performs a 32-bit add operation and returns the new value. The corresponding code is as follows:

    ```
    static inline int32 gs_atomic_add_32(volatile int32* ptr, int32 inc)
    {
        return __sync_fetch_and_add(ptr, inc) + inc;
    }
    ```

    \(2\) gs\_atomic\_add\_64: Performs a 64-bit add operation and returns the new value. The corresponding code is as follows:

    ```
    static inline int64 gs_atomic_add_64(int64* ptr, int64 inc)
    {
        return __sync_fetch_and_add(ptr, inc) + inc;
    }
    ```

    \(3\) gs\_compare\_and\_swap\_32: 32-bit CAS operation. If **dest** has not been updated by another thread, **newval** is written to **dest** and **true** is returned; otherwise, **false** is returned. The corresponding code is as follows:

    ```
    static inline bool gs_compare_and_swap_32(int32* dest, int32 oldval, int32 newval)
    {
        if (oldval == newval)
            return true;

        volatile bool res = __sync_bool_compare_and_swap(dest, oldval, newval);

        return res;
    }
    ```

    \(4\) gs\_compare\_and\_swap\_64: 64-bit CAS operation, with the same semantics as the 32-bit version. The corresponding code is as follows:

    ```
    static inline bool gs_compare_and_swap_64(int64* dest, int64 oldval, int64 newval)
    {
        if (oldval == newval)
            return true;

        return __sync_bool_compare_and_swap(dest, oldval, newval);
    }
    ```

    \(5\) arm\_compare\_and\_swap\_u128: openGauss provides cross-platform 128-bit CAS operations. On an ARM platform, a separate instruction set is used to assemble the 128-bit atomic operation, improving the lock concurrency performance of the kernel. For details, see the next section. The corresponding code is as follows:

    ```
    #ifdef __aarch64__
    static inline uint128_u arm_compare_and_swap_u128(volatile uint128_u* ptr, uint128_u oldval, uint128_u newval)
    {
    #ifdef __ARM_LSE
        return __lse_compare_and_swap_u128(ptr, oldval, newval);
    #else
        return __excl_compare_and_swap_u128(ptr, oldval, newval);
    #endif
    }
    #endif
    ```

    \(6\) atomic\_compare\_and\_swap\_u128: 128-bit CAS operation. If **dest** has not been updated by another thread, **newval** is written to **dest**; the function returns the value observed before the operation, so the caller can tell whether the swap succeeded or another thread updated the value first. Note that the upper-layer caller must ensure that the input parameters are 128-bit aligned.
    The corresponding code is as follows:

    ```
    static inline uint128_u atomic_compare_and_swap_u128(
        volatile uint128_u* ptr,
        uint128_u oldval = uint128_u{0},
        uint128_u newval = uint128_u{0})
    {
    #ifdef __aarch64__
        return arm_compare_and_swap_u128(ptr, oldval, newval);
    #else
        uint128_u ret;
        ret.u128 = __sync_val_compare_and_swap(&ptr->u128, oldval.u128, newval.u128);
        return ret;
    #endif
    }
    ```

- **Performance Optimization Based on Kunpeng Servers**

    This section describes how openGauss optimizes lock-related functions and structures based on the hardware architecture.

    - **WAL Group Insert Optimization**

        The redo log cache system of the database is the write cache for redo log persistence: redo logs are written to the log cache before being flushed to disk. The write efficiency of the log cache is a major factor in the overall throughput of the database. Because logs must be written in sequence, threads contend for a lock when writing logs, and this lock contention becomes the main performance bottleneck. Based on the CPU characteristics of ARM-based Kunpeng servers, openGauss inserts logs in groups to reduce lock contention and improve the efficiency of inserting WALs, thereby improving the throughput of the entire database. Figure 5-20 shows the process of inserting logs in groups.

        ![](../figures/1716.png)

        Figure 5-20 Inserting logs in groups

        \(1\) Threads no longer each contend for the lock individually.

        \(2\) Within the same time window, all threads join a group before contending for the lock. The first thread that joins the group is the leader thread. CAS atomic operations are used to manage the queue.

        \(3\) The leader thread contends for the lock on behalf of the entire group. The other \(follower\) threads in the group go to sleep and wait for the leader thread to wake them up.

        \(4\) After obtaining the lock, the leader thread traverses the logs to be inserted by all threads in the group to obtain the total space required, and reserves the space only once.

        \(5\) The leader thread writes the logs of all threads in the group to the log buffer.

        \(6\) The lock is released and all follower threads are awakened.

        \(7\) The follower threads do not need to contend for the lock because their logs have already been written by the leader thread; they directly proceed to the subsequent process.

        The key function code is as follows:

        ```
        static XLogRecPtr XLogInsertRecordGroup(XLogRecData* rdata, XLogRecPtr fpw_lsn)
        {
            … /* Initialize variables and perform simple verification. */
            START_CRIT_SECTION(); /* Start the critical section. */

            proc->xlogGroupMember = true;
            …
            proc->xlogGroupDoPageWrites = &t_thrd.xlog_cxt.doPageWrites;

            nextidx = pg_atomic_read_u32(&t_thrd.shemem_ptr_cxt.LocalGroupWALInsertLocks[groupnum].l.xlogGroupFirst);

            while (true) {
                pg_atomic_write_u32(&proc->xlogGroupNext, nextidx); /* Record the previous member in the PGPROC structure. */
                /* Prevent ARM out-of-order execution to ensure that all previous write operations are visible. */
                pg_write_barrier();

                if (pg_atomic_compare_exchange_u32(&t_thrd.shemem_ptr_cxt.LocalGroupWALInsertLocks[groupnum].l.xlogGroupFirst,
                    &nextidx,
                    (uint32)proc->pgprocno)) {
                    break;
                }
                /* Obtain the procno of the previous member. If it is invalid, this member is the leader. */
            }
            /* Non-leader members do not obtain the WAL insert lock. They only wait until they are awakened by the leader. */
            if (nextidx != INVALID_PGPROCNO) {
                int extraWaits = 0;

                for (;;) {
                    /* Acts as a read barrier. */
                    PGSemaphoreLock(&proc->sem, false);
                    /* Acts as a read barrier. */
                    pg_memory_barrier();
                    if (!proc->xlogGroupMember) {
                        break;
                    }
                    extraWaits++;
                }

                while (extraWaits-- > 0) {
                    PGSemaphoreUnlock(&proc->sem);
                }
                END_CRIT_SECTION();
                return proc->xlogGroupReturntRecPtr;
            }
            /* The leader member holds the lock. */
            WALInsertLockAcquire();
            /* Calculate the size of the Xlog records of each member thread. */
            …
            /* The leader thread inserts the Xlog records of all member threads into the buffer. */
            while (nextidx != INVALID_PGPROCNO) {
                localProc = g_instance.proc_base_all_procs[nextidx];

                if (unlikely(localProc->xlogGroupIsFPW)) {
                    nextidx = pg_atomic_read_u32(&localProc->xlogGroupNext);
                    localProc->xlogGroupIsFPW = false;
                    continue;
                }
                XLogInsertRecordNolock(localProc->xlogGrouprdata,
                    localProc,
                    XLogBytePosToRecPtr(StartBytePos),
                    XLogBytePosToEndRecPtr(
                        StartBytePos + MAXALIGN(((XLogRecord*)(localProc->xlogGrouprdata->data))->xl_tot_len)),
                    XLogBytePosToRecPtr(PrevBytePos));
                PrevBytePos = StartBytePos;
                StartBytePos += MAXALIGN(((XLogRecord*)(localProc->xlogGrouprdata->data))->xl_tot_len);
                nextidx = pg_atomic_read_u32(&localProc->xlogGroupNext);
            }

            WALInsertLockRelease(); /* Work is done: release the lock and wake up all member threads. */
            while (wakeidx != INVALID_PGPROCNO) {
                PGPROC* proc = g_instance.proc_base_all_procs[wakeidx];

                wakeidx = pg_atomic_read_u32(&proc->xlogGroupNext);
                pg_atomic_write_u32(&proc->xlogGroupNext, INVALID_PGPROCNO);
                proc->xlogGroupMember = false;
                pg_memory_barrier();

                if (proc != t_thrd.proc) {
                    PGSemaphoreUnlock(&proc->sem);
                }
            }

            END_CRIT_SECTION();
            return proc->xlogGroupReturntRecPtr;
        }
        ```

    - **False Sharing Elimination by Using Cache Alignment**

        When accessing main memory, the CPU fetches an entire cache line at a time. The typical cache line size on x86 is 64 bytes; on the ARM-based Hi1620 chip, the L1 and L2 caches use 64-byte lines and the L3 cache uses 128-byte lines. Fetching data this way greatly improves data access efficiency. However, if data at different locations in the same cache line is frequently read and written by different threads, a write on one CPU invalidates that cache line on the other CPUs. The CPU's effort to fetch data by cache line is then not only wasted but becomes a performance burden. False sharing refers to this low-performance behavior in which different CPUs simultaneously access different locations in the same cache line.

        Take LWLocks as an example. The code is as follows:

        ```
        #ifdef __aarch64__
        #define LWLOCK_PADDED_SIZE PG_CACHE_LINE_SIZE(128)
        #else
        #define LWLOCK_PADDED_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
        #endif
        typedef union LWLockPadded
        {
            LWLock lock;
            char pad[LWLOCK_PADDED_SIZE];
        } LWLockPadded;
        ```

        In the current lock logic, access to LWLocks remains one of the most frequent operations. If **LWLOCK\_PADDED\_SIZE** is 32 bytes and LWLocks are stored in a contiguous array, a 64-byte cache line can contain two LWLockPadded structures, and a 128-byte cache line can contain four.
        When the system contends fiercely for LWLocks, the corresponding cache line is repeatedly fetched and invalidated, wasting a large amount of CPU resources. Therefore, in the ARM optimization, **padding\_size** is set to **128** to eliminate false sharing and improve the overall performance of LWLocks.

    - **Lock-free Critical Section Protection by Using 128-Bit CAS Operations of WAL Insert Locks**

        The WAL of a database or file system needs to insert the log information generated in memory into the log buffer. To implement high-speed log caching, the log management system inserts logs concurrently by reserving global positions: two 64-bit global data position indexes generally indicate the start and end positions of a store insertion, supporting up to 16 EB of addressable data. To protect these global position indexes, WAL introduces a high-performance atomic lock for each log buffer. In the NUMA architecture, and especially on ARM, concurrent WAL cache protection becomes a bottleneck because of atomic-lock backoff, high cross-CPU access latency, and cache consistency performance differences.

        The main idea of the optimization is to replace the atomic lock with the two 64-bit global position values updated through a single 128-bit atomic operation, eliminating the costs of cross-CPU access, backoff, and cache consistency associated with the atomic lock. For details, see Figure 5-21.

        ![](../figures/zh-cn_image_0000001208315958.gif)

        Figure 5-21 Lock-free critical section protection by using 128-bit CAS operations

        The global position information consists of a 64-bit start address and a 64-bit end address. The two addresses are combined into one 128-bit value, and the position is reserved lock-free through CAS atomic operations. The ARM platform does not provide a 128-bit atomic operation library; openGauss therefore loads two pieces of 64-bit data by executing exclusive access instructions, using the 64-bit ARM assembly instructions LDXP/STXP.

        The key data structure and the code of the **ReserveXLogInsertLocation** function are as follows:

        ```
        typedef union {
            uint128 u128;
            uint64 u64[2];
            uint32 u32[4];
        } uint128_u; /* To ensure readability and operability of the code, the 128-bit unsigned integer is designed as a union structure, and 64-bit values are assigned to its memory locations. */

        static void ReserveXLogInsertLocation(uint32 size, XLogRecPtr* StartPos, XLogRecPtr* EndPos, XLogRecPtr* PrevPtr)
        {
            volatile XLogCtlInsert* Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert;
            uint64 startbytepos;
            uint64 endbytepos;
            uint64 prevbytepos;

            size = MAXALIGN(size);

        #if defined(__x86_64__) || defined(__aarch64__)
            uint128_u compare;
            uint128_u exchange;
            uint128_u current;

            compare = atomic_compare_and_swap_u128((uint128_u*)&Insert->CurrBytePos);

        loop1:
            startbytepos = compare.u64[0];
            endbytepos = startbytepos + size;

            exchange.u64[0] = endbytepos; /* The start and end positions are written to exchange. */
            exchange.u64[1] = startbytepos;

            current = atomic_compare_and_swap_u128((uint128_u*)&Insert->CurrBytePos, compare, exchange);
            if (!UINT128_IS_EQUAL(compare, current)) { /* If another thread updated the value concurrently, loop and retry. */
                UINT128_COPY(compare, current);
                goto loop1;
            }
            prevbytepos = compare.u64[1];

        #else
            SpinLockAcquire(&Insert->insertpos_lck); /* Other platforms use atomic spinlocks to protect variable updates. */
            startbytepos = Insert->CurrBytePos;
            prevbytepos = Insert->PrevBytePos;
            endbytepos = startbytepos + size;
            Insert->CurrBytePos = endbytepos;
            Insert->PrevBytePos = startbytepos;

            SpinLockRelease(&Insert->insertpos_lck);
        #endif /* __x86_64__ || __aarch64__ */
            *StartPos = XLogBytePosToRecPtr(startbytepos);
            *EndPos = XLogBytePosToEndRecPtr(endbytepos);
            *PrevPtr = XLogBytePosToRecPtr(prevbytepos);
        }
        ```

    - **Clog Partition Optimization**

        For details about Clogs, see section 5.2.2 "XID Allocation, Clogs, and CSNlogs." Each transaction has four states: **IN\_PROGRESS**, **COMMITTED**, **ABORTED**, and **SUB\_COMMITTED**; each log occupies 2 bits. Clogs need to be stored on disks: one page \(8 KB\) can contain 2^15 logs, and each log file \(segment = 256 x 8 KB\) can contain 2^26 logs. Currently, access to Clogs is implemented through a buffer pool; a unified SLRU buffer pool is used in the code.

        ![](../figures/1717.png)

        Figure 5-22 Clog buffer pool before optimization

        ![](../figures/1718.png)

        Figure 5-23 Clog buffer pool after optimization

        As shown in Figure 5-22, the buffer pool of Clogs is globally unique in shared memory under the name **CLOG Ctl** and is shared by worker threads. In high-concurrency scenarios, resource contention on this pool becomes a performance bottleneck. Figure 5-23 shows the Clog buffer pool after partition optimization: a modulo operation \(obtaining the remainder after dividing the page number by the number of partitions\) is performed on the page number to evenly distribute the logs across multiple shared-memory buffer pools, which are recorded in the thread-local object array ClogCtlData and named **CLOG Ctl** _i_. The buffer pool objects and the corresponding global locks are also added to shared memory. Spreading the access in this way improves the overall throughput.

        To optimize Clog partitions, the operations on the original buffer pool in the source code need to be changed to operations on the buffer pool of the corresponding partition. The partition can be easily located based on the XID and page number, and the corresponding control lock is changed from one lock to multiple locks. The involved structure code is as follows; Table 5-8 lists the involved functions.

        ```
        /* Clog partitions */
        #define NUM_CLOG_PARTITIONS 256 /* Number of partitions */
        /* Clog lightweight partition locks */
        #define CBufHashPartition(hashcode) \
            ((hashcode) % NUM_CLOG_PARTITIONS)
        #define CBufMappingPartitionLock(hashcode) \
            (&t_thrd.shemem_ptr_cxt.mainLWLockArray[FirstCBufMappingLock + CBufHashPartition(hashcode)].lock)
        #define CBufMappingPartitionLockByIndex(i) \
            (&t_thrd.shemem_ptr_cxt.mainLWLockArray[FirstCBufMappingLock + i].lock)
        ```

        **Table 5-8** Functions for Clog partition optimization

| Function | Description |
| --- | --- |
| CLOGShmemInit | Calls SimpleLruInit to initialize the Clog buffer in the shared memory. |
| ZeroCLOGPage | Initializes the value on the Clog log page to 0. |
| BootStrapCLOG | Creates an initial available Clog page in the buffer, calls ZeroCLOGPage to initialize the value on the page to 0, writes the Clog page to the disk, and returns the page when a database is created. |
| CLogSetTreeStatus | Sets the final state of transaction commit. |
| CLogGetStatus | Queries the transaction state. |
| ShutdownCLOG | Closes the buffer and flushes the data to the disk. |
| ExtendCLOG | Creates a Clog page for a newly allocated transaction. |
| TruncateCLOG | Deletes logs that expire due to the creation of log checkpoints to save space. |
| WriteZeroPageXlogRec | Writes a CLOG_ZEROPAGE XLOG record for future use when an Xlog page is created. |
| clog_redo | Performs redo operations related to Clogs, including CLOG_ZEROPAGE and CLOG_TRUNCATE. |
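To illustrate the partition mapping that the functions in Table 5-8 operate on, the following is a minimal, self-contained C sketch. It uses simplified types and hypothetical helper names \(TransactionIdToPage, ClogPagePartition\) rather than the actual openGauss functions, and shows how an XID is mapped to a Clog page and then, via the modulo operation described above, to one of the 256 partitioned buffer pools and its control lock.

```
#include <stdint.h>
#include <stdio.h>

typedef uint64_t TransactionId;

#define CLOG_BITS_PER_XACT 2
#define BLCKSZ 8192 /* 8-KB page */
#define CLOG_XACTS_PER_PAGE (BLCKSZ * 8 / CLOG_BITS_PER_XACT) /* 2^15 logs per page */
#define NUM_CLOG_PARTITIONS 256

/* Page that stores the 2-bit state of the given transaction. */
static uint64_t TransactionIdToPage(TransactionId xid)
{
    return xid / CLOG_XACTS_PER_PAGE;
}

/* Partition that owns the page: the modulo distribution from Figure 5-23. */
static int ClogPagePartition(uint64_t pageno)
{
    return (int)(pageno % NUM_CLOG_PARTITIONS);
}

int main(void)
{
    TransactionId xid = 123456789;
    uint64_t pageno = TransactionIdToPage(xid);
    /* In openGauss, this partition index would select both the CLOG Ctl i
     * buffer pool and the corresponding CBufMappingPartitionLock. */
    printf("xid %llu -> page %llu -> partition %d\n",
           (unsigned long long)xid, (unsigned long long)pageno, ClogPagePartition(pageno));
    return 0;
}
```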
    - **NUMA-aware Data and Thread Access Distribution**

        In a NUMA system, memory access involves two physical locations: the accessing thread and the accessed memory. Memory access is local only when both are on the same NUMA node; otherwise, cross-node remote access is involved, and its performance overhead is high.

        The numactl open-source software provides the libnuma library, which allows applications to bind threads to a specific NUMA node or CPU list and to allocate memory on a specified NUMA node. The following describes the libnuma APIs used in the openGauss code.

        \(1\) int numa\_run\_on\_node\(int node\): Runs the current task and its subtasks on the specified node. The description of this API is as follows:

        ```
        numa_run_on_node: Runs the current task and its subtasks on a specific node. These tasks are not migrated to the CPUs of other nodes until the node affinity is reset by using the numa_run_on_node_mask function. Passing -1 lets the kernel schedule the tasks on all nodes again. The value 0 is returned when the operation is successful, and -1 is returned when it fails. The error code is recorded in errno.
        ```

        \(2\) void numa\_set\_localalloc\(void\): Sets the memory allocation policy of the calling thread to local allocation, that is, memory is preferentially allocated from the current node. The description of this API is as follows:

        ```
        numa_set_localalloc: Sets the memory allocation policy of the calling task to local allocation. In this mode, the preferred node for memory allocation is the node on which the task is running at the time of allocation.
        ```

        \(3\) void \*numa\_alloc\_onnode\(size\_t size, int node\): Allocates memory on a specified NUMA node. The description of this API is as follows:

        ```
        numa_alloc_onnode: Allocates memory on a specific node. The allocated size is rounded up to a multiple of the system page size. If the specified node is externally forbidden to the process, the call fails. Compared with the Malloc(3) function family, this function is slower, and the numa_free function must be used to release the memory. When an error occurs, NULL is returned.
        ```

        The internal data structures of openGauss are optimized based on the NUMA architecture.

        1\) Global PGPROC array optimization

        ![](../figures/1719.png)

        Figure 5-24 Global PGPROC array optimization

        As shown in Figure 5-24, the system allocates a dedicated PGPROC structure for each client connection to maintain related information. ProcGlobal-\>allProcs is originally a global array of PGPROC structures, but the NUMA node where its physical memory resides is uncertain. As a result, when a transaction thread accesses its PGPROC structure, the thread may be scheduled among multiple NUMA nodes by the operating system; in addition, the physical memory location of the corresponding PGPROC structure is also uncertain, so there is a high probability that the memory is accessed remotely.

        Because the PGPROC structure is frequently accessed, the global structure array is divided into multiple subarrays based on the number of NUMA nodes, and each subarray uses **numa\_alloc\_onnode** to allocate memory on its NUMA node. To minimize structural changes to the current code, the type of ProcGlobal-\>allProcs is changed from PGPROC\* to PGPROC\*\*, and all access to ProcGlobal-\>allProcs is adjusted accordingly \(an additional layer of indirect pointer reference is added\).
        The related code is as follows:

        ```
        #ifdef __USE_NUMA
            if (nNumaNodes > 1) {
                ereport(INFO, (errmsg("InitProcGlobal nNumaNodes: %d, inheritThreadPool: %d, groupNum: %d",
                    nNumaNodes, g_instance.numa_cxt.inheritThreadPool,
                    (g_threadPoolControler ? g_threadPoolControler->GetGroupNum() : 0))));

                int groupProcCount = (TotalProcs + nNumaNodes - 1) / nNumaNodes;
                size_t allocSize = groupProcCount * sizeof(PGPROC);
                for (int nodeNo = 0; nodeNo < nNumaNodes; nodeNo++) {
                    initProcs[nodeNo] = (PGPROC *)numa_alloc_onnode(allocSize, nodeNo);
                    if (!initProcs[nodeNo]) {
                        ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY),
                            errmsg("InitProcGlobal NUMA memory allocation in node %d failed.", nodeNo)));
                    }
                    add_numa_alloc_info(initProcs[nodeNo], allocSize);
                    int ret = memset_s(initProcs[nodeNo], groupProcCount * sizeof(PGPROC), 0, groupProcCount * sizeof(PGPROC));
                    securec_check_c(ret, "\0", "\0");
                }
            } else {
        #endif
        ```

        2\) Global WALInsertLock array optimization

        WALInsertLocks protect concurrent WAL insert operations, and multiple of them \(for example, 16\) can be configured. Before optimization, all WALInsertLocks were in the same global array allocated in shared memory. When a transaction thread runs, one WALInsertLock of the entire global array is assigned to it, so there is a high probability that remote memory access is involved, that is, cross-node and cross-package contention among multiple threads. WALInsertLocks can instead be allocated separately by NUMA node, with each transaction thread using only a WALInsertLock from its local node group, so that data contention is limited to the same NUMA node. Figure 5-25 shows the basic principle.

        ![](../figures/1720.png)

        Figure 5-25 Global WALInsertLock array optimization principles

        For example, if 16 WALInsertLocks and 4 NUMA nodes are configured, the original array with 16 elements is split into 4 arrays of 4 elements each. The global structure is WALInsertLockPadded \*\*GlobalWALInsertLocks, and the thread-local WALInsertLocks point to the WALInsertLock\[4\] array on the current node; different NUMA nodes have WALInsertLock subarrays at different addresses. GlobalWALInsertLocks traces the WALInsertLock arrays under multiple nodes to facilitate traversal. Figure 5-26 shows the WALInsertLock grouping diagram.
        ![](../figures/zh-cn_image_0000001208124506.png)

        Figure 5-26 WALInsertLock grouping diagram

        The code for initializing the WALInsertLock structure is as follows:

        ```
        WALInsertLockPadded** insertLockGroupPtr =
            (WALInsertLockPadded**)CACHELINEALIGN(palloc0(nNumaNodes * sizeof(WALInsertLockPadded*) + PG_CACHE_LINE_SIZE));
        #ifdef __USE_NUMA
        if (nNumaNodes > 1) {
            size_t allocSize = sizeof(WALInsertLockPadded) * g_instance.xlog_cxt.num_locks_in_group + PG_CACHE_LINE_SIZE;
            for (int i = 0; i < nNumaNodes; i++) {
                char* pInsertLock = (char*)numa_alloc_onnode(allocSize, i);
                if (pInsertLock == NULL) {
                    ereport(PANIC, (errmsg("XLOGShmemInit could not alloc memory on node %d", i)));
                }
                add_numa_alloc_info(pInsertLock, allocSize);
                insertLockGroupPtr[i] = (WALInsertLockPadded*)(CACHELINEALIGN(pInsertLock));
            }
        } else {
        #endif
            char* pInsertLock = (char*)CACHELINEALIGN(palloc(
                sizeof(WALInsertLockPadded) * g_instance.attr.attr_storage.num_xloginsert_locks + PG_CACHE_LINE_SIZE));
            insertLockGroupPtr[0] = (WALInsertLockPadded*)(CACHELINEALIGN(pInsertLock));
        #ifdef __USE_NUMA
        }
        #endif
        ```

        On an ARM platform, the two-dimensional array GlobalWALInsertLocks is traversed to access the WALInsertLocks: the NUMA nodes at the first layer and the WALInsertLock array on each node at the second layer.

        The LWLock memory structure referenced by WALInsertLocks is also optimized and adapted on the ARM platform. The code is as follows:

        ```
        typedef struct
        {
            LWLock lock;
        #ifdef __aarch64__
            pg_atomic_uint32 xlogGroupFirst;
        #endif
            XLogRecPtr insertingAt;
        } WALInsertLock;
        ```

        The lock member variable originally referenced an element in the global LWLock array in shared memory. After the WALInsertLock optimization, although the WALInsertLocks themselves are distributed by NUMA node, the physical memory locations of the LWLocks they reference cannot be controlled, so fierce cross-node contention would still occur when the WALInsertLocks are accessed. Therefore, the LWLocks are embedded directly into the WALInsertLocks. In this way, the LWLocks in use can also be distributed across NUMA nodes, and cache line access is reduced.

## 5.4 Summary

This chapter describes the transaction system and concurrency control mechanism of openGauss.

As an important component of the database, the transaction system connects the SQL, execution, and storage modules. After receiving an external command, the transaction system determines the execution direction based on the current internal system state. This ensures the continuity and accuracy of transaction processing.

In addition to the basic and core transaction system of openGauss, this chapter also describes how openGauss optimizes transaction performance on Kunpeng servers.

In summary, the transaction system and concurrency control module of openGauss provide both high performance and stability.
diff --git a/content/en/post/2022/Using-DataChecker-to-Ensure-Data-Accuracy-After-Migration.md b/content/en/post/2022/Using-DataChecker-to-Ensure-Data-Accuracy-After-Migration.md
new file mode 100644
index 0000000000000000000000000000000000000000..eab3ce1879c996196dbeb23b680b8fb00db863d0
--- /dev/null
+++ b/content/en/post/2022/Using-DataChecker-to-Ensure-Data-Accuracy-After-Migration.md
@@ -0,0 +1,391 @@
+++

title = "Using DataChecker to Ensure Data Accuracy After Migration"

date = "2021-06-25"

tags = [ "Using DataChecker to Ensure Data Accuracy After Migration"]

archives = "2021-06"

author = "Wenhao Zhao"

summary = "Using DataChecker to Ensure Data Accuracy After Migration"

img = "/en/post/2022/title/img16.png"

times = "12:30"

+++

# Using DataChecker to Ensure Data Accuracy After Migration

We have introduced several tools for migrating data from Oracle or MySQL to openGauss. Now we can use the DataChecker tool to verify that the data is accurate after migration.

## 1 Introduction to DataChecker

DataChecker is a tool written in Java for checking data consistency between two databases. Part of its architecture and implementation is based on Alibaba's open-source data migration tool yugong.

Code repository: https://gitee.com/opengauss/openGauss-tools-datachecker

- 1.1 Application Scenario

    Generally, DataChecker is used to verify data accuracy after a migration. After migrating a large amount of data from one database to another, you need to check whether the migrated data is accurate and complete. In this case, you can use DataChecker to check whether the data in the two databases is consistent.

- 1.2 Implementation Principles

    The architecture of DataChecker consists of two parts: Extractor and Applier.

    ![](../figures/zh-cn_image_0000001251852313.png)

    Extractor extracts data from the source database in batches, following the order of the data in the source table.

    Applier locates the data extracted by Extractor in the target database, compares the columns one by one, and returns the result.

## 2 Usage Guide

- 2.1 Environment Requirements

    **Operating System**

    DataChecker is developed in Java with bat and shell scripts, so it runs on both Windows and Linux.

    JDK 1.6.25 or later is recommended.

    **Database**

    The source database supports MySQL; Oracle will be supported in the future.

    The target database supports only openGauss.

- 2.2 Downloading DataChecker

    You can download the source code and the compiled package at https://gitee.com/opengauss/openGauss-tools-datachecker.

    Self-compilation:

    ```
    git clone git@gitee.com:opengauss/openGauss-tools-datachecker.git
    cd openGauss-tools-datachecker
    mvn clean install -Dmaven.test.skip -Denv=release
    ```

    If you do not want to compile the binary package yourself, you can obtain the compiled binary package **DataChecker-1.0.0-SNAPSHOT.tar.gz** in the **target** folder in the cloned home directory.

- 2.3 Directory Structure

    The structure of the **target** directory is as follows:

    ```
    /target
        bin/
            startup.bat
            startup.sh
            stop.sh
        conf/
            gauss.properties
            logback.xml
        lib/
        logs/
    ```
    The **bin** directory contains three files, namely **startup.bat**, **startup.sh**, and **stop.sh**, for starting and stopping the program in Windows and Linux.

    The **conf** directory contains two configuration files. Generally, only **gauss.properties** is configured.

    The **lib** directory stores the dependency files required for running.

    The **logs** directory stores the result logs after running.

- 2.4 Configuration Modification

    Modify the configuration in the **/conf/gauss.properties** file. Generally, you only need to modify basic information, such as the addresses of the source and target databases and the tables to be verified. For other settings, you can use the default values or modify them as required.

| Parameter | Description | Default Value |
| --- | --- | --- |
| gauss.database.source.username | Specifies the username for accessing the source database. | N/A |
| gauss.database.source.password | Specifies the password for accessing the source database. | N/A |
| gauss.database.source.type | Specifies the type of the source database. | Mysql |
| gauss.database.source.url | Specifies the URL for connecting to the source database. The URL must comply with certain format requirements. | N/A |
| gauss.database.source.encode | Specifies the encoding format of the source database. | UTF-8 |
| gauss.database.target.username | Specifies the username for accessing the target database. | N/A |
| gauss.database.target.password | Specifies the password for accessing the target database. | N/A |
| gauss.database.target.type | Specifies the type of the target database. | OPGS (openGauss) |
| gauss.database.target.url | Specifies the URL for connecting to the target database. The URL must comply with certain format requirements. | N/A |
| gauss.database.target.encode | Specifies the encoding format of the target database. | UTF-8 |
| gauss.table.onceCrawNum | Specifies the maximum number of records processed by Extractor or Applier in each batch. | 1000 |
| gauss.table.tpsLimit | Specifies the limit on transactions per second (TPS). The value 0 indicates that TPS is not limited. | 0 |
| gauss.table.skipApplierException | The value true indicates that a single abnormal data record can be ignored when a database exception occurs in Applier, for example, a constraint key conflict. | false |
| gauss.table.white | Specifies the whitelist of tables to be verified. The format of the value is schema.tablename; multiple table names are separated by commas (,). To verify all tables in a schema, enter only the schema name. | N/A |
| gauss.table.black | Specifies the blacklist of tables to be ignored. The format of the value is the same as that specified by gauss.table.white. | N/A |
| gauss.table.inc.tablepks | Specifies the primary keys of the tables to be verified, which are used to speed up verification. The format of the value is tablename1&pk1&pk2\|tablename2&pk1. | N/A |
| gauss.table.concurrent.enable | Specifies whether to enable parallel processing for multiple tables. If the value is false, tables are processed serially. | true |
| gauss.table.concurrent.size | Specifies the number of tables that can be processed concurrently. | 5 |
| gauss.table.retry.times | Specifies the number of retries after an error occurs in table verification. | 3 |
| gauss.extractor.dump | Specifies whether to record all data extracted by Extractor. | false |
| gauss.extractor.concurrent.global | Extractor adopts the global thread pool mode. If the value is true, all Extractor tasks use one group of thread pools. The thread pool size is specified by concurrent.size. | false |
| gauss.extractor.concurrent.size | Specifies the number of threads that can run concurrently. This parameter takes effect only after concurrent.enable is enabled. | 30 |
| gauss.applier.dump | Specifies whether to record all data extracted by Applier. | false |
| gauss.applier.concurrent.enable | Specifies whether parallel processing is enabled for Applier. | true |
| gauss.applier.concurrent.global | Applier adopts the global thread pool mode. If the value is true, all Applier tasks use one group of thread pools. The thread pool size is specified by concurrent.size. | false |
| gauss.applier.concurrent.size | Specifies the number of threads that can run concurrently. This parameter takes effect only after concurrent.enable is enabled. | 30 |
| gauss.stat.print.interval | Specifies the frequency of printing statistical information. If the value is 5, statistics are printed once after every five rounds of Extractor and Applier operations. | 5 |
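For reference, the following is a minimal sketch of a **conf/gauss.properties** file for the MySQL-to-openGauss check demonstrated in section 2.7. The host names, ports, credentials, and JDBC URL formats are illustrative assumptions, not values shipped with the tool; only the property keys come from the table above.

```
# Source database: MySQL (illustrative connection values)
gauss.database.source.username=mysql_user
gauss.database.source.password=mysql_pwd
gauss.database.source.type=MYSQL
gauss.database.source.url=jdbc:mysql://127.0.0.1:3306/mysql
gauss.database.source.encode=UTF-8

# Target database: openGauss (illustrative values; openGauss is assumed
# to be reachable through the PostgreSQL JDBC driver)
gauss.database.target.username=gauss_user
gauss.database.target.password=gauss_pwd
gauss.database.target.type=OPGS
gauss.database.target.url=jdbc:postgresql://127.0.0.1:5432/postgres
gauss.database.target.encode=UTF-8

# Check only the mysql.test table from the example in section 2.7.
gauss.table.white=mysql.test
```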
- 2.5 Starting and Stopping the Tool

    **Starting the Tool in Linux**

    sh startup.sh

    **Stopping the Tool in Linux**

    sh stop.sh

    **Starting the Tool in Windows**

    startup.bat

    **Stopping the Tool in Windows**

    You can directly close the terminal.

- 2.6 Log Description

    The log structure is as follows:

    ```
    /logs
        summary/
            summary.log
        gauss/
            table.log
        ${table}/
            table.log
            extractor.log
            applier.log
            check.log
    ```
    The **table.log** file in the **gauss** directory records all logs of the entire verification process.

    The **summary.log** file in the **summary** directory records the names of all tables whose verification results are incorrect, that is, tables whose data is inconsistent between the two databases.

    _$\{table\}_ indicates the name of each table. In the **$\{table\}** directory, the **table.log** file records all logs generated during verification of that table, the **extractor.log** file records all logs generated during data extraction, and the **applier.log** file records all logs generated during verification \(data comparison\). The **check.log** file records the specific rows that fail verification; if the **check.log** file does not exist, the verification result is correct.

- 2.7 Example

    **Preparing the Database**

    Create a table in the **mysql** schema in MySQL, as shown in the following figure.

    ![](../figures/zh-cn_image_0000001252252279.png)

    Assume that after the data is migrated to openGauss, only four of the five data records are successfully migrated, as shown in the following figure.

    ![](../figures/zh-cn_image_0000001206972348.png)

    **Configuring gauss.properties**

    ![](../figures/zh-cn_image_0000001206812360.png)

    **Running startup.bat or startup.sh**

    ![](../figures/zh-cn_image_0000001207132328.png)

    **Viewing Logs**

    Check the **/logs/summary/summary.log** file and locate the **mysql.test** table where the error occurs.

    Access **/logs/mysql.test/** to view the details.

    ![](../figures/zh-cn_image_0000001206972352.png)

    There are four log files. You can mainly view **check.log**.

    ![](../figures/zh-cn_image_0000001252252281.png)

    The record whose ID is 5 and whose name is 5 failed to be migrated.

diff --git a/content/en/post/2022/Using-Ora2Pg-to-Migrate-Data-from-Oracle-to-openGauss.md b/content/en/post/2022/Using-Ora2Pg-to-Migrate-Data-from-Oracle-to-openGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea0f91887c51e3f01f8c0c5c36456795ea389cee
--- /dev/null
+++ b/content/en/post/2022/Using-Ora2Pg-to-Migrate-Data-from-Oracle-to-openGauss.md
@@ -0,0 +1,273 @@
+++

title = "Using Ora2Pg to Migrate Data from Oracle to openGauss"

date = "2021-06-09"

tags = [ "Using Ora2Pg to Migrate Data from Oracle to openGauss"]

archives = "2021-06"

author = "Xiaobin Chen"

summary = "Using Ora2Pg to Migrate Data from Oracle to openGauss"

img = "/en/post/2022/title/img14.png"

times = "12:30"

+++

# Using Ora2Pg to Migrate Data from Oracle to openGauss

## **Introduction to Ora2Pg**

Ora2Pg is an open-source tool for migrating data from Oracle to PostgreSQL. By connecting to Oracle, Ora2Pg automatically scans and extracts object structures and data, generates SQL scripts, and applies the scripts to PostgreSQL manually or automatically.

Official website: https://ora2pg.darold.net/

## **Advantages of Ora2Pg**

- Supports exporting most types of database objects, including tables, views, sequences, indexes, foreign keys, constraints, functions, stored procedures, and others.
- Automatically converts the PL/SQL syntax to the PL/pgSQL syntax, avoiding manual modification to some extent.
- Generates migration reports, containing migration difficulty evaluation and person-day estimation.
- Compresses exported data to reduce disk overhead as required.
- Provides various configuration items, allowing you to customize migration operations.
## **Application of Ora2Pg in openGauss**

The main language of Ora2Pg is Perl. It uses the Perl DBI module and connects to the target PostgreSQL database through DBD::Pg. openGauss is compatible with PostgreSQL communication protocols and most syntaxes, so with only a few name changes, Ora2Pg can also be used with openGauss.

## **Usage Example of Ora2Pg**

Ora2Pg exports DDL statements of object structures to SQL files. Table data can be exported to files by running the INSERT or COPY statement, or be directly imported to the target database without generating intermediate SQL files.

Ora2Pg provides the **ora2pg** command to complete these processes. Generally, multiple commands need to be executed for one migration. Ora2Pg provides a more convenient method to obtain the export and import scripts and the migration directory template by creating a migration project. The scripts integrate several **ora2pg** commands and the **psql** command \(**gsql** for openGauss\) required for importing SQL files. You only need to run the two scripts to complete the migration. The demonstration in this document uses the scripts.

- 1. Installing Dependencies

  The language of Ora2Pg is Perl. Therefore, you need to install the required Perl modules.

  ```
  # Perform the following operations as the root user:
  yum install -y perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker
  yum install perl-CPAN
  ```

  Install DBI, DBD::Pg, and DBD::Oracle, on which Ora2Pg depends to connect to the databases.

  ```
  perl -MCPAN -e 'install DBI'
  perl -MCPAN -e 'install DBD::Pg'
  ```

  Install Oracle Instant Client or Oracle on the local host before installing DBD::Oracle.

  ```
  # Download Oracle Instant Client from the Oracle official website and install it.
  rpm -ivh oracle-instantclient12.2-basic-12.2.0.1.0-1.x86_64.rpm
  rpm -ivh oracle-instantclient12.2-devel-12.2.0.1.0-1.x86_64.rpm
  rpm -ivh oracle-instantclient12.2-jdbc-12.2.0.1.0-1.x86_64.rpm
  rpm -ivh oracle-instantclient12.2-sqlplus-12.2.0.1.0-1.x86_64.rpm
  # Set the environment variable ORACLE_HOME.
  export ORACLE_HOME=/usr/lib/oracle/12.2/client64
  # Set ORACLE_HOME as follows if Oracle has been installed on the local host:
  export ORACLE_HOME=/opt/oracle/product/19c/dbhome_1
  export LD_LIBRARY_PATH=$ORACLE_HOME/lib
  # Install DBD::Oracle.
  perl -MCPAN -e 'install DBD::Oracle'
  ```

- 2. Installing Ora2Pg and Creating a Migration Project

  Source code path: https://github.com/darold/ora2pg

  ```
  # Go to the code directory. <install_dir> is the target installation path.
  perl Makefile.PL PREFIX=<install_dir>
  make && make install

  # Set environment variables and check whether the installation is successful. <code_dir> is the path for downloading the code.
  export PERL5LIB=<code_dir>/lib
  export PATH=$PATH:/usr/local/bin
  ora2pg --help

  # Create a migration project.
  ora2pg --init_project oramig
  ```

  After a migration project is created, the **oramig** directory template is generated in the current directory, as shown in the following figure. The directory template contains the **export\_schema.sh** and **import\_all.sh** scripts, which are used for subsequent data export and import. The **schema** and **sources** directories store DDL statements of each object. The **schema** directory stores statements after the PL/SQL syntax is converted into the PL/pgSQL syntax, while the **sources** directory stores PL/SQL statements before conversion.
The **data** directory stores table data files, the **config** directory contains the **ora2pg.conf** configuration file, and the **reports** directory stores migration reports.

  ```
  ./oramig/
      schema/
          dblinks/
          directories/
          functions/
          grants/
          mviews/
          packages/
          partitions/
          procedures/
          sequences/
          synonyms/
          tables/
          tablespaces/
          triggers/
          types/
          views/
      sources/
          functions/
          mviews/
          packages/
          partitions/
          procedures/
          triggers/
          types/
          views/
      data/
      config/
      reports/
  ```

  Now, you can run the **ora2pg** command. The following lists some command-line parameters that can be specified when you run the **ora2pg** command. These parameters can also be set in the **ora2pg.conf** file. When you specify a configuration file, the values of the command-line parameters overwrite the corresponding values in the configuration file.

  ```
  Usage: ora2pg [-dhpqv --estimate_cost --dump_as_html] [--option value]

  -a | --allow str: Specifies the list of objects that can be exported. The objects are separated by commas (,).
  -b | --basedir dir: Specifies the default export directory for storing exported SQL files.
  -c | --conf file: Specifies the path of the configuration file.
  -e | --exclude str: Specifies the list of objects that are excluded from the export. The objects are separated by commas (,).
  -i | --input file: Specifies the SQL file to be imported. You do not need to connect to Oracle when importing the file.
  -o | --out file: Specifies the path for storing the exported SQL file. The default value is the output.sql file in the current directory.
  -p | --plsql: Enables the conversion from PL/SQL code to PL/pgSQL code.
  -s | --source DSN: Specifies the data source of Oracle DBI.
  -t | --type export: Specifies the export type. This parameter will overwrite the export type (specified by TYPE) in the configuration file.
  -u | --user name: Specifies the username for connecting to Oracle. You can also use the ORA2PG_USER environment variable.
  -w | --password pwd: Specifies the user password for connecting to Oracle. You can also use the ORA2PG_PASSWD environment variable.
  --init_project NAME: Initializes a typical Ora2Pg project and generates a directory template.
  --view_as_table str: Exports views as tables. Use commas (,) to separate multiple views.
  ```

- 3. Configuring Ora2Pg

  The **ora2pg.conf** file contains all configuration items, which can be used to customize migration operations. The following describes some common configuration items.

  **ORACLE\_HOME**: Specifies the environment variable _ORACLE\_HOME_. The DBD::Oracle module uses this variable to locate the required Oracle libraries. How to set it is covered in the dependency installation step above.

  **ORACLE\_DSN**: Specifies the data source name in the standard DBI DSN form. For example:

  ```
  ORACLE_DSN dbi:Oracle:host=oradb_host.myhost.com;sid=DB_SID;port=1521
  ```

  or

  ```
  ORACLE_DSN dbi:Oracle:DB_SID
  ```

  For the second method, you need to declare the system identifier \(SID\) in the **$ORACLE\_HOME/network/admin/tnsnames.ora** file or the **tnsnames.ora** file in the directory specified by the environment variable _TNS\_ADMIN_.

  **ORACLE\_USER** and **ORACLE\_PWD**: Define the username and password for connecting to Oracle, respectively. Note that if possible, log in as the Oracle super administrator to avoid permission issues and ensure that nothing is missed.

  **PG\_DSN**: Specifies the DSN of the target database.
The following uses openGauss as an example. The database to be connected is **mydb** with the IP address **localhost** and the port number **5432**.

  ```
  PG_DSN dbi:Pg:dbname=mydb;host=localhost;port=5432
  ```

  **PG\_USER** and **PG\_PWD**: Specify the username and password for connecting to the target database, respectively. Note that the user must have the permission to remotely connect to openGauss as well as read and write permissions on the corresponding database. Specifically, the host where Ora2Pg runs and the user must be in the remote access whitelist of openGauss.

  **SCHEMA**: Specifies the schema to be exported. In the following example, the objects associated with the **APPS** schema are extracted.

  ```
  SCHEMA APPS
  ```

  **ORA\_INITIAL\_COMMAND**: Sends an initial command to Oracle after the connection. For example, disable an access control policy before reading an object or set some session parameters.

  **TYPE**: Specifies the type of the objects to be exported, including tables, views, sequences, indexes, foreign keys, constraints, functions, stored procedures, and others. The default value is **TABLE**. In the following example, ordinary tables and views are exported.

  ```
  TYPE TABLE VIEW
  ```

  For more details about the configuration items, visit the official website at the following link:

  https://ora2pg.darold.net/documentation.html

  Test the connection. After configuring the DSN of Oracle, run the following command to test the database connection:

  ```
  ora2pg -t SHOW_VERSION -c config/ora2pg.conf
  ```

  The Oracle version will be displayed if the connection succeeds.

- 4. Running Migration Scripts

  The configuration is as follows. Ora2Pg connects to Oracle as the **system** user and to the target database as **testuser**.

  ```
  ORACLE_HOME /opt/oracle/product/19c/dbhome_1
  ORACLE_DSN dbi:Oracle:host=127.0.0.1;sid=ORCLCDB;port=1521
  ORACLE_USER system
  ORACLE_PWD manager
  SCHEMA testuser
  PG_DSN dbi:Pg:dbname=mydb;host=127.0.0.1;port=5432
  PG_USER testuser
  PG_PWD openGauss123
  ```

  Modify the export type in **export\_schema.sh**. In this migration, tables and functions are exported.

  ![](../figures/zh-cn_image_0000001207289100.jpg)

  Run the export script and wait until the migration is complete. A DDL file of the corresponding type is generated in the **schema** and **sources** subdirectories, and the command for exporting table data is provided at the end.

  ```
  sh export_schema.sh
  ```

  ![](../figures/zh-cn_image_0000001252129111.jpg)

  In addition, a migration report in HTML format is generated in the **reports** directory.

  ![](../figures/zh-cn_image_0000001252009063.jpg)

  Before running the import script, perform the following operations:

  1. Create the **mydb** database in openGauss, create a user in the database, and set the owner of **mydb** to that user. \(In **import\_all.sh**, **createuser** and **createdb** of PostgreSQL are used to create users and databases.\)

     ![](../figures/zh-cn_image_0000001206809156.jpg)

  2. Add the **bin** directory of openGauss to the environment variable so that the client tool gsql can be used.
  3. Change **psql** in **import\_all.sh** to **gsql**.
  4. Add an option for executing the script when data is imported by a common user, and specify the user password to avoid frequent password input.

  Run the import script. In the script, the **testuser** user is used to log in to the **mydb** database with the IP address **127.0.0.1** and the port number **5432**.
The **-f** option indicates that the check on whether the user and database need to be created is skipped.

  ```
  sh import_all.sh -d mydb -o testuser -h 127.0.0.1 -p 5432 -f 1
  ```

  After the script is executed, the system prompts you whether to import the object structures and data, as shown in the following figure. \(The **-w** option shown in the figure was added by manually modifying the **import\_all.sh** script.\)

  ![](../figures/zh-cn_image_0000001206809160.jpg)

  Table indexes and constraints can be imported after the data import is complete.

  Log in to openGauss to view the migration result.

  ![](../figures/zh-cn_image_0000001252249073.jpg)

## **Disadvantages of Ora2Pg**

1. Ora2Pg uses regular expressions and adopts text replacement to convert the PL/SQL syntax to the PL/pgSQL syntax. However, for design reasons, Ora2Pg supports conversion of only some syntaxes.
2. DDL statements are generated by concatenating character strings. However, this method does not fully support some syntaxes, such as the syntax for creating partitioned tables.

diff --git a/content/en/post/2022/Using-pg_chameleon-to-Migrate-Data-from-MySQL-to-openGauss.md b/content/en/post/2022/Using-pg_chameleon-to-Migrate-Data-from-MySQL-to-openGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..484a8ca496b3fde816340b526599137f090cdc49
--- /dev/null
+++ b/content/en/post/2022/Using-pg_chameleon-to-Migrate-Data-from-MySQL-to-openGauss.md
@@ -0,0 +1,219 @@

+++

title = "Using pg chameleon to Migrate Data from MySQL to openGauss"

date = "2021-06-17"

tags = ["Using pg chameleon to Migrate Data from MySQL to openGauss"]

archives = "2021-06"

author = "Ju Peng"

summary = "Using pg chameleon to Migrate Data from MySQL to openGauss"

img = "/en/post/2022/title/img15.jpg"

times = "12:30"

+++

# Using pg\_chameleon to Migrate Data from MySQL to openGauss

## Introduction to pg\_chameleon

pg\_chameleon is a real-time replication tool written in Python 3 for migrating data from MySQL to PostgreSQL. The tool uses the mysql-replication library to extract row images from MySQL. The row images are stored in PostgreSQL in JSONB format.

A **pl/pgsql** function in PostgreSQL is executed to decode the JSONB row images and replay the changes to PostgreSQL. In addition, during initial configuration the tool pulls the full data set from MySQL in read-only mode. In this way, it copies the initial full data and then replicates subsequent incremental data online in real time.

pg\_chameleon has the following features:

- Provides online real-time replication by reading the MySQL BinLog.
- Supports reading data from multiple MySQL schemas and restoring the data to the target PostgreSQL database. The source schemas and target schemas can use different names.
- Implements real-time replication through a daemon. The daemon consists of two subprocesses. One is responsible for reading logs from MySQL, and the other is responsible for replaying changes to PostgreSQL.

openGauss is compatible with PostgreSQL communication protocols and most syntaxes. For this reason, you can use pg\_chameleon to migrate data from MySQL to openGauss. In addition, the real-time replication capabilities of pg\_chameleon greatly reduce the service interruption duration during database switchover.

## pg\_chameleon Issues in openGauss
1. pg\_chameleon depends on the psycopg2 driver, and psycopg2 uses the pg\_config tool to check the PostgreSQL version, refusing to work with PostgreSQL versions that are too old. The pg\_config tool of openGauss returns the openGauss version \(currently openGauss 2.0.0\). As a result, the driver reports a version error: "Psycopg requires PostgreSQL client library \(libpq\) \>= 9.1". You need to compile psycopg2 from source and remove the related restrictions from the source header file **psycopg/psycopg.h**.
2. pg\_chameleon sets the GUC parameter **LOCK\_TIMEOUT** to limit the timeout for waiting for locks in PostgreSQL. openGauss does not support this parameter. \(openGauss supports the GUC parameter **lockwait\_timeout**, which needs to be set by the administrator.\) You need to delete the related settings from the source code of pg\_chameleon.
3. pg\_chameleon uses the UPSERT syntax to specify the replacement operation performed when a constraint is violated. The function and syntax of the UPSERT statement supported by openGauss are different from those supported by PostgreSQL: openGauss uses the **ON DUPLICATE KEY UPDATE \{ column\_name = \{ expression | DEFAULT \} \} \[, ...\]** syntax, while PostgreSQL uses the **ON CONFLICT \[ conflict\_target \] DO UPDATE SET \{ column\_name = \{ expression | DEFAULT \} \}** syntax, and their semantics also differ slightly. You need to modify the related UPSERT statements in the source code of pg\_chameleon.
4. pg\_chameleon uses the **CREATE SCHEMA IF NOT EXISTS** and **CREATE INDEX IF NOT EXISTS** syntaxes. openGauss does not support the **IF NOT EXISTS** option for schemas and indexes. You need to modify the logic so that the system checks whether the schemas and indexes exist before creating them.
5. To select an array range, openGauss uses **column\_name\[start, end\]**, while PostgreSQL uses **column\_name\[start:end\]**. You need to modify the array range selection mode in the source code of pg\_chameleon.
6. pg\_chameleon uses the INHERITS function, but openGauss does not support inherited tables. You need to modify the SQL statements and tables that use inherited tables.

Next, use pg\_chameleon to migrate data from MySQL to openGauss.

## Configuring pg\_chameleon

pg\_chameleon uses the **config-example.yml** configuration file in **\~/.pg\_chameleon/configuration** to define configurations during migration. The configuration file consists of four parts: **global settings**, **type\_override**, **postgres destination connection**, and **sources**. **global settings** is used to set the log file path, log level, and others. **type\_override** allows users to customize type conversion rules and overwrite existing default conversion rules. **postgres destination connection** is used to configure the parameters for connecting to openGauss. **sources** is used to define the parameters for connecting to MySQL and other configurable items during replication.

For more details about the configuration items, see the official website:

https://pgchameleon.org/documents\_v2/configuration\_file.html

The following is an example of the configuration file:

```
# global settings
pid_dir: '~/.pg_chameleon/pid/'
log_dir: '~/.pg_chameleon/logs/'
log_dest: file
log_level: info
log_days_keep: 10
rollbar_key: ''
rollbar_env: ''
# type_override allows the user to override the default type conversion
# into a different one.
type_override:
  "tinyint(1)":
    override_to: boolean
    override_tables:
      - "*"
# postgres destination connection
pg_conn:
  host: "1.1.1.1"
  port: "5432"
  user: "opengauss_test"
  password: "password_123"
  database: "opengauss_database"
  charset: "utf8"
sources:
  mysql:
    db_conn:
      host: "1.1.1.1"
      port: "3306"
      user: "mysql_test"
      password: "password123"
      charset: 'utf8'
      connect_timeout: 10
    schema_mappings:
      mysql_database: sch_mysql_database
    limit_tables:
    skip_tables:
    grant_select_to:
      - usr_migration
    lock_timeout: "120s"
    my_server_id: 1
    replica_batch_size: 10000
    replay_max_rows: 10000
    batch_retention: '1 day'
    copy_max_memory: "300M"
    copy_mode: 'file'
    out_dir: /tmp
    sleep_loop: 1
    on_error_replay: continue
    on_error_read: continue
    auto_maintenance: "disabled"
    gtid_enable: false
    type: mysql
    keep_existing_schema: No
```

The preceding configuration file indicates that the username and password for connecting to MySQL are **mysql\_test** and **password123**, respectively, during data migration. The IP address and port number of the MySQL server are 1.1.1.1 and 3306, respectively. The source database is **mysql\_database**.

The username and password for connecting to openGauss are **opengauss\_test** and **password\_123**, respectively. The IP address and port number of the openGauss server are 1.1.1.1 and 5432, respectively. The target database is **opengauss\_database**. The **sch\_mysql\_database** schema is created in **opengauss\_database**, and all tables to be migrated are in this schema.

Note that the user must have the permission to remotely connect to MySQL and openGauss as well as the read and write permissions on the corresponding databases. For openGauss, the host where pg\_chameleon runs must be in the remote access whitelist of openGauss. For MySQL, the user must have the **RELOAD**, **REPLICATION CLIENT**, and **REPLICATION SLAVE** permissions.

The following describes the migration procedure.

## Creating Users and Databases

The following shows how to create the users and databases in openGauss required for migration.

![](../figures/zh-cn_image_0000001252011743.jpg)

The following shows how to create the users in MySQL required for migration and grant related permissions to the users.

![](../figures/zh-cn_image_0000001252131781.jpg)

## Enabling the Replication Function of MySQL

Modify the MySQL configuration file, which is generally **/etc/my.cnf** or a **cnf** configuration file in the **/etc/my.cnf.d/** folder. Modify the following configurations in the **\[mysqld\]** configuration block \(if the **\[mysqld\]** configuration block does not exist, add it\):

```
[mysqld]
binlog_format = ROW
log_bin = mysql-bin
server_id = 1
binlog_row_image = FULL
expire_logs_days = 10
```

After the modification, restart MySQL for the configurations to take effect.

## Running pg\_chameleon to Migrate Data

1. Create and activate a virtual Python environment.

   **_python3 -m venv venv_**

   **_source venv/bin/activate_**

2. Download and install psycopg2 and pg\_chameleon.

   Run the **pip install pip --upgrade** command to upgrade pip.

   Add the folder where the pg\_config tool of openGauss is located to the _$PATH_ environment variable.
   Example:

   **_export PATH=\{openGauss-server\}/dest/bin:$PATH_**

   Download the source code of psycopg2 at https://github.com/psycopg/psycopg2, remove the restriction of checking the PostgreSQL version, and run the **python setup.py install** command to compile the source code and install the tool.

   Download the source code of pg\_chameleon at https://github.com/the4thdoctor/pg\_chameleon, solve the preceding issues in openGauss, and run the **python setup.py install** command to compile the source code and install the tool.

3. Create the configuration file directory of pg\_chameleon.

   **_chameleon set\_configuration\_files_**

4. Modify the configuration file of pg\_chameleon.

   **_cd \~/.pg\_chameleon/configuration_**

   **_cp config-example.yml default.yml_**

   Modify the **default.yml** file as required. Modify the connection configuration information, user information, database information, and schema mapping specified by **pg\_conn** and **mysql**. An example of the configuration file is provided above for reference.

5. Initialize the replication stream.

   **_chameleon create\_replica\_schema --config default_**

   **_chameleon add\_source --config default --source mysql_**

   In this step, an auxiliary schema and table are created for the replication process in openGauss.

6. Copy basic data.

   **_chameleon init\_replica --config default --source mysql_**

   After this step is complete, the current full data in MySQL is copied to openGauss.

   You can view the replication result in openGauss.

   ![](../figures/zh-cn_image_0000001207291774.jpg)

7. Enable online real-time replication.

   **_chameleon start\_replica --config default --source mysql_**

   After real-time replication is enabled, insert a data record into MySQL.

   ![](../figures/zh-cn_image_0000001207131798.jpg)

   View the data in the **test\_decimal** table in openGauss.

   ![](../figures/zh-cn_image_0000001252131783.jpg)

   The newly inserted data record is successfully copied to openGauss.

8. Disable online replication.

   **_chameleon stop\_replica --config default --source mysql_**

   **_chameleon detach\_replica --config default --source mysql_**

   **_chameleon drop\_replica\_schema --config default_**

diff --git a/content/en/post/2022/Using-pgloader-to-Migrate-Data-from-MySQL-to-openGauss.md b/content/en/post/2022/Using-pgloader-to-Migrate-Data-from-MySQL-to-openGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..e868c717d36d06875f607ef7534604106ecea9bc
--- /dev/null
+++ b/content/en/post/2022/Using-pgloader-to-Migrate-Data-from-MySQL-to-openGauss.md
@@ -0,0 +1,133 @@

+++

title = "Using pgloader to Migrate Data from MySQL to openGauss"

date = "2021-05-27"

tags = [ "Using pgloader to Migrate Data from MySQL to openGauss"]

archives = "2021-05"

author = "Ju Peng"

summary = "Using pgloader to Migrate Data from MySQL to openGauss"

img = "/en/post/2022/title/img13.png"

times = "12:30"

+++

# Using pgloader to Migrate Data from MySQL to openGauss

## Introduction to pgloader

pgloader is a data import tool that uses the COPY command to import data to PostgreSQL. pgloader works in two modes: importing data from files and migrating databases. In both modes, pgloader uses the COPY protocol of PostgreSQL to efficiently transfer data.

openGauss is compatible with PostgreSQL communication protocols and most syntaxes. For this reason, you can use pgloader to migrate data from MySQL to openGauss.
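Before diving into the details, note that for a quick one-off migration pgloader can also be driven entirely from connection URLs on the command line, without a configuration file. The following is a minimal sketch; the credentials and addresses reuse the placeholder values from this post, and it assumes the openGauss compatibility setting described in the next section has already been applied:

```
pgloader mysql://mysql_test:password123@1.1.1.1:3306/mysql_database \
         postgresql://opengauss_test:password_123@1.1.1.1:5432/opengauss_database
```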
## pgloader Issues in openGauss

openGauss performs security hardening on the native PostgreSQL communication protocols. As a result, it is incompatible with the default PostgreSQL communication protocols, and by default, pgloader, which uses the native PostgreSQL protocol, cannot connect to openGauss. An error similar to the following is reported:

![](../figures/zh-cn_image_0000001252128947.jpg)

The solution is to modify a GUC parameter, namely **password\_encryption\_type**. By default, PostgreSQL uses MD5 encryption, which is insecure. To improve its security capabilities, openGauss uses SHA256 encryption by default, which is why the preceding error is reported. openGauss does not delete MD5 encryption and its verification logic, so MD5 encryption can be enabled by setting the GUC parameter.

Method:

**_gs\_guc reload -D $PGDATA -c "password\_encryption\_type = 1"_**

**You must create a user after setting the preceding parameter.** Then, you can log in to the database as the user.

Next, use pgloader to migrate data from MySQL to openGauss.

## Installing pgloader

You can install pgloader directly from apt.postgresql.org or the official Debian repository at packages.debian.org/pgloader.

**_$ apt-get install pgloader_**

You can also use pgloader directly through Docker images.

**_$ docker pull dimitri/pgloader_**

**_$ docker run --rm --name pgloader dimitri/pgloader:latest pgloader --version_**

**_$ docker run --rm --name pgloader dimitri/pgloader:latest pgloader --help_**

## Configuring pgloader

pgloader provides various configuration items for you to customize migration operations. For example, the **include drop** option drops, in the target database, all tables whose names appear in MySQL, so that repeated runs of the same command always start from a clean environment.

The following describes some common configuration items.

**FROM**: URL of the source database. The format is as follows:

```
 mysql://[user[:password]@][netloc][:port][/dbname][?option=value&...]
```

**INTO**: URL of the target database. The format is as follows:

```
 postgresql://[user[:password]@][netloc][:port][/dbname][?option=value&...]
```

**WITH**: options used when data is loaded from MySQL, such as **include drop**, **create tables**, and **create indexes**.

**CAST**: user-defined type conversion rules. You are allowed to overwrite existing default conversion rules or modify them in special cases.

For partial migration, you can use the configuration items **including only table names matching** and **excluding table names matching** to migrate only specific tables or exclude specific tables during migration, as in the sketch below.
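For example, a hedged sketch of the two matching clauses \(the table names here are hypothetical\):

```
including only table names matching ~/user/, 'orders'
excluding table names matching ~/test_/
```

A regular expression such as ~/user/ matches every table whose name contains "user", while a quoted string matches a single table name exactly.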
For details about the configuration items, see the official website:

https://pgloader.readthedocs.io/en/latest/ref/mysql.html

The following is an example of the configuration file for migrating data from MySQL to openGauss:

```
LOAD DATABASE
FROM mysql://mysql_test:password123@1.1.1.1:3306/mysql_database
INTO postgresql://opengauss_test:password_123@1.1.1.1:5432/opengauss_database

WITH include drop, create tables, create indexes, reset no sequences,
 workers = 8, concurrency = 1,
 multiple readers per thread, rows per range = 50000

 CAST
 type varchar when (= 1 precision) to "boolean" drop typemod keep default keep not null;
```

The preceding configuration file indicates that the username and password for connecting to MySQL are **mysql\_test** and **password123**, respectively, during data migration. The IP address and port number of the MySQL server are **1.1.1.1** and **3306**, respectively. The source database is **mysql\_database**.

The username and password for connecting to openGauss are **opengauss\_test** and **password\_123**, respectively. The IP address and port number of the openGauss server are **1.1.1.1** and **5432**, respectively, and the target database is **opengauss\_database**.

Note that the user must have the permission to remotely connect to MySQL and openGauss and the read and write permissions on the corresponding databases. For openGauss, the host where pgloader runs must be in the remote access whitelist of openGauss.

## Creating Users and Databases

This section describes how to create the users and databases in openGauss required for migration.

![](../figures/zh-cn_image_0000001251848955.jpg)

## Running pgloader to Migrate Data

The following shows how to use pgloader through Docker images. Name the prepared configuration file **openGauss.loader**.

![](../figures/zh-cn_image_0000001251848959.jpg)

Run the **docker run -tid --name pgloader\_test dimitri/pgloader** command to start the container.

Run the **docker cp ./openGauss.loader pgloader\_test:/** command to copy the configuration file into the container.

Run the **docker exec -it pgloader\_test /bin/bash** command to access the container.

![](../figures/zh-cn_image_0000001252248915.jpg)

Run the **pgloader openGauss.loader** command to start pgloader, wait until the data migration is complete, and view the migration result report.

![](../figures/zh-cn_image_0000001252008911.jpg)

View the migration result in openGauss.

![](../figures/zh-cn_image_0000001206968992.jpg)

diff --git a/content/en/post/2022/Using-the-Python-Driver-psycopg2-of-openGauss.md b/content/en/post/2022/Using-the-Python-Driver-psycopg2-of-openGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..436f3f1d3c68dc2cfc621e9262af40b48dd83d40
--- /dev/null
+++ b/content/en/post/2022/Using-the-Python-Driver-psycopg2-of-openGauss.md
@@ -0,0 +1,69 @@

+++

title = "Using the Python Driver psycopg2 of openGauss"

date = "2021-04-02"

tags = [ "Using the Python Driver psycopg2 of openGauss"]

archives = "2021-04"

author = "Tianqing Wang"

summary = "Using the Python Driver psycopg2 of openGauss"

img = "/en/post/2022/title/img12.png"

times = "12:30"

+++

# Using the Python Driver psycopg2 of openGauss

## Introduction to psycopg2

psycopg2 is a Python driver for PostgreSQL. It is the most widely used and most stable of the PostgreSQL Python drivers.
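For orientation, a minimal usage sketch is shown below. The connection parameters are placeholders, and, as the next section explains, one of the workarounds described there must be applied before psycopg2 can actually connect to openGauss:

```
import psycopg2

# Placeholder connection parameters; apply one of the workarounds
# from the next section before trying this against openGauss.
conn = psycopg2.connect(host="127.0.0.1", port=5432, dbname="mydb",
                        user="testuser", password="Test_1234")
with conn.cursor() as cur:
    cur.execute("SELECT 1")
    print(cur.fetchone())
conn.close()
```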
## psycopg2 Issues in openGauss

openGauss is evolved from PostgreSQL-XC \(PGXC\) and performs security hardening on the native PostgreSQL communication protocols. Therefore, it is incompatible with the default PostgreSQL communication protocols, and by default, psycopg2 built against the native PostgreSQL cannot connect to openGauss. An error similar to the following is reported:

![](../figures/zh-cn_image_0000001252248517.jpg)

## Solution 1: Modifying the GUC Parameter

The involved GUC parameter is **password\_encryption\_type**. By default, PostgreSQL uses MD5 encryption, which is insecure. According to Huawei's security and trustworthiness requirements, openGauss uses SHA256 encryption by default. Therefore, the preceding error is reported. openGauss does not delete MD5 encryption and its verification logic. As such, MD5 encryption can be enabled by modifying the GUC parameter.

Method:

```
gs_guc reload -D $PGDATA -c "password_encryption_type = 1"
```

You must create a user after setting the preceding parameter. Then, you can log in to the database as the user.

## Solution 2: Replacing libpq

MD5 encryption is risky. To use a more secure encryption algorithm, you must replace the original libpq of PostgreSQL. To replace libpq, perform the following steps:

1. Run the **pip install psycopg2-binary** command to install the Python driver of PostgreSQL.
2. Switch to the installation directory of psycopg2, which is generally **/$PYTHONINSTALL/lib/pythonx.x/site-packages/psycopg2**.
3. Run **ldd** on the **\_psycopg.cpython-37m-x86\_64-linux-gnu.so** file to list its library dependencies. \(The file name is for reference only.\)
4. Copy libpq and the related SO files in the **lib** directory of openGauss to replace the original files of PostgreSQL with the same names.

## Solution 3: Recompiling psycopg2

In addition to manually replacing libpq, you can also use the psycopg2 source code to compile a package in an environment with openGauss installed. In this way, the compiled psycopg2 package contains the libpq of openGauss and its dependency files. **Note**:

1. If PostgreSQL is installed in the environment, ensure that the path of the openGauss library files has a higher priority, that is, it is placed at the front of _LD\_LIBRARY\_PATH_.
2. The **libpq.so** file has many dependency files, including some algorithm libraries. These files must be released together with the **libpq.so** file. You can run the **ldd** command to view the dependency file list.

## Compilation Method

1. Install openGauss in the environment and configure environment variables.
2. Download the psycopg2 source code and switch to the root directory of the source code.
3. Run the **python setup.py build** command.
4. An error is reported, indicating that the version does not match. Modify the version at the corresponding position in the **setup.py** file to suppress the error. You can also run the **sed -i "s/\(pgmajor, pgminor, pgpatch\)/\(9, 2, 4\)/g" setup.py** command to replace the version \(at about line 440 in the file\).
5. Perform step 3 again.
6. After the compilation is complete, the **build** subdirectory is generated in the root directory and it contains the compiled package.
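Putting the compilation steps together, the whole flow might look like the following sketch \(the paths are illustrative, and the sed command is the one from step 4\):

```
# Assumes openGauss is installed and its lib directory is at the
# front of LD_LIBRARY_PATH, as noted above.
git clone https://github.com/psycopg/psycopg2.git
cd psycopg2
sed -i "s/(pgmajor, pgminor, pgpatch)/(9, 2, 4)/g" setup.py   # suppress the version check
python setup.py build
# Check that the built module now links against the openGauss libpq:
ldd build/lib*/psycopg2/_psycopg*.so | grep libpq
```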
diff --git a/content/en/post/2022/Ustore-Rebuilding-the-Soul-of-openGauss-Data-Storage.md b/content/en/post/2022/Ustore-Rebuilding-the-Soul-of-openGauss-Data-Storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..0f7cbe687ea2c927dbaa3b268b42b8db77b7dca3
--- /dev/null
+++ b/content/en/post/2022/Ustore-Rebuilding-the-Soul-of-openGauss-Data-Storage.md
@@ -0,0 +1,112 @@

+++

title = "Ustore, Rebuilding the 'Soul' of openGauss Data Storage"

date = "2021-10-11"

tags = [ "Ustore, Rebuilding the 'Soul' of openGauss Data Storage"]

archives = "2021-10"

author = "Qiang Li"

summary = "Ustore, Rebuilding the 'Soul' of openGauss Data Storage"

img = "/en/post/2022/title/img10.png"

times = "12:30"

+++

# Ustore, Rebuilding the "Soul" of openGauss Data Storage

On August 20, 2021, HUAWEI CLOUD GaussDB \(for openGauss\) officially launched a new kernel feature, Ustore, a storage engine that provides high-performance database services for enterprise-level users and further injects energy into enterprise digital transformation. The openGauss community will also release this feature soon to explore the cutting-edge theories and best practices of databases with many database kernel developers.

The Ustore storage engine, also called the in-place update storage engine, is a new storage mode added to the openGauss kernel. The row storage engine used by earlier openGauss kernel versions works in append update mode. The append update mode performs well for insertion, deletion, and HOT \(Heap Only Tuple\) updates \(that is, updates on the same page\). However, for non-HOT UPDATE scenarios across data pages, garbage collection is not efficient. Ustore solves this problem.

## **Ustore Design Principles**

Ustore stores valid data of the latest version and junk data of historical versions separately. The valid data of the latest version is stored on the data page, and an independent UNDO space is created for managing the junk data of historical versions in a unified manner. Therefore, the data space does not expand due to frequent updates, and the junk data is recycled more efficiently. Ustore adopts the NUMA-aware UNDO subsystem design, which enables the UNDO subsystem to be effectively expanded on a multi-core platform. In addition, Ustore adopts the multi-version index technology to clear indexes and improve the efficiency of recycling and reusing storage space.

Ustore works with the UNDO space to implement more efficient and comprehensive flashback query and recycle bin mechanisms, quickly rolls back misoperations, and provides rich enterprise-class functions for openGauss.

![](../figures/ustore.png)

Ustore data storage: The latest data is stored on the original page, and the old data is managed in the UNDO space.

## **Core Advantages of Ustore**

**High performance**: For services with different loads, such as insertion, update, and deletion, the performance and resource usage are relatively balanced. The in-place update mode is used for update operations. In frequent update scenarios, the performance is higher and more stable. It is suitable for typical OLTP service scenarios that require short transactions, frequent updates, and high performance.

**Efficient storage**: In-place update is supported to the maximum extent, greatly saving space. The rollback segment and data pages are stored separately to ensure more efficient and stable I/O usage.
The UNDO subsystem uses the NUMA-aware design and features better multi-core scalability. The UNDO space is allocated and recycled in a unified manner, which improves reuse efficiency and ensures more efficient and stable storage space usage. + +**Fine-grained resource control**: Ustore provides multi-dimensional transaction monitoring. It can monitor transaction running based on the transaction running duration, size of UNDO space used by a single transaction, and overall UNDO space limitation to prevent abnormal and unexpected behaviors. This helps the database administrator to regulate and restrict the use of database system resources. + +Ustore provides stable performance in scenarios where data is frequently updated, enabling service systems to run more stably and adapt to more service scenarios and workloads, especially core financial service scenarios that have higher requirements on performance and stability. + +In the future, openGauss will use the AI autonomy technology to optimize Ustore to be more intelligent, secure, and efficient, providing customers with more advanced and high-quality database services. + +## **Ustore Usage Guide** + +- **Introduction** + + Ustore coexists with the original append update \(Astore\) storage engine. Ustore shields the implementation details of the storage layer. The SQL syntax is basically the same as that of the original Astore storage engine. The only difference lies in table creation and index creation. + +- **Table Creation Methods** + + Ustore contains UNDO logs. Before creating a table for Ustore, you need to set **undo\_zone\_count** in the **postgresql.conf** file. This parameter indicates the number of UNDO logs. The recommended value is **16384**, that is, **undo\_zone\_count=16384**. After the configuration is complete, restart the database. + + \[postgresql.conf configuration\] + + ``` + undo_zone_count=16384 + ``` + + Method 1: Specify the storage engine type when creating a table. + + ``` + create table test(id int, name varchar(10)) with (storage_type=ustore); + ``` + + Method 2: Specify Ustore by configuring GUC parameters. + + - Step 1: Before starting a database, set **enable\_default\_ustore\_table** to **on** in **postgresql.conf** to specify that Ustore is used when a user creates a table by default. + + \[postgresql.conf configuration\] + + ``` + enable_default_ustore_table=on + ``` + + - Step 2: Create a table. + + ``` + create table test(id int, name varchar(10)); + ``` + + +- **Index Creation Methods** + + UBtree is developed for Ustore and is the only index type supported by Ustore. + + Assume that the **test** table structure is as follows and a UBtree index is to be added to the **age** column of the **test** table. + + ![](../figures/zh-cn_image_0000001207529644.jpg) + + Method 1: If the index type is not specified, a UBtree index is created by default. + + ``` + create index ubt_idx on test(age); + ``` + + ![](../figures/zh-cn_image_0000001252089553.jpg) + + Method 2: When creating an index, use the **using** keyword to set the index type to **ubtree**. 
+ + ``` + create index ubt_idx on test using ubtree(age); + ``` + + ![](../figures/zh-cn_image_0000001207369652.jpg) + + diff --git a/content/en/post/2022/figures/10.png b/content/en/post/2022/figures/10.png new file mode 100644 index 0000000000000000000000000000000000000000..5d61e6965ae3f814132504cfbd8a91f5c7a44268 Binary files /dev/null and b/content/en/post/2022/figures/10.png differ diff --git a/content/en/post/2022/figures/101.png b/content/en/post/2022/figures/101.png new file mode 100644 index 0000000000000000000000000000000000000000..fe28fc91940a3fc0764fde8ea77379c31c25b394 Binary files /dev/null and b/content/en/post/2022/figures/101.png differ diff --git a/content/en/post/2022/figures/102.png b/content/en/post/2022/figures/102.png new file mode 100644 index 0000000000000000000000000000000000000000..64c03cb4c53f4ef0fbd4c40b62a75f342ea22e4a Binary files /dev/null and b/content/en/post/2022/figures/102.png differ diff --git a/content/en/post/2022/figures/110.png b/content/en/post/2022/figures/110.png new file mode 100644 index 0000000000000000000000000000000000000000..4d3611819a7940cc2e8ab40a9a0a2e9d00a0433e Binary files /dev/null and b/content/en/post/2022/figures/110.png differ diff --git a/content/en/post/2022/figures/111.png b/content/en/post/2022/figures/111.png new file mode 100644 index 0000000000000000000000000000000000000000..d33562fee70e4991b075fe3dcc0053043ab5474d Binary files /dev/null and b/content/en/post/2022/figures/111.png differ diff --git a/content/en/post/2022/figures/112.png b/content/en/post/2022/figures/112.png new file mode 100644 index 0000000000000000000000000000000000000000..33418030dc5d44a843b9d6a9e5e41b61ed33a125 Binary files /dev/null and b/content/en/post/2022/figures/112.png differ diff --git a/content/en/post/2022/figures/113.png b/content/en/post/2022/figures/113.png new file mode 100644 index 0000000000000000000000000000000000000000..79e337e8fe9fe608a65e54e98f5f661c42ba00f8 Binary files /dev/null and b/content/en/post/2022/figures/113.png differ diff --git a/content/en/post/2022/figures/114.png b/content/en/post/2022/figures/114.png new file mode 100644 index 0000000000000000000000000000000000000000..ac3d4c30543eba9577568c14a654c48b630d09f5 Binary files /dev/null and b/content/en/post/2022/figures/114.png differ diff --git a/content/en/post/2022/figures/115.png b/content/en/post/2022/figures/115.png new file mode 100644 index 0000000000000000000000000000000000000000..f2d836d73e0562d7ecab5209d5402424083a48d9 Binary files /dev/null and b/content/en/post/2022/figures/115.png differ diff --git a/content/en/post/2022/figures/171.png b/content/en/post/2022/figures/171.png new file mode 100644 index 0000000000000000000000000000000000000000..31a3af3994dc99bfa556b1e1489191261b784448 Binary files /dev/null and b/content/en/post/2022/figures/171.png differ diff --git a/content/en/post/2022/figures/1710.png b/content/en/post/2022/figures/1710.png new file mode 100644 index 0000000000000000000000000000000000000000..b10f33340de65b253f1efa031dba011bb13d200a Binary files /dev/null and b/content/en/post/2022/figures/1710.png differ diff --git a/content/en/post/2022/figures/1711.png b/content/en/post/2022/figures/1711.png new file mode 100644 index 0000000000000000000000000000000000000000..47a426f405ef1fa7b01fbe764f8221c664c77bd3 Binary files /dev/null and b/content/en/post/2022/figures/1711.png differ diff --git a/content/en/post/2022/figures/1712.png b/content/en/post/2022/figures/1712.png new file mode 100644 index 
0000000000000000000000000000000000000000..96df9deebfa7abf6856fa4639e8dcc68dd2f4d1d Binary files /dev/null and b/content/en/post/2022/figures/1712.png differ diff --git a/content/en/post/2022/figures/1713.png b/content/en/post/2022/figures/1713.png new file mode 100644 index 0000000000000000000000000000000000000000..45e51fb431dc28322a4cb52721c73060ef6a39b6 Binary files /dev/null and b/content/en/post/2022/figures/1713.png differ diff --git a/content/en/post/2022/figures/1714.png b/content/en/post/2022/figures/1714.png new file mode 100644 index 0000000000000000000000000000000000000000..c0a72b15bcd892ffbbd6a21f729a1346b1d3d560 Binary files /dev/null and b/content/en/post/2022/figures/1714.png differ diff --git a/content/en/post/2022/figures/1715.png b/content/en/post/2022/figures/1715.png new file mode 100644 index 0000000000000000000000000000000000000000..5e810bf3660a524d9363ca6e6b777aa1430b99a0 Binary files /dev/null and b/content/en/post/2022/figures/1715.png differ diff --git a/content/en/post/2022/figures/1716.png b/content/en/post/2022/figures/1716.png new file mode 100644 index 0000000000000000000000000000000000000000..c44b3c438fb8ecca1813f63d028d2692fe7235ef Binary files /dev/null and b/content/en/post/2022/figures/1716.png differ diff --git a/content/en/post/2022/figures/1717.png b/content/en/post/2022/figures/1717.png new file mode 100644 index 0000000000000000000000000000000000000000..cf4c3cff2ed6599688c1873808c0a099d2392b9b Binary files /dev/null and b/content/en/post/2022/figures/1717.png differ diff --git a/content/en/post/2022/figures/1718.png b/content/en/post/2022/figures/1718.png new file mode 100644 index 0000000000000000000000000000000000000000..80651de5479d1567cb6e4755a09bcd38f526c264 Binary files /dev/null and b/content/en/post/2022/figures/1718.png differ diff --git a/content/en/post/2022/figures/1719.png b/content/en/post/2022/figures/1719.png new file mode 100644 index 0000000000000000000000000000000000000000..eb8ad041ac91893b2a698357a734a33caba2a18d Binary files /dev/null and b/content/en/post/2022/figures/1719.png differ diff --git a/content/en/post/2022/figures/172.png b/content/en/post/2022/figures/172.png new file mode 100644 index 0000000000000000000000000000000000000000..a27ed4268cb996c699b3da07b4bf20c76c5b9456 Binary files /dev/null and b/content/en/post/2022/figures/172.png differ diff --git a/content/en/post/2022/figures/1720.png b/content/en/post/2022/figures/1720.png new file mode 100644 index 0000000000000000000000000000000000000000..d487a082859b13163a8b536697364eb419d42cb1 Binary files /dev/null and b/content/en/post/2022/figures/1720.png differ diff --git a/content/en/post/2022/figures/173.png b/content/en/post/2022/figures/173.png new file mode 100644 index 0000000000000000000000000000000000000000..754f5f16506a45d2a2650a2abc240a06f7cf93ab Binary files /dev/null and b/content/en/post/2022/figures/173.png differ diff --git a/content/en/post/2022/figures/174.png b/content/en/post/2022/figures/174.png new file mode 100644 index 0000000000000000000000000000000000000000..2cc90d29f631721724024168a7c9890748810aad Binary files /dev/null and b/content/en/post/2022/figures/174.png differ diff --git a/content/en/post/2022/figures/175.png b/content/en/post/2022/figures/175.png new file mode 100644 index 0000000000000000000000000000000000000000..e54391094200dd111a9e204552ec0fbd4efc8b32 Binary files /dev/null and b/content/en/post/2022/figures/175.png differ diff --git a/content/en/post/2022/figures/176.png b/content/en/post/2022/figures/176.png new file mode 100644 
index 0000000000000000000000000000000000000000..45d17662b1a4908f243877970023d8c762ccc5ba Binary files /dev/null and b/content/en/post/2022/figures/176.png differ diff --git a/content/en/post/2022/figures/177.png b/content/en/post/2022/figures/177.png new file mode 100644 index 0000000000000000000000000000000000000000..e1cb44d7ac45a6f54b7eb9576376e938f418f354 Binary files /dev/null and b/content/en/post/2022/figures/177.png differ diff --git a/content/en/post/2022/figures/178.png b/content/en/post/2022/figures/178.png new file mode 100644 index 0000000000000000000000000000000000000000..a9311ba251d1d3362e11567372a45636c6b75ea5 Binary files /dev/null and b/content/en/post/2022/figures/178.png differ diff --git a/content/en/post/2022/figures/179.png b/content/en/post/2022/figures/179.png new file mode 100644 index 0000000000000000000000000000000000000000..4392c3d13133f7c15ef18582ebe827cf850e02b6 Binary files /dev/null and b/content/en/post/2022/figures/179.png differ diff --git a/content/en/post/2022/figures/21.png b/content/en/post/2022/figures/21.png new file mode 100644 index 0000000000000000000000000000000000000000..7fef241d0b69cc8f0b570851fab31d5a33f786b3 Binary files /dev/null and b/content/en/post/2022/figures/21.png differ diff --git a/content/en/post/2022/figures/24.png b/content/en/post/2022/figures/24.png new file mode 100644 index 0000000000000000000000000000000000000000..d3a67599f7acbf50dc5d1cc1cbfab602ab7260a6 Binary files /dev/null and b/content/en/post/2022/figures/24.png differ diff --git a/content/en/post/2022/figures/241.png b/content/en/post/2022/figures/241.png new file mode 100644 index 0000000000000000000000000000000000000000..bc1835110116a2ebb927b25cb875328c1bf54963 Binary files /dev/null and b/content/en/post/2022/figures/241.png differ diff --git a/content/en/post/2022/figures/26-openGauss-Log-Consensus-Framework.png b/content/en/post/2022/figures/26-openGauss-Log-Consensus-Framework.png new file mode 100644 index 0000000000000000000000000000000000000000..b195b70364223a7f8ebb41616be6486cf9dbcfec Binary files /dev/null and b/content/en/post/2022/figures/26-openGauss-Log-Consensus-Framework.png differ diff --git a/content/en/post/2022/figures/28.png b/content/en/post/2022/figures/28.png new file mode 100644 index 0000000000000000000000000000000000000000..0434f76df184b1b47cf2b1fca208b230b9c30a86 Binary files /dev/null and b/content/en/post/2022/figures/28.png differ diff --git a/content/en/post/2022/figures/282.png b/content/en/post/2022/figures/282.png new file mode 100644 index 0000000000000000000000000000000000000000..3fa6fb4876e998a47bc0baf98b51b725c725f7df Binary files /dev/null and b/content/en/post/2022/figures/282.png differ diff --git a/content/en/post/2022/figures/283.png b/content/en/post/2022/figures/283.png new file mode 100644 index 0000000000000000000000000000000000000000..2f63883b3c90c5de5d72929d35775a992a197282 Binary files /dev/null and b/content/en/post/2022/figures/283.png differ diff --git a/content/en/post/2022/figures/284.png b/content/en/post/2022/figures/284.png new file mode 100644 index 0000000000000000000000000000000000000000..2607a6f3c3d7f244ca0b659c5bb25378cfb39759 Binary files /dev/null and b/content/en/post/2022/figures/284.png differ diff --git a/content/en/post/2022/figures/285.png b/content/en/post/2022/figures/285.png new file mode 100644 index 0000000000000000000000000000000000000000..242fa313e1de78abf1a8bbd2c7a1cea2ee147ac1 Binary files /dev/null and b/content/en/post/2022/figures/285.png differ diff --git 
a/content/en/post/2022/figures/3.png b/content/en/post/2022/figures/3.png new file mode 100644 index 0000000000000000000000000000000000000000..5cce1c14b8225575e12cafa23fb2a6fcba03db65 Binary files /dev/null and b/content/en/post/2022/figures/3.png differ diff --git a/content/en/post/2022/figures/31.png b/content/en/post/2022/figures/31.png new file mode 100644 index 0000000000000000000000000000000000000000..cb1df175f89046f16d3d7bd2831a985aaa7392e1 Binary files /dev/null and b/content/en/post/2022/figures/31.png differ diff --git a/content/en/post/2022/figures/311.png b/content/en/post/2022/figures/311.png new file mode 100644 index 0000000000000000000000000000000000000000..b261164ac2e457dad770e52f7badd2a6b5936cf5 Binary files /dev/null and b/content/en/post/2022/figures/311.png differ diff --git a/content/en/post/2022/figures/312.png b/content/en/post/2022/figures/312.png new file mode 100644 index 0000000000000000000000000000000000000000..bce02962735d39a5f4fc148e995e0b3caa0b421e Binary files /dev/null and b/content/en/post/2022/figures/312.png differ diff --git a/content/en/post/2022/figures/313.png b/content/en/post/2022/figures/313.png new file mode 100644 index 0000000000000000000000000000000000000000..16550186cea3f9e249d0c1931e0c5561210f2264 Binary files /dev/null and b/content/en/post/2022/figures/313.png differ diff --git a/content/en/post/2022/figures/314.png b/content/en/post/2022/figures/314.png new file mode 100644 index 0000000000000000000000000000000000000000..07c1fa36a50f494779c53f164b94f9df348b9869 Binary files /dev/null and b/content/en/post/2022/figures/314.png differ diff --git a/content/en/post/2022/figures/315.png b/content/en/post/2022/figures/315.png new file mode 100644 index 0000000000000000000000000000000000000000..2dbce91a21ceda6c429dab9f1cae74c3b2480833 Binary files /dev/null and b/content/en/post/2022/figures/315.png differ diff --git a/content/en/post/2022/figures/32.png b/content/en/post/2022/figures/32.png new file mode 100644 index 0000000000000000000000000000000000000000..c326161f098cb282f02ffb48c251341a87d45616 Binary files /dev/null and b/content/en/post/2022/figures/32.png differ diff --git a/content/en/post/2022/figures/320.png b/content/en/post/2022/figures/320.png new file mode 100644 index 0000000000000000000000000000000000000000..44949c637ec7bcc1b58b49e046cde0bbfa3e0c7f Binary files /dev/null and b/content/en/post/2022/figures/320.png differ diff --git a/content/en/post/2022/figures/41.png b/content/en/post/2022/figures/41.png new file mode 100644 index 0000000000000000000000000000000000000000..d8bed44c9bff6755f589f40604de04d65d29c61c Binary files /dev/null and b/content/en/post/2022/figures/41.png differ diff --git a/content/en/post/2022/figures/42.png b/content/en/post/2022/figures/42.png new file mode 100644 index 0000000000000000000000000000000000000000..2c7025ca1edbdbbc95d13a04718b99964bba336b Binary files /dev/null and b/content/en/post/2022/figures/42.png differ diff --git a/content/en/post/2022/figures/43.png b/content/en/post/2022/figures/43.png new file mode 100644 index 0000000000000000000000000000000000000000..298eafb7ce915df6b3fd3bddf1acc8f02fd24165 Binary files /dev/null and b/content/en/post/2022/figures/43.png differ diff --git a/content/en/post/2022/figures/44.png b/content/en/post/2022/figures/44.png new file mode 100644 index 0000000000000000000000000000000000000000..f68edfb6099af7289e848ed3bcd08491a92a524f Binary files /dev/null and b/content/en/post/2022/figures/44.png differ diff --git a/content/en/post/2022/figures/45.png 
b/content/en/post/2022/figures/45.png new file mode 100644 index 0000000000000000000000000000000000000000..3de93e51ee74eedb290bcbd967da09e7cd9ef5f1 Binary files /dev/null and b/content/en/post/2022/figures/45.png differ diff --git a/content/en/post/2022/figures/46.png b/content/en/post/2022/figures/46.png new file mode 100644 index 0000000000000000000000000000000000000000..2cebd77accb5d41fcb9353fa3e2e55ed2454050b Binary files /dev/null and b/content/en/post/2022/figures/46.png differ diff --git a/content/en/post/2022/figures/47.png b/content/en/post/2022/figures/47.png new file mode 100644 index 0000000000000000000000000000000000000000..b954fcd5072264f1a32ea5a47b95a79e6f6d9a3c Binary files /dev/null and b/content/en/post/2022/figures/47.png differ diff --git a/content/en/post/2022/figures/48.png b/content/en/post/2022/figures/48.png new file mode 100644 index 0000000000000000000000000000000000000000..a2370f1c980dbea7b96e3325312786ae48e7131f Binary files /dev/null and b/content/en/post/2022/figures/48.png differ diff --git a/content/en/post/2022/figures/49.png b/content/en/post/2022/figures/49.png new file mode 100644 index 0000000000000000000000000000000000000000..8c3e7fc51156cd890df9ebdbdb1329cc9a85bb39 Binary files /dev/null and b/content/en/post/2022/figures/49.png differ diff --git a/content/en/post/2022/figures/61.png b/content/en/post/2022/figures/61.png new file mode 100644 index 0000000000000000000000000000000000000000..95faa3f21e351f477e7d95b2239e73a4f4491385 Binary files /dev/null and b/content/en/post/2022/figures/61.png differ diff --git a/content/en/post/2022/figures/62-0.png b/content/en/post/2022/figures/62-0.png new file mode 100644 index 0000000000000000000000000000000000000000..3beaa6cba81da70f283788ee53b940019f549f78 Binary files /dev/null and b/content/en/post/2022/figures/62-0.png differ diff --git a/content/en/post/2022/figures/62.png b/content/en/post/2022/figures/62.png new file mode 100644 index 0000000000000000000000000000000000000000..3beaa6cba81da70f283788ee53b940019f549f78 Binary files /dev/null and b/content/en/post/2022/figures/62.png differ diff --git a/content/en/post/2022/figures/7.png b/content/en/post/2022/figures/7.png new file mode 100644 index 0000000000000000000000000000000000000000..3476e4fa7c5da609f4f28088233391042198b259 Binary files /dev/null and b/content/en/post/2022/figures/7.png differ diff --git a/content/en/post/2022/figures/ustore.png b/content/en/post/2022/figures/ustore.png new file mode 100644 index 0000000000000000000000000000000000000000..b74bf5df6e8da4550d034f11ee2098239bd9c2e9 Binary files /dev/null and b/content/en/post/2022/figures/ustore.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206146876.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206146876.jpg new file mode 100644 index 0000000000000000000000000000000000000000..228837de1190f373617c82088cf80a63928b89fd Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206146876.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206167254.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206167254.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a49980daf88fbb5de8a7e155827e723ec0104c26 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206167254.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206317236.gif b/content/en/post/2022/figures/zh-cn_image_0000001206317236.gif new file mode 100644 index 
0000000000000000000000000000000000000000..75e84ef5295b3194ad40505c3f5de3d63d279cae Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206317236.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206327254.gif b/content/en/post/2022/figures/zh-cn_image_0000001206327254.gif new file mode 100644 index 0000000000000000000000000000000000000000..84a4ab2c68a128da887432bb6d5d8beaaa79997e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206327254.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206327256.gif b/content/en/post/2022/figures/zh-cn_image_0000001206327256.gif new file mode 100644 index 0000000000000000000000000000000000000000..6068b15a50a1f3cc0574ab51d66b4a7a6b379f45 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206327256.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206327258.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206327258.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f6fcb791326728455ea10edffca6e3c63acfcc9 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206327258.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206327260.gif b/content/en/post/2022/figures/zh-cn_image_0000001206327260.gif new file mode 100644 index 0000000000000000000000000000000000000000..ae0c3f1f2615bd71a1e12fb24ee70bdd738b8bd0 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206327260.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206487222.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206487222.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3d17dee6991e253cfb0c5e2ccd30a7d8df86b89 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206487222.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206487224.gif b/content/en/post/2022/figures/zh-cn_image_0000001206487224.gif new file mode 100644 index 0000000000000000000000000000000000000000..10a6848a70d9a5fc11c20f9844a0d8e67b3bc4e6 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206487224.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206626828.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206626828.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c86391c33e778d1167af74bf0d8018970e4e720b Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206626828.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206647202.gif b/content/en/post/2022/figures/zh-cn_image_0000001206647202.gif new file mode 100644 index 0000000000000000000000000000000000000000..dd5a8bceae4d81ffcafc2c0caf79c490f4ef7fad Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206647202.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206647206.gif b/content/en/post/2022/figures/zh-cn_image_0000001206647206.gif new file mode 100644 index 0000000000000000000000000000000000000000..1a6dad0491198086a1b93e478c6d235407541523 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206647206.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206647208.gif b/content/en/post/2022/figures/zh-cn_image_0000001206647208.gif new file mode 100644 index 0000000000000000000000000000000000000000..aa083f31589390271ac3b2017cf7c519ddb4f8bb 
Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206647208.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206760224.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206760224.jpg new file mode 100644 index 0000000000000000000000000000000000000000..654116daba11cecd31fb45cf56eaf00f0f61c13e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206760224.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206760226.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206760226.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eca4505048123faef80bb6bc5a113e1ab50c4e41 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206760226.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206760228.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206760228.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d63b73cde195dbbda83bfdc3681946a1cf33bdcb Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206760228.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206760230.gif b/content/en/post/2022/figures/zh-cn_image_0000001206760230.gif new file mode 100644 index 0000000000000000000000000000000000000000..b5540a9d276cf7fb29a04aa0510c55d984842dda Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206760230.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206760240.gif b/content/en/post/2022/figures/zh-cn_image_0000001206760240.gif new file mode 100644 index 0000000000000000000000000000000000000000..17a8b25ba0b9f4ba06d42e9da58911f7e10a0983 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206760240.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206801884.png b/content/en/post/2022/figures/zh-cn_image_0000001206801884.png new file mode 100644 index 0000000000000000000000000000000000000000..da0837a0e01c172b4c7a867c7ef078757d5f4dec Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206801884.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206801888.png b/content/en/post/2022/figures/zh-cn_image_0000001206801888.png new file mode 100644 index 0000000000000000000000000000000000000000..da0837a0e01c172b4c7a867c7ef078757d5f4dec Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206801888.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206801890.png b/content/en/post/2022/figures/zh-cn_image_0000001206801890.png new file mode 100644 index 0000000000000000000000000000000000000000..794bd1c5236a2d365b76a163db009937b64a8fb5 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206801890.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206807380.png b/content/en/post/2022/figures/zh-cn_image_0000001206807380.png new file mode 100644 index 0000000000000000000000000000000000000000..27c6e12bb0c4384b3682fc5c706b1eef20b0ca72 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206807380.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206809156.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206809156.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a77c8c3ac6df6226a7e5f87934dbc688f74faa84 Binary files /dev/null and 
b/content/en/post/2022/figures/zh-cn_image_0000001206809156.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206809160.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206809160.jpg new file mode 100644 index 0000000000000000000000000000000000000000..700877443d4509d152563c4e490675e791726268 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206809160.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206812360.png b/content/en/post/2022/figures/zh-cn_image_0000001206812360.png new file mode 100644 index 0000000000000000000000000000000000000000..0c09237f0dd8bc16bf58815eb9aff422fe31566b Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206812360.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206920214.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206920214.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca04c936111fd6eaa9802807f2a1366d4f58d518 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206920214.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206920220.gif b/content/en/post/2022/figures/zh-cn_image_0000001206920220.gif new file mode 100644 index 0000000000000000000000000000000000000000..6885eb9a1fc1d52c561790da736f4b9910bfc487 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206920220.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206920224.gif b/content/en/post/2022/figures/zh-cn_image_0000001206920224.gif new file mode 100644 index 0000000000000000000000000000000000000000..547b6a61c94d46c0e71829c8630a19c3488c909a Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206920224.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206961046.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206961046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30aaded16a123b984b990d35b4a592da88e241d6 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206961046.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206961884.png b/content/en/post/2022/figures/zh-cn_image_0000001206961884.png new file mode 100644 index 0000000000000000000000000000000000000000..da0837a0e01c172b4c7a867c7ef078757d5f4dec Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206961884.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206967370.png b/content/en/post/2022/figures/zh-cn_image_0000001206967370.png new file mode 100644 index 0000000000000000000000000000000000000000..b7ecd3e4db8c7078cf49c979145db58c371d997d Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206967370.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206968992.jpg b/content/en/post/2022/figures/zh-cn_image_0000001206968992.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5fc6157ce28ed455a19ef657f60b5355cd62de5 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206968992.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001206972348.png b/content/en/post/2022/figures/zh-cn_image_0000001206972348.png new file mode 100644 index 0000000000000000000000000000000000000000..a7cef3daed489a6cdd6c92096dbc741966668924 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206972348.png differ diff --git 
a/content/en/post/2022/figures/zh-cn_image_0000001206972352.png b/content/en/post/2022/figures/zh-cn_image_0000001206972352.png new file mode 100644 index 0000000000000000000000000000000000000000..8c9328586a0b976dee933ef8226d74382fc1c7b0 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001206972352.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207080190.gif b/content/en/post/2022/figures/zh-cn_image_0000001207080190.gif new file mode 100644 index 0000000000000000000000000000000000000000..a79f840b314aa57ee5972549172457ee65545cf2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207080190.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207089084.gif b/content/en/post/2022/figures/zh-cn_image_0000001207089084.gif new file mode 100644 index 0000000000000000000000000000000000000000..09e9fcca18283c4fcf0ea0218b9e60f10c4131c6 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207089084.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207121020.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207121020.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15a89fa9fcc8879918d9698cbdf93204cf34fdba Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207121020.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207121854.png b/content/en/post/2022/figures/zh-cn_image_0000001207121854.png new file mode 100644 index 0000000000000000000000000000000000000000..8c66ceccf7240eb405c23afd58919d1599d8cf68 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207121854.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207121858.png b/content/en/post/2022/figures/zh-cn_image_0000001207121858.png new file mode 100644 index 0000000000000000000000000000000000000000..8c66ceccf7240eb405c23afd58919d1599d8cf68 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207121858.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207131798.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207131798.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94cb8630977c991a55a2eea8f41917a8fdbb4fb1 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207131798.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207132328.png b/content/en/post/2022/figures/zh-cn_image_0000001207132328.png new file mode 100644 index 0000000000000000000000000000000000000000..67fe77a94e77f60888a616acdf35756a2154454d Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207132328.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207138590.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207138590.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc924b238f05ed6c9a29dde627d99d6f2abbbd8e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207138590.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207240170.gif b/content/en/post/2022/figures/zh-cn_image_0000001207240170.gif new file mode 100644 index 0000000000000000000000000000000000000000..f4f5fcac4770318fdc202729c05c29b08f93c2fb Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207240170.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207249058.gif 
b/content/en/post/2022/figures/zh-cn_image_0000001207249058.gif new file mode 100644 index 0000000000000000000000000000000000000000..1480cb5987cda99a0f9f83a6c12e46a0f0961084 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207249058.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207280996.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207280996.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c2b120df77d6834c9c2a8496d29b5caa916eb8c Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207280996.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207280998.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207280998.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0ee4bb373bf011373a10fab492eda1ad7b463d7 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207280998.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207289100.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207289100.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f407f575746cda2f62075eefb9d0316e29635d96 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207289100.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207291774.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207291774.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36cdd3ce6397e97c87a437e1ec45264eabdd184f Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207291774.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207302704.gif b/content/en/post/2022/figures/zh-cn_image_0000001207302704.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207302704.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207302710.gif b/content/en/post/2022/figures/zh-cn_image_0000001207302710.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207302710.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207369652.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207369652.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b4dbb2590c6667efb62a1132fd166a2b89e4476 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207369652.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207462684.gif b/content/en/post/2022/figures/zh-cn_image_0000001207462684.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207462684.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207462688.gif b/content/en/post/2022/figures/zh-cn_image_0000001207462688.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207462688.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207462698.gif b/content/en/post/2022/figures/zh-cn_image_0000001207462698.gif new file mode 100644 
index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207462698.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207462700.gif b/content/en/post/2022/figures/zh-cn_image_0000001207462700.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207462700.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207516746.png b/content/en/post/2022/figures/zh-cn_image_0000001207516746.png new file mode 100644 index 0000000000000000000000000000000000000000..9bb3c60728e778a9c2bd906d42d6751f8705eb6d Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207516746.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207529644.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207529644.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27c5e483e2dfd78b2870fd1de41e202eb2bbf6d5 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207529644.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207539820.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207539820.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2331927a09915200151614b250d0aa4f30b2977 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207539820.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207622650.gif b/content/en/post/2022/figures/zh-cn_image_0000001207622650.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207622650.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207622660.gif b/content/en/post/2022/figures/zh-cn_image_0000001207622660.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207622660.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207677032.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207677032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d310d14aedf33b41d6c6191a886a6e980238cf8 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207677032.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207699778.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207699778.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3b7fd5149932cc3db01d1968d33b88364cf3b71 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207699778.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207699780.jpg b/content/en/post/2022/figures/zh-cn_image_0000001207699780.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f82934da20727218ccca9fa29ce456a3735de49 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207699780.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207772870.png b/content/en/post/2022/figures/zh-cn_image_0000001207772870.png new file mode 100644 index 
0000000000000000000000000000000000000000..d91afda7c16497164886e3a8c8255009482b5fab Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207772870.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207782632.gif b/content/en/post/2022/figures/zh-cn_image_0000001207782632.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207782632.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207782634.gif b/content/en/post/2022/figures/zh-cn_image_0000001207782634.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207782634.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207782636.gif b/content/en/post/2022/figures/zh-cn_image_0000001207782636.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207782636.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207782640.gif b/content/en/post/2022/figures/zh-cn_image_0000001207782640.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207782640.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207782650.gif b/content/en/post/2022/figures/zh-cn_image_0000001207782650.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207782650.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207863420.png b/content/en/post/2022/figures/zh-cn_image_0000001207863420.png new file mode 100644 index 0000000000000000000000000000000000000000..c6bcab86885267de1ba5c1842191dfda57d64f5c Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207863420.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001207963344.png b/content/en/post/2022/figures/zh-cn_image_0000001207963344.png new file mode 100644 index 0000000000000000000000000000000000000000..168fe7a5ed2faf1cac1e912a310eac8253c03250 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001207963344.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001208124506.png b/content/en/post/2022/figures/zh-cn_image_0000001208124506.png new file mode 100644 index 0000000000000000000000000000000000000000..8374d59a434b465d6c14a71ff88fd85dbd43c859 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001208124506.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001208315958.gif b/content/en/post/2022/figures/zh-cn_image_0000001208315958.gif new file mode 100644 index 0000000000000000000000000000000000000000..d5f34e46bc5949d7b185c1f8837f4d3251467161 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001208315958.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001208473690.png b/content/en/post/2022/figures/zh-cn_image_0000001208473690.png new file mode 100644 index 0000000000000000000000000000000000000000..19d132b17f4f0ebbecb007ab43fe828785780881 
Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001208473690.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001208491336.png b/content/en/post/2022/figures/zh-cn_image_0000001208491336.png new file mode 100644 index 0000000000000000000000000000000000000000..c1a5116fddd120281ef5bd081d6cf75850e7b2c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001208491336.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001211903080.jpg b/content/en/post/2022/figures/zh-cn_image_0000001211903080.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cdb72b536cff313b1fb1f8aaab0504378605770 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001211903080.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001211903084.gif b/content/en/post/2022/figures/zh-cn_image_0000001211903084.gif new file mode 100644 index 0000000000000000000000000000000000000000..884cb96cf8aaa5087e8aa1b5b7251933566b5832 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001211903084.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001212062138.gif b/content/en/post/2022/figures/zh-cn_image_0000001212062138.gif new file mode 100644 index 0000000000000000000000000000000000000000..1a6dad0491198086a1b93e478c6d235407541523 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001212062138.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001212063076.gif b/content/en/post/2022/figures/zh-cn_image_0000001212063076.gif new file mode 100644 index 0000000000000000000000000000000000000000..3f547552be03f11e95859e714b6c2b192c50ec63 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001212063076.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001212089804.png b/content/en/post/2022/figures/zh-cn_image_0000001212089804.png new file mode 100644 index 0000000000000000000000000000000000000000..c2d9e09460039e616a587d46edccea8974961635 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001212089804.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001212222114.gif b/content/en/post/2022/figures/zh-cn_image_0000001212222114.gif new file mode 100644 index 0000000000000000000000000000000000000000..84a4ab2c68a128da887432bb6d5d8beaaa79997e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001212222114.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001212223058.gif b/content/en/post/2022/figures/zh-cn_image_0000001212223058.gif new file mode 100644 index 0000000000000000000000000000000000000000..c4c2808701d1b65c787f0750231c77a5f97370f9 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001212223058.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001212382082.gif b/content/en/post/2022/figures/zh-cn_image_0000001212382082.gif new file mode 100644 index 0000000000000000000000000000000000000000..3cf82a6400b989b25079d51716b8cb359cacddca Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001212382082.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001234914846.png b/content/en/post/2022/figures/zh-cn_image_0000001234914846.png new file mode 100644 index 0000000000000000000000000000000000000000..f4ab2dc650b33b2ea0cef158021090f049578191 Binary files /dev/null and 
b/content/en/post/2022/figures/zh-cn_image_0000001234914846.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001235074794.png b/content/en/post/2022/figures/zh-cn_image_0000001235074794.png new file mode 100644 index 0000000000000000000000000000000000000000..e793845012589b55c232bd25e311894c27419b70 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001235074794.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001235076358.jpg b/content/en/post/2022/figures/zh-cn_image_0000001235076358.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98e7ea5e683f31be75b5a69ddf5bc94761b07a4c Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001235076358.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251117213.gif b/content/en/post/2022/figures/zh-cn_image_0000001251117213.gif new file mode 100644 index 0000000000000000000000000000000000000000..7c62269304f4445a76ca89694e7dfa26674f2a4e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251117213.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251127227.gif b/content/en/post/2022/figures/zh-cn_image_0000001251127227.gif new file mode 100644 index 0000000000000000000000000000000000000000..c04be5cb6f6d2e0a974aa0f9a9133f6435f93efb Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251127227.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251127229.gif b/content/en/post/2022/figures/zh-cn_image_0000001251127229.gif new file mode 100644 index 0000000000000000000000000000000000000000..f53c01bb97471dcd52f9d4571d9b4159c1739d55 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251127229.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251127237.gif b/content/en/post/2022/figures/zh-cn_image_0000001251127237.gif new file mode 100644 index 0000000000000000000000000000000000000000..3f547552be03f11e95859e714b6c2b192c50ec63 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251127237.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251237167.gif b/content/en/post/2022/figures/zh-cn_image_0000001251237167.gif new file mode 100644 index 0000000000000000000000000000000000000000..8c50b89bbfbe0aa69fcd90c2b2169e6c5241770f Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251237167.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251247175.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251247175.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cdb72b536cff313b1fb1f8aaab0504378605770 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251247175.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251327177.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251327177.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb6c0db5a8dbb4c25d3f520aadb509f2c242308e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251327177.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251327179.gif b/content/en/post/2022/figures/zh-cn_image_0000001251327179.gif new file mode 100644 index 0000000000000000000000000000000000000000..3cf82a6400b989b25079d51716b8cb359cacddca Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251327179.gif differ diff --git 
a/content/en/post/2022/figures/zh-cn_image_0000001251447219.gif b/content/en/post/2022/figures/zh-cn_image_0000001251447219.gif new file mode 100644 index 0000000000000000000000000000000000000000..92332dbc496b8f77bc8e4241a6075f47ef0ab113 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251447219.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251458611.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251458611.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d62974f01edb70935412c5a4a16742240fa6fd2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251458611.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251538613.gif b/content/en/post/2022/figures/zh-cn_image_0000001251538613.gif new file mode 100644 index 0000000000000000000000000000000000000000..54e8ac2186e10d828b6a2e28aff9fa7571c388a4 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251538613.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251538617.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251538617.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97179c0a1aa8a711a45fd653bcf9175163e5be9b Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251538617.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251640179.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251640179.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c3bde43e77f33ea9c4f6510c1cdf0200af92a64 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251640179.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251640181.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251640181.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef8f45700bbc2fe67006be4cfc2d8a9e8007639a Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251640181.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251754525.gif b/content/en/post/2022/figures/zh-cn_image_0000001251754525.gif new file mode 100644 index 0000000000000000000000000000000000000000..a279e25c47f88ae57424695ffeddffecd8458f29 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251754525.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251760151.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251760151.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ff4ebfd5c9b880aca09f24608ff0fd4ebd88729 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251760151.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251760153.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251760153.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56b7a448855acb1f4adb50bec4f6c118707a1a49 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251760153.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251800179.gif b/content/en/post/2022/figures/zh-cn_image_0000001251800179.gif new file mode 100644 index 0000000000000000000000000000000000000000..a74e9002808e88e7bddd126586f61ac054c110cc Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251800179.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251800185.jpg 
b/content/en/post/2022/figures/zh-cn_image_0000001251800185.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f58e43aa09bf7387a67c344dfe7496e87a5bd96 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251800185.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251800187.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251800187.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3fde9e339e1aabcbe4304c411bb7edbc4bba9b72 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251800187.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251839693.png b/content/en/post/2022/figures/zh-cn_image_0000001251839693.png new file mode 100644 index 0000000000000000000000000000000000000000..5d39aa1e622cf0fbd4acee0b4e35fd01e08c03d8 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251839693.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251841009.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251841009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..118e670379797b46fd17863507ffcb89b1e9829c Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251841009.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251841849.png b/content/en/post/2022/figures/zh-cn_image_0000001251841849.png new file mode 100644 index 0000000000000000000000000000000000000000..da0837a0e01c172b4c7a867c7ef078757d5f4dec Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251841849.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251847329.png b/content/en/post/2022/figures/zh-cn_image_0000001251847329.png new file mode 100644 index 0000000000000000000000000000000000000000..87e57f270b9bd9442d10d4ac62b4c75554119cba Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251847329.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251848955.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251848955.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e52cfb46a93a085d139243e7a4d81990d785775c Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251848955.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251848959.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251848959.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f425f2126e2e3478d12b6b68e5cfdcf9d164c5e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251848959.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251852313.png b/content/en/post/2022/figures/zh-cn_image_0000001251852313.png new file mode 100644 index 0000000000000000000000000000000000000000..2ff382c8db1c58558d7cc60201865a05ad5f53b8 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251852313.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251862655.gif b/content/en/post/2022/figures/zh-cn_image_0000001251862655.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251862655.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251862661.gif b/content/en/post/2022/figures/zh-cn_image_0000001251862661.gif new file mode 100644 
index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251862661.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251862669.gif b/content/en/post/2022/figures/zh-cn_image_0000001251862669.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251862669.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251894929.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251894929.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9b35c13e54358bc85b10783f9163cc001179641 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251894929.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251917015.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251917015.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b88aa4c452e7ee95ed84cb0228af984a0030c0a8 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251917015.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251920351.png b/content/en/post/2022/figures/zh-cn_image_0000001251920351.png new file mode 100644 index 0000000000000000000000000000000000000000..6d2460cc06ae9f72080e754800680feb5c9899ad Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251920351.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251954519.gif b/content/en/post/2022/figures/zh-cn_image_0000001251954519.gif new file mode 100644 index 0000000000000000000000000000000000000000..409fca5e88397484714f9b9892ba1776f166220f Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251954519.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251960129.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251960129.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f550712f0a788e3bc9e133cf9fe82946402aab4 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251960129.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251960131.gif b/content/en/post/2022/figures/zh-cn_image_0000001251960131.gif new file mode 100644 index 0000000000000000000000000000000000000000..eb0ab38988bbb9750e7d3f79ec73c5ce456e1bde Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251960131.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251960133.gif b/content/en/post/2022/figures/zh-cn_image_0000001251960133.gif new file mode 100644 index 0000000000000000000000000000000000000000..05d560db8c8e933b1ef0a6b97e5f494f3e5882b0 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251960133.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251960135.gif b/content/en/post/2022/figures/zh-cn_image_0000001251960135.gif new file mode 100644 index 0000000000000000000000000000000000000000..dfc418872a748fb17d0935caa5492e94d7afb341 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251960135.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001251969031.jpg b/content/en/post/2022/figures/zh-cn_image_0000001251969031.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..bdc1176d998aff1dad60e108312951a96e522feb Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001251969031.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252008911.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252008911.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd39b5b03365ff8aef55fb1e9b3f5cd11dcc8679 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252008911.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252009063.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252009063.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5f70d464d6f586efaa14aec019d04fc906b827e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252009063.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252011743.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252011743.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f287b0c0453b163744545ae173e3bda1ffc2e50 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252011743.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252022603.gif b/content/en/post/2022/figures/zh-cn_image_0000001252022603.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252022603.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252022613.gif b/content/en/post/2022/figures/zh-cn_image_0000001252022613.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252022613.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252065761.gif b/content/en/post/2022/figures/zh-cn_image_0000001252065761.gif new file mode 100644 index 0000000000000000000000000000000000000000..16e890b20e4844d82ac516c3636100279a338f45 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252065761.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252089553.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252089553.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00bf030e77b27468c7716503a3dc6e226b46bcc0 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252089553.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252121009.png b/content/en/post/2022/figures/zh-cn_image_0000001252121009.png new file mode 100644 index 0000000000000000000000000000000000000000..fa0b36a2b947332196dccc17457f9b88d9dde7fb Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252121009.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252127325.png b/content/en/post/2022/figures/zh-cn_image_0000001252127325.png new file mode 100644 index 0000000000000000000000000000000000000000..5d6ef1abffc202469448466d450404cbab350105 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252127325.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252128947.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252128947.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0926c62ee39836d310974a2fcd320bcc98c9d242 
Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252128947.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252129111.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252129111.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5377e9910e91ab654dad433d24a144dc8c545b7a Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252129111.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252131781.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252131781.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c18dee87cc5d4bfc66442620ae5ef365c380f748 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252131781.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252131783.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252131783.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e451ddcc59c67f213fbadb0873b9658ae40dec50 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252131783.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142637.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142637.gif new file mode 100644 index 0000000000000000000000000000000000000000..70646e3aebed5ba53deea2b4bc3ffdf91805ed64 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252142637.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142639.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142639.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252142639.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142643.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142643.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252142643.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142645.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142645.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252142645.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142649.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142649.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252142649.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142659.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142659.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252142659.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252142663.gif b/content/en/post/2022/figures/zh-cn_image_0000001252142663.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and 
b/content/en/post/2022/figures/zh-cn_image_0000001252142663.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252197021.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252197021.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bab2e9ebcbd4e0149c9c29dd89b5a47e3c278f08 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252197021.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252248517.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252248517.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec0921eb10ba4b47ce5bcbed2f37671684b91220 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252248517.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252248915.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252248915.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30d56f5fd034312c2a6de097ea0da02872a62cde Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252248915.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252249073.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252249073.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32267eb3adccc4e8da8ba7ef5a6fee4dc73491fe Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252249073.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252252279.png b/content/en/post/2022/figures/zh-cn_image_0000001252252279.png new file mode 100644 index 0000000000000000000000000000000000000000..ee03260c1114017c098616ad6919496bf4c36eb0 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252252279.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252252281.png b/content/en/post/2022/figures/zh-cn_image_0000001252252281.png new file mode 100644 index 0000000000000000000000000000000000000000..5551f4b9e9fd8b94d27a67f072eca449d4e84757 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252252281.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252262611.gif b/content/en/post/2022/figures/zh-cn_image_0000001252262611.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252262611.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252262625.gif b/content/en/post/2022/figures/zh-cn_image_0000001252262625.gif new file mode 100644 index 0000000000000000000000000000000000000000..f1f4ef0c05460c220b09ccd897f0336e3e3265c2 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252262625.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252341007.gif b/content/en/post/2022/figures/zh-cn_image_0000001252341007.gif new file mode 100644 index 0000000000000000000000000000000000000000..10cf74ea0fd6c4f98f395574611710262cc2f51d Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252341007.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252343171.png b/content/en/post/2022/figures/zh-cn_image_0000001252343171.png new file mode 100644 index 0000000000000000000000000000000000000000..50f6b660a383f2190e3f0e0c1424825ab2d15cd5 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252343171.png differ diff --git 
a/content/en/post/2022/figures/zh-cn_image_0000001252343211.png b/content/en/post/2022/figures/zh-cn_image_0000001252343211.png new file mode 100644 index 0000000000000000000000000000000000000000..df0575d8469a6813bfa359d5bf596a80933ccba5 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252343211.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252343303.png b/content/en/post/2022/figures/zh-cn_image_0000001252343303.png new file mode 100644 index 0000000000000000000000000000000000000000..550d3e815c93582cd76716d6e5f87ecdf5ca5cd7 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252343303.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252343507.png b/content/en/post/2022/figures/zh-cn_image_0000001252343507.png new file mode 100644 index 0000000000000000000000000000000000000000..7de7abad590c2246b95d3f559a7d0bfa1f291692 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252343507.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252412855.png b/content/en/post/2022/figures/zh-cn_image_0000001252412855.png new file mode 100644 index 0000000000000000000000000000000000000000..c42ff339d428983b28f0f19ed990bf743a457cc7 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252412855.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252463513.png b/content/en/post/2022/figures/zh-cn_image_0000001252463513.png new file mode 100644 index 0000000000000000000000000000000000000000..54dc20ba7b906ff980fd8c78029fc3475e0e33f6 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252463513.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252563289.png b/content/en/post/2022/figures/zh-cn_image_0000001252563289.png new file mode 100644 index 0000000000000000000000000000000000000000..b9b46d25da629f089c73bfbf6c85ae3cb13350de Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252563289.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252579733.jpg b/content/en/post/2022/figures/zh-cn_image_0000001252579733.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8ece6ba0c196a037ef02c33657c65d6fb6c0f09 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252579733.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252700965.gif b/content/en/post/2022/figures/zh-cn_image_0000001252700965.gif new file mode 100644 index 0000000000000000000000000000000000000000..bf93b81bb2dcc230e81ff930c06b0d97b899c711 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252700965.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252703087.png b/content/en/post/2022/figures/zh-cn_image_0000001252703087.png new file mode 100644 index 0000000000000000000000000000000000000000..7191ba77b86241a7388983442ba811a84a12fc2b Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252703087.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252703155.png b/content/en/post/2022/figures/zh-cn_image_0000001252703155.png new file mode 100644 index 0000000000000000000000000000000000000000..1decdffa2c685fbae2f39fbc25b2b664524f46aa Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252703155.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252703473.png 
b/content/en/post/2022/figures/zh-cn_image_0000001252703473.png new file mode 100644 index 0000000000000000000000000000000000000000..23105e8c997e1127c35cf871d1ac0d235a95aed1 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252703473.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001252803745.png b/content/en/post/2022/figures/zh-cn_image_0000001252803745.png new file mode 100644 index 0000000000000000000000000000000000000000..4180157997ea0d2ec282f60a4a3d5e7a9b534ac3 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001252803745.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001253422853.png b/content/en/post/2022/figures/zh-cn_image_0000001253422853.png new file mode 100644 index 0000000000000000000000000000000000000000..3b10332be8f58e0c03af456e0604109ee5db4e2b Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001253422853.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001256862067.gif b/content/en/post/2022/figures/zh-cn_image_0000001256862067.gif new file mode 100644 index 0000000000000000000000000000000000000000..c04be5cb6f6d2e0a974aa0f9a9133f6435f93efb Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001256862067.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001256862995.jpg b/content/en/post/2022/figures/zh-cn_image_0000001256862995.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a49980daf88fbb5de8a7e155827e723ec0104c26 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001256862995.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001256862999.gif b/content/en/post/2022/figures/zh-cn_image_0000001256862999.gif new file mode 100644 index 0000000000000000000000000000000000000000..5118bcf791e22a98df4d4df0d8da0879de1bb2d5 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001256862999.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001256981999.gif b/content/en/post/2022/figures/zh-cn_image_0000001256981999.gif new file mode 100644 index 0000000000000000000000000000000000000000..6068b15a50a1f3cc0574ab51d66b4a7a6b379f45 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001256981999.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001256982939.png b/content/en/post/2022/figures/zh-cn_image_0000001256982939.png new file mode 100644 index 0000000000000000000000000000000000000000..04a7bdd493c1557410c903c0d58b22ae0930edb1 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001256982939.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001257063005.gif b/content/en/post/2022/figures/zh-cn_image_0000001257063005.gif new file mode 100644 index 0000000000000000000000000000000000000000..1d26cd06f041eafd518d7bc15a7e924913ad563e Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001257063005.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001257142015.gif b/content/en/post/2022/figures/zh-cn_image_0000001257142015.gif new file mode 100644 index 0000000000000000000000000000000000000000..f53c01bb97471dcd52f9d4571d9b4159c1739d55 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001257142015.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001257142943.jpg b/content/en/post/2022/figures/zh-cn_image_0000001257142943.jpg new file mode 100644 
index 0000000000000000000000000000000000000000..9f6fcb791326728455ea10edffca6e3c63acfcc9 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001257142943.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001257142945.gif b/content/en/post/2022/figures/zh-cn_image_0000001257142945.gif new file mode 100644 index 0000000000000000000000000000000000000000..72bae1d09b9e4a5803363a6aebae0ed7718cfc82 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001257142945.gif differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001278996725.jpg b/content/en/post/2022/figures/zh-cn_image_0000001278996725.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9cf3ce39a9d748ce506ff3cfda39c51cbc803c0 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001278996725.jpg differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001279274373.png b/content/en/post/2022/figures/zh-cn_image_0000001279274373.png new file mode 100644 index 0000000000000000000000000000000000000000..443c4327143c5225557e35a85c2d72252d7b0829 Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001279274373.png differ diff --git a/content/en/post/2022/figures/zh-cn_image_0000001279474617.png b/content/en/post/2022/figures/zh-cn_image_0000001279474617.png new file mode 100644 index 0000000000000000000000000000000000000000..6b1226a31b6dbc83050524e4c164c5608b415e0c Binary files /dev/null and b/content/en/post/2022/figures/zh-cn_image_0000001279474617.png differ diff --git "a/content/en/post/2022/figures/\345\233\276\347\211\2071.png" "b/content/en/post/2022/figures/\345\233\276\347\211\2071.png" new file mode 100644 index 0000000000000000000000000000000000000000..281105f5e1958b671e00ab2c8a925a98404deca2 Binary files /dev/null and "b/content/en/post/2022/figures/\345\233\276\347\211\2071.png" differ diff --git a/content/en/post/2022/openGauss-AI-Capability-Upgrade-Building-a-New-AI-Native-Database.md b/content/en/post/2022/openGauss-AI-Capability-Upgrade-Building-a-New-AI-Native-Database.md new file mode 100644 index 0000000000000000000000000000000000000000..49092fd0039e0d20c5743bb6509ee33a92fe0d58 --- /dev/null +++ b/content/en/post/2022/openGauss-AI-Capability-Upgrade-Building-a-New-AI-Native-Database.md @@ -0,0 +1,72 @@ ++++ + +title = "openGauss AI Capability Upgrade, Building a New AI-Native Database" + +date = "2022-03-15" + +tags = [" AI-Native Database"] + +archives = "2022-03" + +author = "Tianqing Wang" + +summary = "openGauss AI Capability Upgrade, Building a New AI-Native Database" + +img = "/en/post/2022/title/img16.png" + +times = "17:30" + ++++ + +# openGauss AI Capability Upgrade, Building a New AI-Native Database + + + +What will happen when databases are combined with AI? The answers may vary among different database vendors, open-source communities, and teachers and students. Although it is difficult to form a uniform accurate concept, their answers all point to the same direction. Since the first version was released in the open-source community, openGauss has continuously evolved and contributed code in this field. openGauss 3.0.0 continues in this vein. + +In this release, the openGauss AI capability is changed in the following ways: + +1. The AI4DB functions are integrated into the open-source openGauss database autonomous platform. +2. The AI4DB capabilities are refactored to run plug-ins as services. +3. The Prometheus ecosystem is supported. +4. 
New features such as slow SQL root cause analysis and time series forecasting are added, and the existing AI capabilities are optimized.
+5. DB4AI supports more algorithms.
+
+## **Upgrading the Native DB4AI Engine**
+
+In this release, the native DB4AI engine supports more machine learning algorithms, such as the non-linear kernel functions of SVM and XGBoost. In addition, openGauss provides the Explain API for viewing model information.
+
+## **Supporting AI4DB Plug-ins as Services**
+
+The original openGauss AI4DB capability was an offline tool: it could not monitor the database in the background or periodically detect database problems. Therefore, in this release, openGauss implements a background monitoring service that periodically checks the status of the database system, forming the autonomous database platform DBMind. Diagnosis results are computed offline and persisted, and users can visualize them with software such as Grafana so that problems are detected and their root causes identified in a timely manner.
+
+Because the running status of the database system must be monitored periodically in the background, DBMind needs to connect to a monitoring platform that collects database metrics and supports offline computation. In this release, openGauss provides two types of exporters that interconnect with the Prometheus platform. The architecture is as follows:
+
+![](../figures/zh-cn_image_0000001235076358.jpg)
+
+openGauss-exporter obtains monitoring metrics from the database system, and reprocessing-exporter performs secondary processing on data already stored in Prometheus. Prometheus periodically scrapes both exporters. The DBMind system periodically fetches the time series data from Prometheus and performs parallel computing on the machine where DBMind is deployed. When the computation is complete, the results are stored in the meta-database. You can then query the diagnosis results from the meta-database and visualize them, for example by configuring Grafana.
+
+![](../figures/zh-cn_image_0000001278996725.jpg)
+
+The preceding figure shows an example of visualization using Grafana based on data in the meta-database.
+
+In this release, openGauss fully integrates the existing AI capabilities and redesigns them as plug-ins. For example, to invoke the reinforcement-learning-based parameter tuning function, run the following command:
+
+gs\_dbmind component xtuner tune ...
+
+That is, the **gs\_dbmind** command is the entry point to all AI functions, and the **component** subcommand invokes a specific AI function. You can run the following command to view the help information:
+
+gs\_dbmind component --help
+
+Based on this design, developers in the openGauss community who want to contribute a database AI function only need to ensure that it can be invoked through gs\_dbmind. In addition, the developed plug-ins can call all APIs provided by DBMind, for example, the data access interface \(DAI\) for obtaining data from Prometheus and the database access object \(DAO\) API for inserting data into the meta-database.
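+
+To make the interconnection concrete, the following is a minimal Prometheus scrape configuration for the two exporters. It is only a sketch: the host names and ports are illustrative placeholders, since the actual listen addresses are chosen when the exporters are deployed.
+
+```
+# prometheus.yml (sketch): let Prometheus scrape the two DBMind exporters.
+# Targets below are placeholders, not documented defaults.
+scrape_configs:
+  - job_name: 'opengauss-exporter'
+    static_configs:
+      - targets: ['db-host:9187']        # openGauss-exporter instance
+  - job_name: 'reprocessing-exporter'
+    static_configs:
+      - targets: ['dbmind-host:8181']    # reprocessing-exporter instance
+```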
+
+## **Comprehensively Improving the AI4DB AI Capabilities**
+
+In this release, openGauss upgrades existing functions such as index recommendation and time series forecasting. In addition, it provides root cause analysis and recommendations for slow SQL statements to help DBAs quickly identify them. Based on the monitored database running metrics, it uses an AI feature library identification algorithm to provide the causes of slow SQL statements together with confidence levels, and offers optimization suggestions.
+
+## **Laying a Foundation for the Development of a Comprehensive Database AI Autonomous Platform**
+
+As mentioned above, openGauss 3.0.0 integrates the historical AI capabilities and discards the burden left over from the earlier R&D process: the new DBMind platform is service-oriented, offline, plug-in-based, and freely composable. It is released with the database installation package, and you can use tools such as Grafana to customize and visualize the diagnosis results. \(We will provide a Grafana example.\)
+
+This lays a foundation for further upgrades of the DBMind platform. openGauss plans to add more AI functions to the platform this year, separate the platform from the existing code repository, and provide a native web front-end and back-end display platform. In addition, an automatic recovery function will be supported, enabling users to experience one-click, out-of-the-box database autonomous driving.
+
diff --git a/content/en/post/2022/openGauss-AI4DB-and-DB4AI.md b/content/en/post/2022/openGauss-AI4DB-and-DB4AI.md
new file mode 100644
index 0000000000000000000000000000000000000000..d41eef5d668fbe944e192c68591f1f29b7bd089b
--- /dev/null
+++ b/content/en/post/2022/openGauss-AI4DB-and-DB4AI.md
@@ -0,0 +1,113 @@
++++
+
+title = "openGauss AI4DB and DB4AI"
+
+date = "2021-01-19"
+
+tags = [ "openGauss AI4DB and DB4AI"]
+
+archives = "2021-01"
+
+author = "Tianqing Wang"
+
+summary = "openGauss AI4DB and DB4AI"
+
+img = "/en/post/2022/title/img9.png"
+
+times = "12:30"
+
++++
+
+# openGauss AI4DB and DB4AI
+
+The AI feature is one of the key features of openGauss. In earlier versions, openGauss provided the open-source AI parameter self-tuning and slow SQL discovery functions, which attracted attention from developers and users. To further improve the AI capabilities, new functions are added to the latest openGauss under the AI4DB and DB4AI features.
+
+The AI4DB feature empowers openGauss with AI technologies, simplifies database operation and management, and provides users with end-to-end self-O&M and self-optimization suites. In the new version, the **database intelligent index recommendation** and **database monitoring and exception detection** functions are added. The DB4AI feature provides AI computing capabilities inside the database: built-in AI algorithms give users inclusive AI capabilities. In the new version, the in-database **machine learning algorithm framework DeepSQL** is added. The following describes these new functions in detail.
+
+## 1. AI4DB
+
+- **Intelligent Database Index Recommendation**
+
+    In a large-scale relational database, index design and optimization are critical to the execution efficiency of SQL statements. For a long time, database administrators have manually designed and adjusted indexes based on prior knowledge and experience. This consumes a lot of time and manpower, and manual design cannot guarantee an optimal index.
+
+    openGauss provides the intelligent index recommendation function.
This function automates and standardizes the index design process and recommends the optimal index for a single query statement or workload, improving job efficiency and reducing O&M operations of database management personnel. The intelligent index recommendation function of the openGauss covers multiple application scenarios and includes the following features: + +- 1. Index recommendation for a single query statement + + This feature generates recommended indexes for a single query statement entered by a user based on the semantic information of the query statement and database statistics. + + You can use the **gs\_index\_advise** system function of openGauss to recommend single-column and combined indexes. A specific example is as follows. The recommendation result of the index includes the corresponding table name and column name. + + ![](../figures/zh-cn_image_0000001207280996.jpg) + +- 2. Virtual index + + This feature can simulate the creation of real indexes and avoid the time and space overhead required for creating real indexes. You can use the optimizer to evaluate the impact of virtual indexes on specified query statements. + + This feature provides a series of operations, including creating and deleting virtual indexes, and evaluating performance and storage space overhead. You can use the openGauss system functions to flexibly operate virtual indexes. Examples of some operations are as follows: + + - Use the **hypopg\_create\_index** system function to create a virtual index. The input parameter is the SQL statement for creating the index. + + ![](../figures/zh-cn_image_0000001251841009.jpg) + + - By executing EXPLAIN for a specific query statement, you can evaluate the index performance based on the execution plan provided by the optimizer. + + The execution plan before the virtual index is created is as follows: + + ![](../figures/zh-cn_image_0000001207121020.jpg) + + After the virtual index is created, the execution plan is changed as follows: + + ![](../figures/zh-cn_image_0000001206961046.jpg) + + By comparing the two execution plans, you can find that the index will reduce the execution time of the specified query statement. Based on this conclusion, you can create a real index. + + +- 3. Workload-based index recommendation + + With the workload consisting of multiple DML statements as the input, the algorithm can recommend a batch of indexes to be created. These indexes can be used to optimize the overall workload. The following figure shows the process of the algorithm. + + ![](../figures/zh-cn_image_0000001252121009.png) + + Based on the preceding two features, this feature compresses the workload, filters out a batch of typical SQL statements, and generates candidate indexes for each SQL statement using the index recommendation function of a single query statement. Finally, further filtering is performed by using the virtual index function, and the index that has the largest positive contribution to the workload is used as the output. + + **Database Metric Monitoring and Exception Detection** + + Database metrics can reflect the health status of the database. Abnormal user operations or database performance deterioration may cause changes in database metrics. Therefore, it is necessary to monitor database metrics. The benefits are as follows: + + (1) This helps you understand the running status of the database from multiple perspectives and better plan the database. 
+
+    \(2\) This helps users detect database exceptions and potential performance problems in advance and reports them in a timely manner, avoiding unnecessary loss.
+
+    Anomaly-detection is an AI tool integrated into openGauss. It can be used to collect and forecast database metrics, and to monitor and diagnose exceptions.
+
+    The following figure shows the anomaly-detection structure.
+
+    ![](../figures/zh-cn_image_0000001207280998.jpg)
+
+    This tool consists of an agent and a detector. The agent is the data collection module: it collects database metric data and pushes it to the detector. The detector is the exception detection module, with three functions: 1. collect the data pushed by the agent and store it locally; 2. perform exception detection on the collected metric data; 3. push exception information to users.
+
+    Currently, the database metrics collected by the tool by default include IO\_Read, IO\_Write, CPU\_Usage, Memory\_Usage, and the disk space occupied by the database. Based on the collected data, the tool predicts the change trend of each metric and detects exceptions, providing functions such as insufficient-disk-space warning, memory-leak warning, and CPU-resource-consumption warning, thereby preventing unnecessary loss caused by database exceptions.
+
+    Anomaly-detection provides functions such as one-click deployment, one-click startup or shutdown, and metric forecasting, and is easy to use. In addition, you can quickly add new monitoring parameters or time series forecasting algorithms based on your service scenario.
+
+## 2. DB4AI
+
+The database management system can conveniently add, delete, modify, and query data records by constructing an ordered file organization structure. In the AI field, people use the computing power of computers to analyze and mine data. Data storage and computing are the key to data governance.
+
+In traditional scenarios, a user who wants to analyze and train on data stored in a database usually has to extract the data from the storage system, cache it in memory, and then run the analysis or model training with third-party Python packages such as TensorFlow and scikit-learn. This development process is inefficient for several reasons. First, it spans the Python language, third-party machine learning packages, and the database, so the technology stack is fragmented. Second, performance is poor: training data usually has to be transmitted over the network, which incurs heavy transmission overhead for large data volumes, and the computation is not performed where the data resides. Moreover, the skill levels of developers vary, so the computing power of the CPU or GPU is often not fully exploited. In data-sensitive fields, extracting data also requires operations such as obtaining permissions and anonymization, which further increases costs. Integrating AI computing capabilities into the database is therefore highly advantageous: data can be computed locally, and the optimization capability of the database can be used to select the optimal execution plan. In the end, a single SQL statement can deliver faster model training and prediction than implementing the algorithms yourself.
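+
+As an illustration of what single-statement, in-database training looks like, the following MADlib-style call trains a logistic regression model directly on a table. This is only a sketch: the table and column names are hypothetical, and the exact function set available depends on the algorithms compiled into DeepSQL.
+
+```
+-- Train a logistic regression model inside the database (illustrative names).
+SELECT madlib.logregr_train(
+    'patients',                            -- source table
+    'patients_logregr',                    -- output table holding the model
+    'second_attack',                       -- dependent (label) column
+    'ARRAY[1, treatment, trait_anxiety]'   -- independent variables (1 = intercept)
+);
+```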
+
+DeepSQL is compatible with the Apache MADlib ecosystem and supports more than 60 common algorithms, including regression algorithms \(such as linear regression, logistic regression, and random forest\), classification algorithms \(such as KNN\), and clustering algorithms \(such as k-means\). In addition to basic machine learning algorithms, graph-related algorithms are included, such as shortest-path and graph-diameter algorithms. DeepSQL also supports data processing methods \(such as PCA\), a sparse vector data format, common statistical algorithms \(such as covariance and Pearson coefficient calculation\), training/test set splitting, and cross validation.
+
+In addition to the algorithms obtained through MADlib compatibility, DeepSQL supports three other common algorithms: prophet, GBDT, and XGBoost.
+
+The time series forecasting algorithm prophet is based on time series decomposition and local Bayesian methods. It is an open-source algorithm provided by Facebook and a practical forecasting algorithm in engineering scenarios. Compared with other time series forecasting algorithms, prophet is faster, more accurate, and more robust, and its computation workload is far smaller than that of an RNN.
+
+GBDT and XGBoost are boosting algorithms that use regression trees to fit residuals.
+
+The GBDT implementation uses the tree module in the MADlib framework. It inherits the style of the MADlib function APIs and uses input parameters to set the hyperparameters of the model. The algorithm supports regression and classification tasks; by default, the model is a regression model. Mean squared error \(MSE\) is used to calculate the residual of the previous base learner in each iteration, and the branching strategy of each regression tree node is chosen by minimizing the squared error.
+
+With the XGBoost algorithm integrated, the gs\_xgboost module provides SQL-like APIs that are compatible with the MADlib style and support classification and regression tasks. The gs\_xgboost module supports model hyperparameter selection and model evaluation through grid search.
+
+These are the latest open-source AI features of openGauss. Come and experience them. If you have any comments or suggestions, feel free to contact us in the open-source community. We would love to hear your thoughts and will take them as the direction and motivation for improvement. We believe that with the joint efforts of developers and users, the convergence of openGauss and AI will be continuously strengthened to bring more intelligent and better services to users.
diff --git a/content/en/post/2022/openGauss-Database-Performance-Optimization.md b/content/en/post/2022/openGauss-Database-Performance-Optimization.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b5a4e3cba6782dfc51e26f33896d3016c712c53
--- /dev/null
+++ b/content/en/post/2022/openGauss-Database-Performance-Optimization.md
@@ -0,0 +1,557 @@
++++
+
+title = "openGauss Database Performance Optimization"
+
+date = "2020-08-13"
+
+tags = [ "openGauss Database Performance Optimization"]
+
+archives = "2020-08"
+
+author = "Yansong LI"
+
+summary = "openGauss Database Performance Optimization"
+
+img = "/en/post/2022/title/img7.png"
+
+times = "12:30"
+
++++
+
+# openGauss Database Performance Optimization
+
+## Overview
+
+This document describes the key system-level configurations that the openGauss database requires to achieve optimal performance on the openEuler OS running on TaiShan servers.
+
+## Hardware Specifications
+
+CPU: Kunpeng 920 \(Hi1620\) ARM AArch64 \(64 cores\) x 2
+
+Memory: ≥ 512 GB
+
+Disk: NVMe SSD \(\> 1 TB\) x 4
+
+NIC: 1822 10GE NIC. Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family \(4\*25GE\) \(rev 45\)
+
+## Software Specifications
+
+OS: openEuler 20.03 \(LTS\)
+
+Database: openGauss 1.0.0
+
+Benchmark: benchmarksql-5.0
+
+JDK: jdk1.8.0\_212
+
+Ant: apache-ant-1.9.15
+
+The following sections optimize the database step by step, covering the BIOS, operating system, file system, network, and core binding, and then construct the TPC-C test data. The tools used are:
+
+- Third-party tools: JDK, Ant, BenchmarkSQL
+- Linux tools: htop, iostat
+
+For details about how to install and use BenchmarkSQL, htop, and iostat, see _Benchmark Usage_ \(https://opengauss.org/zh/blogs/blogs.html?post/optimize/opengauss-tpcc/\).
+
+## BIOS Settings
+
+Log in to the server management system, restart the server, enter the BIOS screen, modify the BIOS settings, and restart the server. \(The server management system depends on the actual environment.\)
+
+- 1. After the machine self-check, startup options are displayed.
+
+    ![](../figures/zh-cn_image_0000001251960129.jpg)
+
+- 2. Press **Del** to enter the BIOS screen.
+
+    ![](../figures/zh-cn_image_0000001206760224.jpg)
+
+- 3. Enter the BIOS password.
+
+    ![](../figures/zh-cn_image_0000001206920214.jpg)
+
+- 4. Restore the factory settings.
+
+    Press **F9** to restore the factory settings. It is recommended that you restore the factory settings first because many default BIOS settings may have been changed.
+
+- **5. **Modify the BIOS settings.
+
+    The modifications include:
+
+    ```
+    # Choose BIOS > Advanced > MISC Config and set Support Smmu to Disabled.
+    # Choose BIOS > Advanced > MISC Config and set CPU Prefetching Configuration to Disabled.
+    # Choose BIOS > Advanced > Memory Config and set Die Interleaving to Disable.
+    ```
+
+    ![](../figures/zh-cn_image_0000001251640179.jpg)![](../figures/zh-cn_image_0000001251640181.jpg)
+
+- **6. **Save the BIOS settings and restart the server.
+
+    Press **F10** to save the settings and exit. Restart the system.
+
+    ![](../figures/zh-cn_image_0000001206760228.jpg)
+
+
+## OS Configuration
+
+- Optimizing the OS Configuration
+
+    Disable **irqbalance**: if a GaussDB process and a client contend for CPU resources, CPU usage becomes unbalanced. If htop shows that some CPUs are overloaded while others are idle, check whether **irqbalance** has been disabled.
+ + ![](../figures/zh-cn_image_0000001206760226.jpg) + + ``` + service irqbalance stop + echo 0 > /proc/sys/kernel/numa_balancing + echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled + echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag + echo none > /sys/block/nvme*n*/queue/scheduler ## Setting the I/O queue scheduling mechanism for NVMe drives + ``` + + +## File System Configuration + +- Change the block size of the XFS file system to 8 KB. + + \(1\) Check the existing block sizes of the mount points corresponding to the NVMe drives. Run the following command to check the NVMe drives that are mounted: + + ``` + df -h | grep nvme + /dev/nvme0n1 3.7T 2.6T 1.2T 69% /data1 + /dev/nvme1n1 3.7T 1.9T 1.8T 51% /data2 + /dev/nvme2n1 3.7T 2.2T 1.6T 59% /data3 + /dev/nvme3n1 3.7T 1.4T 2.3T 39% /data4 + ``` + + You can run the **xfs\_info** command to view information about the NVMe drives. + + xfs\_info /data1 + + ![](../figures/zh-cn_image_0000001251800179.gif) + + In the preceding figure, the block size is 8 KB and does not need to be changed. If the data block size is not 8 KB, back up and format the data. + + \(2\) Back up the data on the disk to be formatted. + + Back up the required data to other disks or machines as required. + + \(3\) Format the disk and set the block size to 8 KB. + + Take the **/dev/nvme0n1** disk and the **/data1** mount point as an example. The commands are as follows: + + ``` + umount /data1 + mkfs.xfs -b size=8192 /dev/nvme0n1 -f + mount /dev/nvme0n1 /data1 + ``` + + \(4\) Run the **xfs\_info** command again to check whether the block size is set correctly. + + +## Network Configuration + +- **1. **Multi-Queue Interrupt Settings + + As TaiShan servers have a large number of cores, NIC multi-queues need to be configured on servers and clients. The recommended configuration is as follows: 16 interrupt queues are configured for NICs on servers, and 48 interrupt queues are configured for NICs on clients. + + Multi-queue Interrupt Setting Tool \(1822-FW\) + + You can obtain the released Hi1822 NIC version from the following link: https://support.huawei.com/enterprise/en/intelligent-accelerator-components/in500-solution-pid-23507369/software. IN500 solution 5.1.0.SPC401 and later versions support multi-queues. + + - \(1\) Decompress **Hi1822-NIC-FW.zip**, go to the directory, and install hinicadm as user **root**. + + ![](../figures/zh-cn_image_0000001251960131.gif) + + - \(2\) Determine the NIC to which the currently connected physical port belongs. The network port and NIC name vary according to the hardware platform. In the following example, the private network port enp3s0 is used and belongs to the hinic0 NIC. + + ![](../figures/zh-cn_image_0000001251960133.gif)![](../figures/zh-cn_image_0000001206920220.gif) + + - \(3\) Go to the **config** directory and use the hinicconfig tool to configure the interrupt queue firmware configuration file. + + 64-queue configuration file: std\_sh\_4x25ge\_dpdk\_cfg\_template0.ini; + + 16-queue configuration file: std\_sh\_4x25ge\_nic\_cfg\_template0.ini; + + Set the number of queues for hinic0 to different values. \(The default value is **16** and it can be changed as needed.\) + + ./hinicconfig hinic0 -f std\_sh\_4x25ge\_dpdk\_cfg\_template0.ini + + Restart the OS for the modification to take effect. Run the **ethtool -l enp3s0** command to view the result. In the following figure, 32 is displayed. 
+
+    ![](../figures/zh-cn_image_0000001206760230.gif)
+
+    Run the **ethtool -L enp3s0 combined 48** command to change the value of **combined**. \(The optimal value varies according to the platform and application. For the 128-core platform, the optimal value on the server is **16** and that on the client is **48**.\)
+
+
+- **2. **Interrupt Tuning
+
+    When the openGauss database is fully loaded \(CPU usage greater than 90%\), the CPU becomes the bottleneck. In this case, offload packet segmentation and aggregation work to the NIC:
+
+    ```
+    ethtool -K enp3s0 tso on
+    ethtool -K enp3s0 lro on
+    ethtool -K enp3s0 gro on
+    ethtool -K enp3s0 gso on
+    ```
+
+    Take the 1620 platform as an example. The NIC interrupts are bound to the last four cores of each NUMA node, and each core is bound to three interrupts. The interrupt core-binding script is as follows. This script is called by gs\_preinstall during openGauss installation; for details, see the product installation guide.
+
+    ![](../figures/zh-cn_image_0000001251960135.gif)
+
+    ```
+    sh bind_net_irq.sh 16
+    ```
+
+- **3. **Confirming and Updating the NIC Firmware
+
+    Check whether the firmware version of the private NIC in the current environment is 2.5.0.0.
+
+    ```
+    ethtool -i enp3s0
+    driver: hinic
+    version: 2.3.2.11
+    firmware-version: 2.5.0.0
+    expansion-rom-version:
+    bus-info: 0000:03:00.0
+    ```
+
+    If the version is 2.5.0.0, you are advised to replace it with 2.4.1.0 for better performance.
+
+    NIC Firmware Update Procedure
+
+    \(1\) Upload the NIC firmware driver to the server. The firmware file is **Hi1822\_nic\_prd\_1h\_4x25G.bin**.
+
+    \(2\) Run the following command as user **root**:
+
+    **hinicadm updatefw -i** _\<physical NIC device name\>_ **-f** _\<firmware file path\>_
+
+    _Physical NIC device name_ indicates the NIC name in the system. For example, **hinic0** indicates the first NIC, and **hinic1** indicates the second NIC. For details about how to query the NIC name, see "Multi-Queue Interrupt Settings." For example:
+
+    ```
+    # hinicadm updatefw -i <physical NIC device name> -f <firmware file path>
+    Please do not remove driver or network device
+    Loading...
+    [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] [100%] [\]
+    Loading firmware image succeed.
+    Please reboot OS to take firmware effect.
+    ```
+
+    \(3\) Restart the server and check whether the firmware version of the private NIC has been updated to 2.4.1.0.
+
+    ```
+    ethtool -i enp3s0
+    driver: hinic
+    version: 2.3.2.11
+    firmware-version: 2.4.1.0
+    expansion-rom-version:
+    bus-info: 0000:03:00.0
+    ```
+
+    The firmware version of the private NIC is successfully updated.
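+
+    Before moving on to core binding, you can quickly confirm the NIC state produced by the preceding steps. The device name below is illustrative; replace it with your private NIC port:
+
+    ```
+    # Check that the offload switches set with ethtool -K took effect.
+    ethtool -k enp3s0 | grep -E 'tcp-segmentation-offload|generic-segmentation-offload|generic-receive-offload|large-receive-offload'
+    # Re-check the firmware version after the reboot.
+    ethtool -i enp3s0 | grep firmware-version
+    ```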
For details about the core binding for network interruption, see the previous section. + + \(2\) The thread pool mechanism is introduced in openGauss. When the database is started, the thread pool creates a specified number of threads to provide services. When a thread is created, it is bound to a core. Therefore, the core binding information of the NIC needs to be transferred through the GUC parameter, to facilitate core binding configuration during system running. The following figure shows the parameters when 128 cores are used. + + ![](../figures/7.png) + + Total number of threads = \(Number of CPUs – Number of CPUs processing the network\) x Number of threads per core \(7.25 is recommended\) = \(128 – 16\) x 7.25 = 812. The number of NUMA nodes is 4, and the number of cores for processing interrupts is 16. + + The following is an example of CPU binding for auxiliary allocation: + + ``` + numactl -C 0-27,32-59,64-91,96-123 gaussdb --single_node -D {DATA_DIR} -p {PORT} & + ``` + + Or + + ``` + numactl --interleave=all gaussdb --single_node -D {DATA_DIR} -p {PORT} & + ``` + +- **2. **Server Parameter Setting + + The **- advance\_xlog\_file\_num = 10** parameter is added to the **postgresql.conf** file. + + This parameter indicates that the background thread BackgroundWALWriter periodically checks and initializes the next 10 XLogs in advance to avoid initializing XLogs only when transactions are committed, reducing the transaction commit delay. This parameter is valid only in the performance pressure test. Generally, you do not need to set this parameter. The default value is **0**, indicating that no initialization is performed in advance. - numa\_distribute\_mode = 'all' + + This parameter can be set to **all** or **none**. The value **all** indicates that NUMA optimization is enabled. Working threads and corresponding PGPROC and WALInsertlock are grouped and bound to corresponding NUMA nodes to reduce CPU remote memory access on key paths. The default value is **none**, indicating that the NUMA distribution feature is disabled. It is used only when multiple NUMA nodes are involved and the cost of remote fetch is obviously higher than that of local fetch. You are advised to enable this function during performance pressure tests. 
+ + **thread\_pool\_attr** configuration: + + thread\_pool\_attr = '812,4,\(cpubind: 0-27,32-59,64-91,96-123\)' + + Parameter description: + + ``` + max_connections = 4096 + allow_concurrent_tuple_update = true + audit_enabled = off + checkpoint_segments = 1024 + checkpoint_timeout = 15min + cstore_buffers = 16MB + enable_alarm = off + enable_codegen = false + enable_data_replicate = off + full_page_writes = on + max_files_per_process = 100000 + max_prepared_transactions = 2048 + shared_buffers = 350GB + use_workload_manager = off + wal_buffers = 1GB + work_mem = 1MB + log_min_messages = FATAL + transaction_isolation = 'read committed' + default_transaction_isolation = 'read committed' + synchronous_commit = on + fsync = on + maintenance_work_mem = 2GB + vacuum_cost_limit = 2000 + autovacuum = on + autovacuum_mode = vacuum + autovacuum_max_workers = 5 + autovacuum_naptime = 20s + autovacuum_vacuum_cost_delay = 10 + xloginsert_locks = 48 + update_lockwait_timeout = 20min + + enable_mergejoin = off + enable_nestloop = off + enable_hashjoin = off + enable_bitmapscan = on + enable_material = off + + wal_log_hints = off + log_duration = off + checkpoint_timeout = 15min + autovacuum_vacuum_scale_factor = 0.1 + autovacuum_analyze_scale_factor = 0.02 + enable_save_datachanged_timestamp = false + + log_timezone = 'PRC' + timezone = 'PRC' + lc_messages = 'C' + lc_monetary = 'C' + lc_numeric = 'C' + lc_time = 'C' + + enable_thread_pool = on + thread_pool_attr = '812,4,(cpubind:0-27,32-59,64-91,96-123)' + enable_double_write = off + enable_incremental_checkpoint = on + enable_opfusion = on + advance_xlog_file_num = 10 + numa_distribute_mode = 'all' + + track_activities = off + enable_instr_track_wait = off + enable_instr_rt_percentile = off + track_counts = on + track_sql_count = off + enable_instr_cpu_timer = off + + plog_merge_age = 0 + session_timeout = 0 + + enable_instance_metric_persistent = off + enable_logical_io_statistics = off + enable_page_lsn_check = off + enable_user_metric_persistent = off + enable_xlog_prune = off + + enable_resource_track = off + instr_unique_sql_count=0 + enable_beta_opfusion=on + enable_beta_nestloop_fusion=on + ``` + + +- **3. **Configuring Core Binding for the TPC-C Client + + The client uses numactl to bind the client to cores except the NIC. The following figure uses a 128-core environment as an example. A total of 80 cores are used to process service logic, and the remaining 48 cores are used to process network interruption. + + ![](../figures/zh-cn_image_0000001207080190.gif) + + The corresponding tpmC program is as follows: + + ``` + numactl -C 0-19,32-51,64-83,96-115 ./runBenchmark.sh props.pg + ``` + + Other cores are used to process network interruptions. + + +## Constructing TPC-C Initial Data + +- **1. **Modify benchmark configurations. + + Copy **props.pg** and rename it **props.opengauss.1000w**. Edit the file and replace the following configuration in the file: + + ``` + cp props.pg props.opengauss.1000w + vim props.opengauss.1000w + db=postgres + driver=org.postgresql.Driver + // Modify the connection string, including the IP address, port number, and database. + conn=jdbc:postgresql://ip:port/tpcc1000?prepareThreshold=1&batchMode=on&fetchsize=10 + // Set the user name and password for logging in to the database. + user=user + password=****** + + warehouses=1000 + loadWorkers=200 + + // Set the maximum number of concurrent tasks, which is the same as the maximum number of work tasks on the server. 
+    terminals=812
+    //To run specified transactions per terminal- runMins must equal zero
+    runTxnsPerTerminal=0
+    //To run for specified minutes- runTxnsPerTerminal must equal zero
+    runMins=5
+    //Number of total transactions per minute
+    limitTxnsPerMin=0
+
+    //Set to true to run in 4.x compatible mode. Set to false to use the
+    //entire configured database evenly.
+    terminalWarehouseFixed=false
+
+    //The following five values must add up to 100
+    //The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec
+    newOrderWeight=45
+    paymentWeight=43
+    orderStatusWeight=4
+    deliveryWeight=4
+    stockLevelWeight=4
+
+    // Directory name to create for collecting detailed result data.
+    // Comment this out to suppress.
+    resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
+    osCollectorScript=./misc/os_collector_linux.py
+    osCollectorInterval=1
+    // Collect OS load information.
+    //osCollectorSSHAddr=osuer@10.44.133.78
+    //osCollectorDevices=net_enp3s0 blk_nvme0n1 blk_nvme1n1 blk_nvme2n1 blk_nvme3n1
+    ```
+
+- **2. **Prepare for importing the TPC-C data.
+
+    \(1\) Replace the **tableCreates.sql** file.
+
+    Download the **tableCreates.sql** file \(at https://blog.opengauss.org/zh/post/optimize/images/tableCreates.sql\). Use it to replace the corresponding file in **benchmarksql-5.0/run/sql.common/** of BenchmarkSQL.
+
+    The file is modified as follows:
+
+    ◾ Two tablespaces are added.
+
+    ```
+    CREATE TABLESPACE example2 relative location 'tablespace2';
+    CREATE TABLESPACE example3 relative location 'tablespace3';
+    ```
+
+    ◾ The **bmsql\_hist\_id\_seq** sequence is deleted.
+
+    ◾ The FILLFACTOR attribute is added to each table.
+
+    ```
+    create table bmsql_stock (
+      s_w_id integer not null,
+      .....
+      s_dist_10 char(24)
+    ) WITH (FILLFACTOR=80) tablespace example3;
+    ```
+
+    \(2\) Modify the **indexCreates.sql** file.
+
+    Modify the **run/sql.common/indexCreates.sql** file.
+
+    ![](../figures/zh-cn_image_0000001207240170.gif)
+
+    Modify the content in the red box in the preceding figure as follows:
+
+    ![](../figures/zh-cn_image_0000001206920224.gif)
+
+    Add the content marked in red in the following figure to the file so that the benchmark tool automatically places the generated data in the different tablespaces. If this content is not added, you have to redistribute the data in the database after the benchmark tool generates it.
+
+    ![](../figures/zh-cn_image_0000001251800185.jpg)
+
+    \(3\) Modify the **runDatabaseBuild.sh** file as shown in the following figure to avoid unsupported foreign keys during data generation.
+
+    ![](../figures/zh-cn_image_0000001251800187.jpg)
+
+- **3. **Import the data.
+
+    Execute **runDatabaseBuild.sh** to import the data.
+
+- **4. **Back up the data.
+
+    To facilitate repeated tests and reduce the time spent importing data, you can back up the imported data. A common method is to stop the database and copy the entire data directory. A reference script for restoration is as follows:
+
+    ```
+    #!/bin/bash
+    rm -rf /ssd/omm108/gaussdata
+    rm -rf /usr1/omm108dir/tablespace2
+    rm -rf /usr2/omm108dir/tablespace3
+    rm -rf /usr3/omm108dir/pg_xlog
+    cp -rf /ssd/omm108/gaussdatabf/gaussdata /ssd/omm108/ &
+    job0=$!
+    cp -rf /usr1/omm108dir/tablespace2bf/tablespace2 /usr1/omm108dir/ &
+    job1=$!
+    cp -rf /usr2/omm108dir/tablespace3bf/tablespace3 /usr2/omm108dir/ &
+    job2=$!
+    cp -rf /usr3/omm108dir/pg_xlogbf/pg_xlog /usr3/omm108dir/ &
+    job3=$!
+    wait $job1 $job2 $job3 $job0
+    ```
+
+- **5. **Partition the data disks.
+
+    During the performance test, data needs to be distributed across different storage media to increase I/O throughput. The data can be distributed across the four NVMe drives on the server. Place the **pg\_xlog**, **tablespace2**, and **tablespace3** directories on the other three NVMe drives, and create soft links from the original locations to the actual ones. **pg\_xlog** is in the database directory, and **tablespace2** and **tablespace3** are in the **pg\_location** directory. For example, run the following commands to relocate **tablespace2**:
+
+    ```
+    mv $DATA_DIR/pg_location/tablespace2 $TABSPACE2_DIR/tablespace2
+    cd $DATA_DIR/pg_location/
+    ln -svf $TABSPACE2_DIR/tablespace2 ./
+    ```
+
+- **6. **Run the TPC-C program.
+
+    ```
+    numactl -C 0-19,32-51,64-83,96-115 ./runBenchmark.sh props.opengauss.1000w
+    ```
+
+- **7. **Monitor performance.
+
+    Use htop to monitor the CPU usage of the database server and the TPC-C client. In an extreme performance test, the CPU usage of each service is greater than 90%. If the CPU usage does not reach this level, the core binding may be incorrect and needs to be adjusted.
+
+    ![](../figures/zh-cn_image_0000001251760151.jpg)
+
+    In the preceding figure, the CPUs in the yellow box are used to process network interrupts.
+
+- **8. **View the monitoring status after tuning.
+
+    After tuning, htop shows the expected state.
+
+    ![](../figures/zh-cn_image_0000001251760153.jpg)
+
+    Database tuning is a tedious task: you need to repeatedly modify configurations, run TPC-C, and debug until the optimal performance configuration is reached.
+
+    TPC-C running result:
+
+    ![](../figures/zh-cn_image_0000001206760240.gif)
+
+
diff --git a/content/en/post/2022/openGauss-Log-Consensus-Framework.md b/content/en/post/2022/openGauss-Log-Consensus-Framework.md
new file mode 100644
index 0000000000000000000000000000000000000000..caa2831f64a67a426c702229aac1b6e4f26c07d6
--- /dev/null
+++ b/content/en/post/2022/openGauss-Log-Consensus-Framework.md
@@ -0,0 +1,230 @@
++++
+
+title = "openGauss Log Consensus Framework"
+
+date = "2021-09-29"
+
+tags = [ "openGauss Log Consensus Framework"]
+
+archives = "2021-09"
+
+author = "Xilin Hu"
+
+summary = "openGauss Log Consensus Framework"
+
+img = "/en/post/2022/title/img9.png"
+
+times = "12:30"
+
++++
+
+# openGauss Log Consensus Framework
+
+Distributed consistency is a fundamental problem of distributed systems: how does a distributed system reach strong consistency on a value \(a resolution\), and thereby solve the system's high-availability problem? Paxos is the most important distributed consistency algorithm, and many people use its name as a synonym for distributed consistency protocols.
+
+The Paxos theory was put forward many years ago, and products using Paxos and its variant protocols keep emerging, but industrial-grade, third-party independent libraries and open-source projects are still rare. Well-known open-source products that draw on the Paxos protocol include ZooKeeper and etcd, but their protocols do not support high-throughput state machine replication, and they do not provide an independent third-party library that other systems can integrate quickly.
+
+Therefore, the DCF feature is designed and implemented to support the distributed strong-consistency scenarios involved in openGauss.
+
+## 1 What is DCF?
+
+DCF stands for distributed consensus framework. Typical algorithms for solving distributed consistency problems are Paxos and Raft.
+DCF implements the Paxos algorithm and provides capabilities such as log replication and cluster HA. It supports multiple types of nodes based on the Paxos protocol, and node roles can be adjusted as required. Log replication supports dynamic traffic adjustment, minority forcible startup, and automatic primary selection.
+
+DCF is a high-performance, mature, reliable, scalable, and easy-to-use independent basic library. Other systems can easily interconnect with DCF through its APIs to obtain the strong consistency, high availability, and automatic disaster recovery capabilities provided by the Paxos algorithm.
+
+![](../figures/图片1.png)
+
+As shown in the preceding figure, DCF consists of the algorithm module, storage module, communication module, and service layer.
+
+- Algorithm module:
+
+    The algorithm module is implemented based on the Multi-Paxos protocol. Driven by its service scenarios and by requirements for high performance and a broad ecosystem, DCF extends the basic Multi-Paxos protocol with many functions and performance optimizations, and its performance is significantly improved in multiple deployment scenarios. The module mainly consists of a leader election module, a log replication module, a metadata module, and a cluster management module.
+
+- Storage module:
+
+    For specific service scenarios and optimal performance, DCF extracts a set of public interfaces for log storage and implements a default high-performance storage module. If you have a specific scenario or particular requirements on performance and cost, you can plug an existing storage system into the DCF log storage interface. This is one of the advantages of DCF as an independent third-party library.
+
+- Communication module:
+
+    The communication module is implemented on top of the message exchange component \(MEC\). It provides communication between DCF component instances and an asynchronous event processing framework. Its main functions are: multiple extensible communication protocols; unicast, broadcast, and loopback sending APIs; an asynchronous message processing framework; a multi-channel mechanism; multi-priority queues; compression; and batch sending.
+
+
+- Service layer:
+
+    The service layer drives the running of the entire DCF and provides the basic services required by the program, such as locks, asynchronous task scheduling, the thread pool service, and timers.
+
+
+## 2 What Can DCF Do?
+
+- 2.1 Adding and Deleting Nodes Online and Transferring the Leader Role Online
+
+    Based on standard Multi-Paxos, DCF can add or delete nodes online and transfer the leader role to another node online. This suits a wide range of service scenarios and helps build a development ecosystem.
+
+- 2.2 Priority-based Primary Selection and Policy-based Majority
+
+    **Policy-based majority:** In classic Paxos theory, data can be committed once a majority reaches agreement. However, the majority is not a fixed set of nodes, so it cannot guarantee that one or more particular nodes hold complete data. In practice, nodes that are geographically close to each other usually hold strongly consistent data, while nodes that are far away are often in a non-strongly-consistent state, so they cannot be activated as primary nodes when a city-level disaster recovery is required.
+    The policy-based majority capability allows users to dynamically designate one or more nodes whose data must be kept strongly consistent, so that when a disaster recovery need arises, such a node can be activated as the primary node immediately.
+
+    **Priority-based primary selection:** You can specify a priority for each node. DCF selects the primary node strictly by the specified priorities and activates a lower-priority node only when all higher-priority nodes are unavailable.
+
+- 2.3 Diversified Node Roles
+
+    In addition to the typical leader, follower, and candidate roles, DCF provides custom roles, such as the passive role \(holds logs and data, but has no right to be elected or to participate in majority voting\) and the logger role \(holds logs and participates in majority voting, but has no data and no right to be elected\). With these node roles, DCF supports flexible cluster deployment modes, such as synchronous deployment and synchronous/asynchronous hybrid deployment.
+
+- 2.4 Batch & Pipeline
+
+    Batch: DCF supports batching at multiple levels, including \(1\) combining multiple logs into a single message for sending, \(2\) combining multiple logs for writing to disk, and \(3\) combining multiple logs for replication. Batching effectively reduces the per-message overhead and improves throughput.
+
+    Pipeline: A message is sent to the target node before the result of the previous message has been returned. By increasing the number of in-flight messages \(pipelines\), the latency of a single request can be effectively reduced and performance improved. DCF works asynchronously in multiple stages, such as log persistence, network transmission, and log replication, to maximize pipeline performance.
+
+- 2.5 Efficient Flow Control Algorithm
+
+    Batching and pipelining improve the throughput of the entire system, but an overly large batch size lengthens the latency of a single request and leads to too many concurrent requests, which in turn hurts both throughput and latency. Therefore, DCF implements an efficient, adaptive flow control algorithm that automatically probes parameters such as network bandwidth, network send latency, and the number of concurrent requests, and adjusts the batch and pipeline parameters to control the injection of service traffic.
+
+The flow control process is as follows:
+
+![](../figures/26-openGauss-Log-Consensus-Framework.png)
+
+The core algorithm proceeds as follows:
+
+1. The DCF primary node periodically samples and computes consensus information, including the end-to-end consensus latency, the end-to-end consensus log bandwidth, and the overall log replay bandwidth.
+2. The primary node derives the performance trend from the sampled and historical results, adjusts the control direction and step based on the historical control value and the trend, and computes a new control value that yields better performance.
+3. When the control period expires, the control value is updated.
+4. The control value is continuously applied to service traffic to control the frequency of traffic injection.
+
+DCF will continue to evolve in scenarios such as data communication, multiple log streams, and parallel large-capacity replication, providing users with efficient, reliable, and easy-to-manage multi-copy log replication and backup capabilities and meeting their requirements for database disaster recovery and high availability.
+
+## 3 How Do We Use DCF?
+
+Assume that there are three nodes in the cluster and that their IP addresses are 192.168.0.11, 192.168.0.12, and 192.168.0.13. The node IDs are 1, 2, and 3, and the node roles are LEADER, FOLLOWER, and FOLLOWER.
+
+To use DCF, set **enable\_dcf** to **on** during installation and deployment with the openGauss OM tools; this parameter is disabled by default. For example:
+
+Obtain the XML file template from **script/gspylib/etc/conf/centralized/cluster\_config\_template\_HA.xml**.
+
+The following values are examples and can be replaced as required; each line is described with a comment. Only the DCF-related parameters are shown here:
+
+```
+<!-- Enable the DCF feature (disabled by default). -->
+<PARAM name="enable_dcf" value="on"/>
+<!-- One JSON entry per node: stream ID, node ID, IP address, DCF port (example value), and role. -->
+<PARAM name="dcf_config" value='[{"stream_id":1,"node_id":1,"ip":"192.168.0.11","port":17783,"role":"LEADER"},{"stream_id":1,"node_id":2,"ip":"192.168.0.12","port":17783,"role":"FOLLOWER"},{"stream_id":1,"node_id":3,"ip":"192.168.0.13","port":17783,"role":"FOLLOWER"}]'/>
+```
+
+- 3.1 Querying the Cluster Status After the Installation Is Complete
+
+    Use **gs\_ctl** to query the cluster status:
+
+    ```
+    # gs_ctl query -D <data_dir>
+    # gs_ctl query -D /nvme0/gaussdb/cluster/nvme0/dn1
+    ```
+
+    ![](../figures/zh-cn_image_0000001251920351.png)
+
+    In the preceding information, **dcf\_replication\_info** indicates the DCF information of the current node.
+
+    **role**: role of the current node. The value can be **LEADER**, **FOLLOWER**, **LOGGER**, **PASSIVE**, **PRE\_CANDICATE**, **CANDIDATE**, or **UNKNOW**. The preceding figure shows that the current node is the leader node.
+
+    **term**: election term.
+
+    **run\_mode**: DCF running mode. The value **0** indicates that automatic election is enabled, and the value **2** indicates that automatic election is disabled.
+
+    **work\_mode**: DCF working mode.
+
+    **hb\_interval**: heartbeat interval between DCF nodes, in milliseconds.
+
+    **elc\_timeout**: DCF election timeout period, in milliseconds.
+
+    **applied\_index**: log location that has been applied to the state machine.
+
+    **commit\_index**: log location that has been saved by the majority of DCF nodes. Logs before **commit\_index** have been made persistent.
+
+    **first\_index**: location of the first log saved on the DCF node. This location moves backward when the DN calls **dcf\_truncate**; earlier logs are cleared.
+
+    **last\_index**: location of the last log saved on the DCF node. This includes logs that are held in the memory of the DCF node but not yet made persistent, so last\_index ≥ commit\_index.
+
+    **cluster\_min\_apply\_idx**: location of the smallest applied log in the cluster.
+
+    **leader\_id**: leader node ID.
+
+    **leader\_ip**: IP address of the leader node.
+
+    **leader\_port**: port of the leader node, for DCF internal use.
+
+    **nodes**: information about the other nodes in the cluster.
+
+- 3.2 Online Cluster Scale Adjustment
+
+    To add a copy online, run the following command:
+
+    ```
+    # gs_ctl member --operation=add --nodeid=<node_id> --ip=<ip> --port=<port> -D <data_dir>
+    ```
+
+    To reduce the number of copies online, run the following command:
+
+    ```
+    # gs_ctl member --operation=remove --nodeid=<node_id> -D <data_dir>
+    ```
+
+    If the cluster is normal, a single copy can be removed within 5 minutes.
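+
+    For example, to add a fourth copy on a new node, the call could look as follows \(the node ID, address, port, and data directory are illustrative\):
+
+    ```
+    # gs_ctl member --operation=add --nodeid=4 --ip=192.168.0.14 --port=17783 -D /nvme0/gaussdb/cluster/nvme0/dn1
+    ```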
+
+- 3.3 Minority Forcible Startup
+
+    In a majority-fault scenario, no agreement can be reached under the normal Paxos protocol, so the system can no longer provide services. In this case, a forcible startup with the minority of nodes is required to provide emergency services.
+
+    Run the following command:
+
+    ```
+    # cm_ctl setrunmode -n <node_id> -D <data_dir> --xmode=minority --votenum=<number_of_votes>
+    ```
+
+    In the three-copy cluster scenario, if two copies are faulty, data can be committed with one copy.
+
+- 3.4 Switchover
+
+    Switchover between the primary and standby database instances is supported in one-primary, multiple-standby deployment mode to implement switchover between AZs. A switchover is a maintenance operation. Before a switchover, ensure that the cluster instances are running properly, all services are stopped, and the **pgxc\_get\_senders\_catchup\_time\(\)** view shows no ongoing catchup between the primary and standby nodes.
+
+    For example, run the following command to switch a standby node to primary:
+
+    ```
+    # cm_ctl switchover -n <node_id> -D <data_dir>
+    ```
+
+- 3.5 Standby Node Rebuild
+
+    Full build is supported in primary/standby mode. After receiving a full build request, the primary DN stops reclaiming DCF logs, and the standby DN copies the Xlogs and data files from the primary DN. After the kernel of the standby DN starts, DCF begins replicating logs from the agreed log point.
+
+    The following is an example:
+
+    ```
+    # gs_ctl build -b full -Z datanode -D <data_dir>
+    ```
+
+    The open-source DCF feature is another exploration of openGauss in the distributed field and another substantial contribution to open-source technology. openGauss is committed to promoting in-depth innovation of database technologies, increasing investment in basic database research and database theory innovation, fully opening up its top-notch technical capabilities, and working with developers around the world to promote the innovation and development of databases across industry, academia, and research.
+
diff --git a/content/en/post/2022/openGauss-Supports-SM3-and-SM4-Algorithms.md b/content/en/post/2022/openGauss-Supports-SM3-and-SM4-Algorithms.md
new file mode 100644
index 0000000000000000000000000000000000000000..05a883870bbf3c21afd685564c363c781863667d
--- /dev/null
+++ b/content/en/post/2022/openGauss-Supports-SM3-and-SM4-Algorithms.md
@@ -0,0 +1,150 @@
++++
+
+title = "openGauss Supports SM3 and SM4 Algorithms"
+
+date = "2021-11-15"
+
+tags = ["openGauss Supports SM3 and SM4 Algorithms"]
+
+archives = "2021-11"
+
+author = "Xin Dou"
+
+summary = "openGauss Supports SM3 and SM4 Algorithms"
+
+img = "/en/post/2022/title/img16.png"
+
+times = "17:30"
+
++++
+
+# openGauss Supports SM3 and SM4 Algorithms
+
+## 1. Introduction to the Chinese Cryptographic Algorithms
+
+Chinese cryptographic algorithms are the algorithms issued by the Office of Security Commercial Code Administration \(OSCCA\) of the State Cryptography Administration. Common ones include SM1, SM2, SM3, and SM4; the key length and block length are both 128 bits. To meet bank customers' requirements for database security capabilities, openGauss 2.0.0 and later versions support Chinese cryptographic algorithms to enhance the enterprise-level security capabilities and security competitiveness of the database, including the SM3 algorithm \(http://www.gmbz.org.cn/main/viewfile/20180108023812835219.html\) for user authentication and the SM4 algorithm \(http://www.gmbz.org.cn/main/viewfile/20180108015408199368.html\) for data encryption and decryption.
+
+## 2. SM3 Algorithm – User Authentication
+
+- 2.1 Usage
+
+openGauss supports four user authentication methods, determined by the **password\_encryption\_type** parameter in the **postgresql.conf** file. The following table lists the mapping between authentication methods and **password\_encryption\_type**.
+
+| Authentication Method | Parameter                  |
+| --------------------- | -------------------------- |
+| md5                   | password_encryption_type=0 |
+| sha256+md5            | password_encryption_type=1 |
+| sha256                | password_encryption_type=2 |
+| sm3                   | password_encryption_type=3 |
+
+The SM3 algorithm supports three connection modes: gsql, JDBC, and ODBC.
+
+To create a user that is authenticated with SM3, perform the following steps:
+
+\(1\) Set **password\_encryption\_type** to **3** in the **postgresql.conf** file and restart the database for the parameter to take effect. From then on, the SM3 algorithm is used to encrypt the plaintext passwords of newly created users.
+
+![](../figures/zh-cn_image_0000001252703087.png)
+
+\(2\) Create a user.
+
+In the following example, a user **test** is created. You can view the encryption type used at user creation in the **rolpassword** field of the **pg\_authid** system catalog. The following figure shows that the SM3 algorithm is used for encryption.
+
+![](../figures/zh-cn_image_0000001252343171.png)
+
+\(3\) In the **pg\_hba.conf** file, set the authentication method to SM3.
+
+![](../figures/zh-cn_image_0000001252703155.png)
+
+Now the **test** user can pass the authentication through remote login.
+
+![](../figures/zh-cn_image_0000001252343211.png)
+
+A user created with the SM3 encryption algorithm can pass authentication only when both the encryption algorithm and the authentication method are SM3.
+
+For SM3 users, when JDBC is used for remote connection, you need to manually download the **bcprov-jdk15on** JAR package and import it into the application.
+
+\[Download Link\]\(https://mvnrepository.com/artifact/org.bouncycastle/bcprov-jdk15on/1.68\)
+
+The procedure for creating a user with any of the other authentication methods is similar to that for SM3 authentication.
+
+- 2.2 Implementation Principle
+
+    openGauss uses the RFC 5802 password authentication scheme.
+
+    - User key generation
+
+        The following figure shows the RFC 5802 key derivation process.
+
+        ![](../figures/zh-cn_image_0000001252343303.png)
+
+        ```
+        SaltedPassword := PBKDF2 (password, salt, i)
+        ClientKey := HMAC(SaltedPassword, "Client Key")
+        StoredKey := Hash(ClientKey)
+        ServerKey := HMAC(SaltedPassword, "Server Key")
+        ```
+
+        The StoredKey and ServerKey are stored on the server.
+
+        1\) The StoredKey is used to authenticate the client.
+
+        The server authenticates the client by XORing the ClientSignature with the ClientProof sent by the client to recover the ClientKey, hashing the ClientKey, and comparing the result with the StoredKey. If they are the same, the client passes the authentication.
+
+        2\) The ServerKey is used by the client to authenticate the server.
+
+        Similarly, the client authenticates the server by comparing the ServerSignature it computes with the value sent by the server. If they are the same, the server passes the authentication.
+
+        3\) During the authentication, the server can compute the ClientKey. After the authentication is complete, the ClientKey is discarded and does not need to be stored.
+
+        To log in legitimately, one must hold the Password, the SaltedPassword, or the ClientKey. If the StoredKey and ServerKey are disclosed, illegal login may occur. \(A command-line sketch of this derivation chain follows.\)
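+
+        For readers who want to trace the derivation by hand, the following is a minimal sketch using the OpenSSL command line \(OpenSSL 3.0 or later\). The password, salt, and iteration count are illustrative, and SHA-256 stands in for the hash; an SM3-based deployment substitutes SM3 as the digest.
+
+        ```
+        # SaltedPassword := PBKDF2(password, salt, i)
+        SALTED=$(openssl kdf -keylen 32 -kdfopt digest:SHA256 -kdfopt pass:MyPassw0rd \
+          -kdfopt salt:NaCl -kdfopt iter:10000 -binary PBKDF2 | xxd -p -c 64)
+        # ClientKey := HMAC(SaltedPassword, "Client Key")
+        CLIENT_KEY=$(printf 'Client Key' | openssl dgst -sha256 -mac HMAC \
+          -macopt hexkey:$SALTED -binary | xxd -p -c 64)
+        # StoredKey := Hash(ClientKey) -- this is what the server stores
+        echo $CLIENT_KEY | xxd -r -p | openssl dgst -sha256
+        ```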
+
+    - Authentication process
+
+        The following figure shows the standard RFC 5802 password authentication process.
+
+        ![](../figures/320.png)
+
+        1. The client sends the username to the server.
+
+        2. The server returns the AuthMessage and the calculated ServerSignature to the client.
+
+        3. After receiving the message, the client uses the salt and iteration count in the AuthMessage to calculate the SaltedPassword from the Password, and then derives the lower-level keys. The client checks whether HMAC\(ServerKey, AuthMessage\) equals the ServerSignature. If they are equal, the client has authenticated the server.
+
+        4. The client sends the calculated ClientProof to the server.
+
+        5. The server uses the saved StoredKey and the AuthMessage to calculate the ClientSignature, XORs it with the ClientProof received from the client to recover the ClientKey, and then hashes the ClientKey and compares the result with the saved StoredKey. If they are the same, the client passes the authentication.
+
+    After receiving a request from the client, the server interacts with the client for authentication according to the authentication method configured in the **pg\_hba.conf** file.
+
+
+## 3. SM4 Algorithm – Data Encryption and Decryption
+
+The Chinese cryptographic algorithm SM4 can be used to encrypt and decrypt the data in a table column. The newly added encryption and decryption functions gs\_encrypt and gs\_decrypt are compatible with gs\_encrypt\_aes128 and gs\_decrypt\_aes128, and support encryption and decryption using AES128 and SM4. The SM4 algorithm invokes the EVP\_sm4\_cbc\(\) interface of OpenSSL.
+
+The gs\_encrypt\_aes128 and gs\_decrypt\_aes128 functions are described as follows:
+
+- gs\_encrypt\_aes128\(encryptstr, keystr\)
+
+    Description: Encrypts the **encryptstr** string using **keystr** as the key and returns the encrypted string.
+
+- gs\_decrypt\_aes128\(decryptstr, keystr\)
+
+    Description: Decrypts the **decryptstr** string using **keystr** as the key and returns the decrypted string.
+
+![](../figures/zh-cn_image_0000001207863420.png)
+
+The gs\_encrypt and gs\_decrypt functions are described as follows:
+
+- gs\_encrypt\(encryptstr, keystr, algorithm\)
+
+    Description: Encrypts the **encryptstr** string using **keystr** as the key with the specified **algorithm** and returns the encrypted string. The options for **algorithm** are **sm4** and **aes128**.
+
+- gs\_decrypt\(decryptstr, keystr, algorithm\)
+
+    Description: Decrypts the **decryptstr** string using **keystr** as the key with the specified **algorithm** and returns the decrypted string. The options for **algorithm** are **sm4** and **aes128**.
+
+![](../figures/zh-cn_image_0000001252343507.png)
+
+The following figures show how to encrypt and decrypt table data using the SM4 algorithm.
+
+![](../figures/zh-cn_image_0000001252463513.png)
+
+![](../figures/zh-cn_image_0000001252703473.png)
+
+In summary, openGauss supports SM3 for user authentication and SM4 for data encryption and decryption.
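+
+As a closing illustration, the sketch below shows how an application might call these functions. It is a hedged example rather than part of the walkthrough above: it assumes a Python driver that can connect to openGauss \(openGauss provides an adapted psycopg2 build; a vanilla PostgreSQL driver may fail against SM3/SHA-256 authentication\), and the connection parameters and key are placeholders.
+
+```
+# Illustrative only: call gs_encrypt/gs_decrypt with the SM4 algorithm.
+import psycopg2  # assumes an openGauss-compatible psycopg2 build
+
+conn = psycopg2.connect(host="127.0.0.1", port=26000,
+                        dbname="postgres", user="test", password="Test@1234")
+cur = conn.cursor()
+
+# Encrypt a value with SM4 before storing it (the key is a placeholder).
+cur.execute("SELECT gs_encrypt(%s, %s, 'sm4')", ("sensitive value", "my_key"))
+ciphertext = cur.fetchone()[0]
+
+# Decrypt it again; the result should match the original plaintext.
+cur.execute("SELECT gs_decrypt(%s, %s, 'sm4')", (ciphertext, "my_key"))
+print(cur.fetchone()[0])  # sensitive value
+
+cur.close()
+conn.close()
+```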
+ diff --git a/content/en/post/2022/title/img1.png b/content/en/post/2022/title/img1.png new file mode 100644 index 0000000000000000000000000000000000000000..2af578504062e5fa7a7aaf7e1c2014531e51e9c2 Binary files /dev/null and b/content/en/post/2022/title/img1.png differ diff --git a/content/en/post/2022/title/img10.png b/content/en/post/2022/title/img10.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/en/post/2022/title/img10.png differ diff --git a/content/en/post/2022/title/img11.png b/content/en/post/2022/title/img11.png new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cb03c6ee1e735b29bce766c1e10d334f0c Binary files /dev/null and b/content/en/post/2022/title/img11.png differ diff --git a/content/en/post/2022/title/img12.png b/content/en/post/2022/title/img12.png new file mode 100644 index 0000000000000000000000000000000000000000..0ec8535146c6a1d5e0b78ee6c1a6b3a8ede1cdf3 Binary files /dev/null and b/content/en/post/2022/title/img12.png differ diff --git a/content/en/post/2022/title/img13.png b/content/en/post/2022/title/img13.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/en/post/2022/title/img13.png differ diff --git a/content/en/post/2022/title/img14.png b/content/en/post/2022/title/img14.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/en/post/2022/title/img14.png differ diff --git a/content/en/post/2022/title/img15.jpg b/content/en/post/2022/title/img15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cb03c6ee1e735b29bce766c1e10d334f0c Binary files /dev/null and b/content/en/post/2022/title/img15.jpg differ diff --git a/content/en/post/2022/title/img16.png b/content/en/post/2022/title/img16.png new file mode 100644 index 0000000000000000000000000000000000000000..2af578504062e5fa7a7aaf7e1c2014531e51e9c2 Binary files /dev/null and b/content/en/post/2022/title/img16.png differ diff --git a/content/en/post/2022/title/img17.png b/content/en/post/2022/title/img17.png new file mode 100644 index 0000000000000000000000000000000000000000..b903c7f8d5a3ba8b66b2d6be883a4bac7230915e Binary files /dev/null and b/content/en/post/2022/title/img17.png differ diff --git a/content/en/post/2022/title/img18.png b/content/en/post/2022/title/img18.png new file mode 100644 index 0000000000000000000000000000000000000000..1697caef6995dd16977bb9aa96af762e19fb7102 Binary files /dev/null and b/content/en/post/2022/title/img18.png differ diff --git a/content/en/post/2022/title/img19.png b/content/en/post/2022/title/img19.png new file mode 100644 index 0000000000000000000000000000000000000000..5537c95b900978a3020269be7ec52ce914224844 Binary files /dev/null and b/content/en/post/2022/title/img19.png differ diff --git a/content/en/post/2022/title/img2.png b/content/en/post/2022/title/img2.png new file mode 100644 index 0000000000000000000000000000000000000000..5537c95b900978a3020269be7ec52ce914224844 Binary files /dev/null and b/content/en/post/2022/title/img2.png differ diff --git a/content/en/post/2022/title/img20.png b/content/en/post/2022/title/img20.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/en/post/2022/title/img20.png differ diff --git 
a/content/en/post/2022/title/img21.png b/content/en/post/2022/title/img21.png new file mode 100644 index 0000000000000000000000000000000000000000..b71bb7d740d0f375bbea6116ffde9175c0dbcacf Binary files /dev/null and b/content/en/post/2022/title/img21.png differ diff --git a/content/en/post/2022/title/img22.png b/content/en/post/2022/title/img22.png new file mode 100644 index 0000000000000000000000000000000000000000..31e776c19ddc9b62b4b88171d015b1b94ff2b022 Binary files /dev/null and b/content/en/post/2022/title/img22.png differ diff --git a/content/en/post/2022/title/img3.png b/content/en/post/2022/title/img3.png new file mode 100644 index 0000000000000000000000000000000000000000..b903c7f8d5a3ba8b66b2d6be883a4bac7230915e Binary files /dev/null and b/content/en/post/2022/title/img3.png differ diff --git a/content/en/post/2022/title/img4.png b/content/en/post/2022/title/img4.png new file mode 100644 index 0000000000000000000000000000000000000000..6b7b474933a31c6a20d0d1708e8909163293b4ad Binary files /dev/null and b/content/en/post/2022/title/img4.png differ diff --git a/content/en/post/2022/title/img5.png b/content/en/post/2022/title/img5.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/en/post/2022/title/img5.png differ diff --git a/content/en/post/2022/title/img6.png b/content/en/post/2022/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..b71bb7d740d0f375bbea6116ffde9175c0dbcacf Binary files /dev/null and b/content/en/post/2022/title/img6.png differ diff --git a/content/en/post/2022/title/img7.png b/content/en/post/2022/title/img7.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/en/post/2022/title/img7.png differ diff --git a/content/en/post/2022/title/img8.png b/content/en/post/2022/title/img8.png new file mode 100644 index 0000000000000000000000000000000000000000..31e776c19ddc9b62b4b88171d015b1b94ff2b022 Binary files /dev/null and b/content/en/post/2022/title/img8.png differ diff --git a/content/en/post/2022/title/img9.png b/content/en/post/2022/title/img9.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/en/post/2022/title/img9.png differ diff --git a/content/en/post/public_sys-resources/icon-caution.gif b/content/en/post/public_sys-resources/icon-caution.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 Binary files /dev/null and b/content/en/post/public_sys-resources/icon-caution.gif differ diff --git a/content/en/post/public_sys-resources/icon-danger.gif b/content/en/post/public_sys-resources/icon-danger.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 Binary files /dev/null and b/content/en/post/public_sys-resources/icon-danger.gif differ diff --git a/content/en/post/public_sys-resources/icon-note.gif b/content/en/post/public_sys-resources/icon-note.gif new file mode 100644 index 0000000000000000000000000000000000000000..6314297e45c1de184204098efd4814d6dc8b1cda Binary files /dev/null and b/content/en/post/public_sys-resources/icon-note.gif differ diff --git a/content/en/post/public_sys-resources/icon-notice.gif b/content/en/post/public_sys-resources/icon-notice.gif new file mode 100644 index 
0000000000000000000000000000000000000000..86024f61b691400bea99e5b1f506d9d9aef36e27 Binary files /dev/null and b/content/en/post/public_sys-resources/icon-notice.gif differ diff --git a/content/en/post/public_sys-resources/icon-tip.gif b/content/en/post/public_sys-resources/icon-tip.gif new file mode 100644 index 0000000000000000000000000000000000000000..93aa72053b510e456b149f36a0972703ea9999b7 Binary files /dev/null and b/content/en/post/public_sys-resources/icon-tip.gif differ diff --git a/content/en/post/public_sys-resources/icon-warning.gif b/content/en/post/public_sys-resources/icon-warning.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 Binary files /dev/null and b/content/en/post/public_sys-resources/icon-warning.gif differ diff --git a/content/en/prepare/prepare.md b/content/en/prepare/prepare.md index a21fc8c6e250e790a8998556997ebeae1e2830cb..c13054258c2ea38f19ff0800f05cc0ff4613bfbe 100644 --- a/content/en/prepare/prepare.md +++ b/content/en/prepare/prepare.md @@ -1,5 +1,5 @@ -1. Please refer to the http://git.mydoc.io/?t=179267 to register for Gitee account. +1. Please refer to the https://gitee.com/help/articles/4113 to register for Gitee account. 2. Setting your main E-mail in gitee(http://gitee.com/profile/emails). 3. Signing CLA in the https://opengauss.org/en/cla.html. -4. Refer to the http://git.mydoc.io/?t=180692 to prepare the git environment. +4. Refer to the https://gitee.com/help/articles/4107 to prepare the git environment. 5. Understanding the blog format. \ No newline at end of file diff --git a/content/zh/guidance/index_.md b/content/zh/guidance/index_.md index be91b91e4f601001c62bff3198738b81cb849acd..72d998c7d3b09ac03c4d685e18eeec2374dc9a8c 100644 --- a/content/zh/guidance/index_.md +++ b/content/zh/guidance/index_.md @@ -6,13 +6,13 @@ title = "Guidance to Post a Blog" ## Preparation -1. Refer to http: //git.mydoc.io/?t=179267 to register Gitee account. +1. Refer to https://gitee.com/help/articles/4113 to register Gitee account. -2. Set your primary mail box in gitee settings https: //gitee.com/profile/emails. +2. Set your primary mail box in gitee settings https://gitee.com/profile/emails. -3. Sign your CLA in . +3. Sign your CLA in . -4. Prepare your git environment refering to http: //git.mydoc.io/?t=180692. +4. Prepare your git environment refering to https://gitee.com/help/articles/4107. ## Understand blog format @@ -39,7 +39,7 @@ Tips: you can copy content/_example/2020-03-03-sample-post.md to your folder and The blog posting follows the pull request of Gitee. -1. Fork the blog project to your own gitee. Refer to for detailed guidance. +1. Fork the blog project to your own gitee. Refer to for detailed guidance. 2. Clone the code to your local environment. @@ -82,6 +82,6 @@ git commit -m "" git push origin : ``` -7. Refer to http: //git.mydoc.io/?t=153749to submit your Pull Request +7. Refer to https://gitee.com/help/articles/4122to submit your Pull Request 8. Wait for reviewing and merging. 
diff --git "a/content/zh/post/2022/CentOS-7-9-\345\256\211\350\243\205-openGauss-2-1-0-\344\271\213\345\211\245\350\214\247\346\212\275\344\270\235.md" "b/content/zh/post/2022/CentOS-7-9-\345\256\211\350\243\205-openGauss-2-1-0-\344\271\213\345\211\245\350\214\247\346\212\275\344\270\235.md" new file mode 100644 index 0000000000000000000000000000000000000000..5667380efc2d7621b334658f02c0569fb7906647 --- /dev/null +++ "b/content/zh/post/2022/CentOS-7-9-\345\256\211\350\243\205-openGauss-2-1-0-\344\271\213\345\211\245\350\214\247\346\212\275\344\270\235.md" @@ -0,0 +1,221 @@ ++++ + +title = "CentOS 7.9 安装 openGauss 2.1.0 之剥茧抽丝" + +date = "2021-12-23" + +tags = [ "CentOS 7.9 安装 openGauss 2.1.0 之剥茧抽丝"] + +archives = "2021-12" + +author = "问天的天问" + +summary = "CentOS 7.9 安装 openGauss 2.1.0 之剥茧抽丝" + +img = "/zh/post/2022/title/img2.png" + +times = "12:30" + ++++ + +# CentOS 7.9 安装 openGauss 2.1.0 之剥茧抽丝 + +问天的天问 2021/12/23 + +本文是在参考官方的安装文档后,提取总结出的关键安装步骤。 + +## \[1\] 基础环境安装 + +``` +# timedatectl set-timezone Asia/Shanghai +# hostnamectl set-hostname gauss01 + +# nmcli con mod enp0s3 ipv4.method manual ipv4.address 192.168.2.131/24 +# nmcli con mod enp0s3 ipv4.gateway 192.168.2.1 +# nmcli con mod enp0s3 ipv4.dns 192.168.1.1 +# nmcli con mod enp0s3 connection.autoconnect yes +``` + +## \[2\] Disable SElinux + +``` +# sed -i 's@\(^SELINUX=\).*@\1disabled@g' /etc/selinux/config +``` + +## \[3\] Disable Firewall + +``` +# systemctl disable firewalld +``` + +## \[4\] Disable Transparent HugePages + +``` +# sed -i '/linux16.*$/s//& transparent_hugepage=never/g' /boot/grub2/grub.cfg + +Reboot and Confirm +# cat /sys/kernel/mm/transparent_hugepage/enabled +always madvise [never] +``` + +## \[5\] 文件句柄设置 + +``` +# cat >> /etc/security/limits.conf << EOF + +*`echo -e "\t"`soft`echo -e "\t"`nofile`echo -e "\t"`1000000 +*`echo -e "\t"`hard`echo -e "\t"`nofile`echo -e "\t"`1000000 +EOF + +``` + +## \[6\] 网卡设置 + +在网卡配置文件中最后一行添加 MTU=8192 + +``` +# cat /etc/sysconfig/network-scripts/ifcfg-enp0s3 +…… +MTU=8192 +``` + +## \[7\] 修改默认版本号 + +``` +# cat /etc/redhat-release +CentOS Linux release 7.9.2009 (Core) +修改为 +CentOS Linux release 7.6.1810 (Core) +``` + +## \[8\] 系统参数 + +在内核方面,官方给出的建议值基本上与系统 CentOS 7.9 的默认值相同,不相同的只有4项,如下: + +- net.ipv4.tcp\_retries1 +- net.ipv4.tcp\_syn\_retries +- net.ipv4.ip\_local\_port\_range +- vm.overcommit\_ratio + +根据实际情况判定是否需要修改。 + +## \[9\] 安装 python + +``` +# yum install -y python36 +``` + +## \[10\] 安装软件包 + +官方建议软件包 + +``` +# yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb readline-devel +``` + +个人建议软件包 + +``` +# yum install -y bzip2 net-tools lrzsz +``` + +## \[11\] 关闭 RemoveIPC + +CentOS 默认关闭,无需要配置。 + +## \[12\] 创建组和用户 + +组和用户都可以不用提前创建,在安装时会自动创建。 + +## \[13\] 解压安装包 + +``` +# mkdir -p /opt/software/openGauss +# chmod 755 -R /opt/software +# cd /opt/software/openGauss + +上传源码 openGauss-2.1.0-CentOS-64bit-all.tar.gz 并解压 +# tar -zxvf openGauss-2.1.0-CentOS-64bit-all.tar.gz +# tar -zxvf openGauss-2.1.0-CentOS-64bit-om.tar.gz +``` + +## \[14\] 编辑配置脚本 + +``` +# cp script/gspylib/etc/conf/cluster_config_template.xml cluster_config.xml +# vi /opt/software/openGauss/cluster_config.xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +根据实际情况进行相应修改。 + +## \[15\] 执行安装和初始化 + +以 root 用户安装,安装脚本自行创建 dbgrp 组和 omm 用户 + +``` +# cd script +# python3 gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml +``` + +以 omm 用户初始化数据库 + +``` +# chown -R omm:dbgrp /opt/software/openGauss +# su - omm +$ gs_install -X 
/opt/software/openGauss/cluster_config.xml +``` + +初始化成功后连接数据库 + +``` +$ gsql -d postgres -p 15400 +gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +openGauss=# +``` + +其中,postgres为需要连接的数据库名称,15400为数据库节点的端口号,即 cluster\_config.xml 配置文件中的dataPortBase的值。 + +## \[16\] 卸载openGauss + +``` +# su - omm +$ gs_uninstall --delete-data +命令卸载并不全面,还需要手工删除,也可不经命令卸载直接手工删除。 +# userdel -r omm +# groupdel dbgrp +# rm -rf /opt/software /opt/huawei +# rm -rf /var/log/omm +``` + diff --git "a/content/zh/post/2022/CentOs\350\231\232\346\213\237\346\234\272\344\270\213opengauss\347\232\204\351\205\215\347\275\256\344\275\277\347\224\250.md" "b/content/zh/post/2022/CentOs\350\231\232\346\213\237\346\234\272\344\270\213opengauss\347\232\204\351\205\215\347\275\256\344\275\277\347\224\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..89cbbb7a3adb6617504810e9bedd3d6c4ed9b405 --- /dev/null +++ "b/content/zh/post/2022/CentOs\350\231\232\346\213\237\346\234\272\344\270\213opengauss\347\232\204\351\205\215\347\275\256\344\275\277\347\224\250.md" @@ -0,0 +1,488 @@ ++++ + +title = "CentOs虚拟机下opengauss的配置使用" + +date = "2021-12-11" + +tags = [ "CentOs虚拟机下opengauss的配置使用"] + +archives = "2021-12" + +author = "parker" + +summary = "CentOs虚拟机下opengauss的配置使用" + +img = "/zh/post/2022/title/img3.png" + +times = "12:30" + ++++ + +# CentOs虚拟机下opengauss的配置使用 + +## 环境说明 + +虚拟机平台 VMware + +服务器端 CentOS 7.9 + +本机系统 Windows 10 + +部署版本 OpenGauss 1.1.0 + +## 安装详细步骤 + +- 虚拟机VMware + + 本机已配置,该部分省略 + + +- CentOS 7.9 安装 + + 下载镜像源CentOS-7-x86\_64-DVD-2009.iso + + ![](figures/2c62c125feb04ff89234abf76991601e.png) + +- 虚拟机中选中镜像进行安装 + + ![](figures/7294465883ce45ac80a371f63dfe9659.png) + + ![](figures/356c385d615b442e951be7d27f00702e.png) + +- 设置 + + 内存设置为2GB + + 处理器设置为2 + + 网络默认即可 + + 声卡和打印机不使用直接进行了移除 + + +启动后进入系统安装,注意的点如下: + +- 分区 + + 选择系统-安装位置-手动分区进行分区如下: + + ![](figures/5d3d9f82ce164b08a6866a606fd7e03d.png) + + ![](figures/f569229a746940cba90ed0cda6fd1d2f.png) + +- 网络和主机名 + + 选择系统-网络和主机名进行设置如下: + + ![](figures/0bacb67d8b9d4ff6b786b2b734458b10.png) + + ![](figures/5e12f329abe74ed38ae99d8828adaa5d.png) + + 记录ip和主机名,之后配置需要用到 + + ``` + ip 192.168.201.131 + 主机名 db1 + ``` + +- 软件选择 + + 选择软件-软件选择设置如下: + + ![](figures/721e491c70e948abadf18b2eda7ce76f.png) + +- 用户设置 + + 上述设置完成后点击开始安装,该期间根据提示完成用户设置即可 + + ![](figures/22b37a0e95ea4472b4d331064192382c.png) + + 安装完成进行重启,登录系统完成安装 + + ![](figures/1e1aea950edc44d99adc91c658a9e14a.png) + +- 上网测试 + + ![](figures/0feab0d29d324acc9c4e87ffc7a3e826.png) + +- 修改操作系统版本\(CentOS 7.6可省略\) + + 通过 + + vi /etc/redhat-releas打开编辑文件,修改内容如下\(请使用su root切换至root用户进行操作\) + + ![](figures/c726f71fc88c4015b1d89f4586dfe290.png) + +- 关闭防火墙 + + 执行以下命令关闭防火墙 + + ``` + systemctl stop firewalld.service + + systemctl disable firewalld.service + ``` + + ![](figures/614036c6b5d84a0c86de61b3cbf88b78.png) + +- 设置字符集及环境变量 + + ![](figures/ba1ea7c4485b4830b21538d56ecac309.png) + +- 关闭swap交换内存 + + ![](figures/2775a3f24eb44c02931d63e302a4bf9c.png) + +- yum环境配置 + + 备份yum配置文件 + + ![](figures/27b944a22e1d45b39a0167b83e4d55a0.png) + +- 下载可用源的repo文件 + + ![](figures/3507d173b3e24d9f94dd543947ae33ef.png) + +- 查看repo文件是否正确 + + ![](figures/1e185faf72d14f6bb07e527d753614ed.png) + +- yum安装相关包 + + ``` + yum install -y libaio-devel flex bison ncurses-devel glibc.devel patch lsb_release wget python3 + ``` + + ![](figures/dc1c632c7c0f49f2ab7ebd57f78915d6.png) + + 设置python版本为3.x + + 
![](figures/641abf7f6c9642b188ade66b1c8d25ee.png) + +- 修改完成后,确认yum是否使用,若不能使用,如本例中。修改/usr/bin/yum文件,修改\#!/usr/bin/python为\#!/usr/bin/python2.7 + + ![](figures/61364d2741cc46f7802cb48cc75571fe.png) + + +## 数据库安装 + +- 创建存放数据库安装目录 + + ![](figures/cd094375c2b44a8383694267e492fc63.png) + +- 下载数据库安装包 + + ![](figures/a6d0fc02a8c948f2b43e4ef47cecd731.png) + + +- 创建xml配置文件,用于数据库安装 + + 在openGauss文件夹下 + + vi clusterconfig.xml编辑以下内容 + + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ``` + + 其中ip设置为之前的192.168.201.131,主机名为db1,如下: + + ![](figures/d21813079e7b40a1b9edde6b9298d2f3.png) + + +- 解压安装包 + + ![](figures/7a7b1fc98317411a9a18982e944ba5c2.png) + + +- 解压后查看并修改文件权限 + + ![](figures/128f20b65c554c85bbcda62acad5616e.png) + +- 执行初始化脚本 + + ``` + cd /opt/software/openGauss/script + + python gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml + ``` + + 返回Preinstallation succeeded内容时,初始化完成 + + ![](figures/ee22045a1dca446b925881137106db5c.png) + +- 初始化数据库 + + 重启虚拟机后使用omm用户进行数据库初始化 + + ``` + gs_install -X /opt/software/openGauss/clusterconfig.xml --gsinit-parameter="--encoding=UTF8" --dn-guc="max_process_memory=2GB" --dn-guc="shared_buffers=128MB" --dn-guc="bulk_write_ring_size=128MB" --dn-guc="cstore_buffers=16MB" + ``` + + 其中对应的参数内存大小须根据虚拟机情况进行设置 + + ![](figures/816de1e0a8c04796a4f3478eff37baed.png) + +- 安装完成后清理软件安装包 + + ![](figures/387c8fc827e34000936c977270c10f22.png) + + +## 连接数据库 + +![](figures/faa8002b28d94f5b9408f0e251daebc7.png) + +- JDBC配置 + + 从官方网站选取对应版本的jar包并解压,在eclipse上配置加载驱动类。 + + 第一次连接后操作数据库需要修改omm用户密码 + + ![](figures/0497eb639cb14b5182dc5b2aff97a757.png) + + 根据官方文档提供的demo程序修改后进行连接测试,连接成功如下: + + ![](figures/cb8039252a6b45e99d8ff682fb9df992.png) + +- demo程序: + + ``` + package gaussjdbc; + + import java.sql.Connection; + import java.sql.DriverManager; + import java.sql.PreparedStatement; + import java.sql.SQLException; + import java.sql.Statement; + import java.sql.Types; + import java.sql.CallableStatement; + + public class Gaussjdbc { + + //创建数据库连接。 + public static Connection GetConnection(String username, String passwd) { + String driver = "org.postgresql.Driver"; + String sourceURL = "jdbc:postgresql://192.168.201.131:26000/postgres"; + Connection conn = null; + try { + //加载数据库驱动。 + Class.forName(driver).newInstance(); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + try { + //创建数据库连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + return conn; + }; + + //执行普通SQL语句,创建customer_t1表。 + public static void CreateTable(Connection conn) { + Statement stmt = null; + try { + stmt = conn.createStatement(); + + //执行普通SQL语句。 + int rc = stmt + .executeUpdate("CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));"); + + stmt.close(); + } catch (SQLException e) { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + //执行预处理语句,批量插入数据。 + public static void BatchInsertData(Connection conn) { + PreparedStatement pst = null; + + try { + //生成预处理语句。 + pst = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?,?)"); + for (int i = 0; i < 3; i++) { + //添加参数。 + pst.setInt(1, i); + pst.setString(2, "data " + i); + pst.addBatch(); + } + //执行批处理。 + pst.executeBatch(); + pst.close(); + } catch (SQLException e) { + if (pst != null) { + try { + pst.close(); + } catch (SQLException e1) { + e1.printStackTrace(); 
+ } + } + e.printStackTrace(); + } + } + + //执行预编译语句,更新数据。 + public static void ExecPreparedSQL(Connection conn) { + PreparedStatement pstmt = null; + try { + pstmt = conn + .prepareStatement("UPDATE customer_t1 SET c_customer_name = ? WHERE c_customer_sk = 1"); + pstmt.setString(1, "new Data"); + int rowcount = pstmt.executeUpdate(); + pstmt.close(); + } catch (SQLException e) { + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + + //执行存储过程。 + public static void ExecCallableSQL(Connection conn) { + CallableStatement cstmt = null; + try { + + cstmt=conn.prepareCall("{? = CALL TESTPROC(?,?,?)}"); + cstmt.setInt(2, 50); + cstmt.setInt(1, 20); + cstmt.setInt(3, 90); + cstmt.registerOutParameter(4, Types.INTEGER); //注册out类型的参数,类型为整型。 + cstmt.execute(); + int out = cstmt.getInt(4); //获取out参数 + System.out.println("The CallableStatment TESTPROC returns:"+out); + cstmt.close(); + } catch (SQLException e) { + if (cstmt != null) { + try { + cstmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + + /** + * 主程序,逐步调用各静态方法。 + * @param args + */ + public static void main(String[] args) { + //创建数据库连接。 + Connection conn = GetConnection("parker", "parker@123"); + + //创建表。 + CreateTable(conn); + + //批插数据。 + BatchInsertData(conn); + + //执行预编译语句,更新数据。 + ExecPreparedSQL(conn); + + //执行存储过程。 + //ExecCallableSQL(conn);//这部分在运行时有问题,直接注释掉了 + + //关闭数据库连接。 + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + + } + + } + ``` + + +## 安装中遇到的问题与解决过程 + +- 初始化脚本失败报错 + + ![](figures/a662d9a9a96b40d089a6d9c68788bf3d.png) + + ![](figures/dbc89373c5734638a51add74523f640c.png) + +- CentOS上配置JAVA + + 自带的java路径寻找: + + ![](figures/480ae4bbdd664652af43663f061aae84.png) + + 配置CentOS环境变量: + + ![](figures/17fb09d479354307b7e2a8b27cbd2f7e.png) + + 而后期验证javac时发现CentOS其自带的java仅有运行环境,改用windows作为客户端。 + +- 也可以自行下载java环境配置进行解决配置: + + ![](figures/05476910e9e44c9fb0723d26b0f467f4.png) + +- 数据库连接问题 + + 修改后ip未放行错误 + + ![](figures/591c2725601c492cbccf312e9b2a7a11.png) + + 放行ip命令\(在官方文档客户端接入验证处可以查询\)如下 + + ``` + gs_guc set -N all -I all -h "host all parker 192.168.201.1/32 sha256" + ``` + + 具体的接入ip若不清楚可以通过报错信息或本地的ipconfig进行查看 + + diff --git "a/content/zh/post/2022/Go\350\257\255\350\250\200\350\277\236\346\216\245openGauss\347\216\257\345\242\203\346\220\255\345\273\272\350\277\207\347\250\213\357\274\210\345\220\253OG\345\256\211\350\243\205\357\274\211.md" "b/content/zh/post/2022/Go\350\257\255\350\250\200\350\277\236\346\216\245openGauss\347\216\257\345\242\203\346\220\255\345\273\272\350\277\207\347\250\213\357\274\210\345\220\253OG\345\256\211\350\243\205\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..2b097fbb8dd9d12ecff4c5ec1afd1dc674f68283 --- /dev/null +++ "b/content/zh/post/2022/Go\350\257\255\350\250\200\350\277\236\346\216\245openGauss\347\216\257\345\242\203\346\220\255\345\273\272\350\277\207\347\250\213\357\274\210\345\220\253OG\345\256\211\350\243\205\357\274\211.md" @@ -0,0 +1,590 @@ ++++ + +title = "Go语言连接openGauss环境搭建过程(含OG安装)" + +date = "2021-12-24" + +tags = [ "Go语言连接openGauss环境搭建过程(含OG安装)"] + +archives = "2021-12" + +author = "葛二萌" + +summary = "Go语言连接openGauss环境搭建过程(含OG安装)" + +img = "/zh/post/2022/title/img4.png" + +times = "12:30" + ++++ + +# Go语言连接openGauss环境搭建过程(含OG安装) + +## 1.前言 + 
+本文共分为openGauss单机版安装部分和连接环境搭建部分,提供了通过go语言来连接openGauss的一种方案。openGauss现在也有了基于go的驱动,但是我觉得ODBC的方式更为通用一些,也不应被丢弃,因此本文使用go通过ODBC来连接openGauss。 + +- 硬件及软件环境: + + 硬件环境:虚拟机的内存8GB,4核心CPU,900G磁盘(非必须) + + 软件环境:CentOS7.6 + + 数据库版本:opengauss2.0企业版:openGauss-2.0.0-CentOS-64bit-all.tar.gz + + +## 2.openGauss单机版安装: + +- 2.1. 关闭防火墙 + + ``` + #停止firewall + systemctl stop firewalld.service + #禁止firewall开机启动 + systemctl disable firewalld.service + #检查防火墙是否关闭。 + systemctl status firewalld + ``` + + - 说明: + + 若防火墙状态显示为active \(running\),则表示防火墙未关闭。 + + 若防火墙状态显示为inactive \(dead\),则无需再关闭防火墙。 + + +- 2.2. 设置时区和时间 + + 将各数据库节点的时区设置为相同时区,可以将/usr/share/zoneinfo/目录下的时区文件拷贝为/etc/localtime文件。 + + ``` + cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime + ``` + +- 2.3. 关闭SELinux + + ``` + [root@node1 ~]# + + getenforce + sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config + setenforce 0 + getenforce + #检查 + cat /etc/selinux/config|grep SELINUX + 输出: + + # SELINUX= can take one of these three values: + + SELINUX=disabled + + # SELINUXTYPE= can take one of three values: + + SELINUXTYPE=targeted + + [root@node1 ~]# + ``` + +- 2.4. 修改/etc/hosts + + ``` + #添加一行 + + cat >>/etc/hosts <> /etc/profile<>/etc/ssh/sshd_config<>/etc/security/limits.conf + echo "* hard nofile 1000000" >>/etc/security/limits.conf + echo "* soft nproc unlimited" >>/etc/security/limits.conf + echo "* hard nproc unlimited" >>/etc/security/limits.conf + ``` + +- 2.9. 安装python3.6.x + + ``` + yum install openssl* -y + yum install python3* -y + ``` + + 检查 + + ``` + [omm@node1 dn]$ python3 -V + Python 3.6.8 + ``` + + 其他软件包,如需要可以安装(也可以直接使用安装): + + ``` + yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel + yum install openssl* -y + yum install -y java-1.8.0-openjdk* psmisc bzip2 python3 python3-devel lksctp* + reboot #重新启动服务器 + ``` + +- 2.10. 创建安装包的存放目录 + + ``` + mkdir -p /opt/software/openGauss + chmod 755 -R /opt/software + ``` + +- 2.11. 下载openGauss数据库软件 + + 下载地址为:[https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html) + + 下载完成后上传到centos中 + +- 2.12. 解压缩openGauss DBMS介质 + + ``` + cd /opt/software/openGauss + tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz + tar -zxvf openGauss-2.0.0-CentOS-64bit-om.tar.gz + ``` + +- 2.13. 创建XML文件 + + 下面是xml文件官方模板,一般只需要改一下自己centos机器的IP + + ``` + cat > clusterconfig.xml< + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + EOF + ``` + +- 2.14. 检查环境变量 + + ``` + echo $LD_LIBRARY_PATH + ``` + +- 2.15. 安装前进行交互式检查 + + ``` + [root@node1 script]# ./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml + ``` + + 出现以下内容代表检查没问题。 + + Parsing the configuration file. + + ``` + Successfully parsed the configuration file. + Installing the tools on the local node. + Successfully installed the tools on the local node. + Setting pssh path + Successfully set core path. + Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes + Please enter password for cluster user. + Password: omm123 + Please enter password for cluster user again. + Password: omm123 + Successfully created [omm] user on all nodes. + Preparing SSH service. + Successfully prepared SSH service. + Checking OS software. + Successfully check os software. + Checking OS version. + Successfully checked OS version. + Creating cluster's path. + Successfully created cluster's path. + Setting SCTP service. + Successfully set SCTP service. + Set and check OS parameter. + Setting OS parameters. + Successfully set OS parameters. 
+ Warning: Installation environment contains some warning messages. + Please get more details by "/opt/software/openGauss/script/gs_checkos -i A -h node1 --detail". + Set and check OS parameter completed. + Preparing CRON service. + Successfully prepared CRON service. + Setting user environmental variables. + Successfully set user environmental variables. + Setting the dynamic link library. + Successfully set the dynamic link library. + Setting Core file + Successfully set core path. + Setting pssh path + Successfully set pssh path. + Set ARM Optimization. + No need to set ARM Optimization. + Fixing server package owner. + Setting finish flag. + Successfully set finish flag. + Preinstallation succeeded + ``` + +- 2.16. 开始安装openGauss DBMS和创建数据库 + + 使用root执行如下命令 + + ``` + cd /opt/software/openGauss/script + chmod -R 755 /opt/software/openGauss/script + chown -R omm:dbgrp /opt/software/openGauss/script + ``` + + 使用omm用户安装openGauss DBMS和创建openGauss数据库 + + ``` + su - omm + cd /opt/software/openGauss/script + cp ../clusterconfig.xml . + gs_install -X /opt/software/openGauss/script/clusterconfig.xml + ``` + + 重要提示:用户需根据提示输入数据库的密码,密码需要具有一定的复杂度,为保证用户正常使用该数据库,请记住输入的数据库密码。此处建议密码设置为huawei@1234 + + ``` + [omm@node1 ~]$ cd /opt/software/openGauss/script + [omm@node1 script]$ cp ../clusterconfig.xml . + [omm@node1 script]$ gs_install -X /opt/software/openGauss/script/clusterconfig.xml + ``` + + ``` + Parsing the configuration file. + Check preinstall on every node. + Successfully checked preinstall on every node. + Creating the backup directory. + Successfully created the backup directory. + begin deploy.. + Installing the cluster. + begin prepare Install Cluster.. + Checking the installation environment on all nodes. + begin install Cluster.. + Installing applications on all nodes. + Successfully installed APP. + begin init Instance.. + encrypt cipher and rand files for database. + Please enter password for database:huawei@1234 + Please repeat for database:huawei@1234 + begin to create CA cert files + The sslcert will be generated in /opt/huawei/install/app/share/sslcert/om + Cluster installation is completed. + Configuring. + Deleting instances from all nodes. + Successfully deleted instances from all nodes. + Checking node configuration on all nodes. + Initializing instances on all nodes. + Updating instance configuration on all nodes. + Check consistence of memCheck and coresCheck on database nodes. + Configuring pg_hba on all nodes. + Configuration is completed. + Successfully started cluster. + Successfully installed application. + end deploy.. + ``` + + 查看数据库状态: + + ``` + [omm@node1 script]$ gs_om -t status + ``` + + 会出现以下内容: + + ----------------------------------------------------------------------- + + ``` + cluster_name : dbCluster + cluster_state : Normal + redistributing : No + --------------------------------------------------------------------- + [omm@node1 script]$ + ``` + + 启动数据库(安装完默认已经启动): + + ``` + [omm@node1 db1]$ gs_om -t start + Starting cluster. + + ========================================= + + [SUCCESS] node1: + + [2021-04-01 16:50:13.969][29784][][gs_ctl]: gs_ctl started,datadir is /opt/huawei/install/data/dn + + [2021-04-01 16:50:13.974][29784][][gs_ctl]: another server might be running; Please use the restart command + + ========================================= + + Successfully started. 
+ ``` + + - 使用omm用户执行下面的操作。 + + 登录到数据库, + + ``` + gsql -d postgres -p 26000 -r + ``` + + \#执行上条命令会出现以下提示内容。 + + ``` + gsql ((openGauss 2.0.0 build 78689da9) compiled at 2021-03-31 21:04:03 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + ``` + + 其他常见的基本命令 + + ``` + \q #退出会话 + gs_om -t stop #关闭数据库 + gs_om -t restart #重启数据库 + ``` + + 更多操作使用说明见官方文档:[https://opengauss.org/zh/docs/2.1.0/docs/Quickstart/Quickstart.html](https://opengauss.org/zh/docs/2.1.0/docs/Quickstart/Quickstart.html) + + + +## 3. 连接环境搭建 + +- 3.1 安装go语言环境 + + 安装go语言,安装包下载地址:[https://dl.google.com/go/go1.17.5.linux-amd64.tar.gz](https://dl.google.com/go/go1.17.5.linux-amd64.tar.gz) + + 上传压缩包后解压: tar -C /usr/local/ -xvf go1.11.4.linux-amd64.tar.gz + + 配置环境变量 + + ``` + vi /etc/profile + ``` + + 追加以下内容: + + ``` + export PATH=$PATH:/usr/local/go/bin + ``` + + 让环境变量配置生效: + + ``` + source /etc/profile + ``` + + 验证go是否安装成功: + + ``` + go version + ``` + + 出现以下内容代表安装成功(我装的是1.16版本,以你下载的安装包版本为准): + + ![](figures/我装的是1-16版本.png) + + +- 3.2 搭建unixODBC环境 + + 下载安装unixODBC: + + ``` + yum install unixODBC + yum install unixODBC-devel #非必须 + ``` + + 下载openGauss ODBC驱动ODBC\_2.1.0:[https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html) + + 下载之后上传到centos机器上/soft路径(该路径不是必须) ,把驱动拷贝到指定目录下: + + ``` + cd /soft + tar -xzvf openGauss-2.1.0-ODBC.tar.gz + cp lib/* /usr/local/lib + cp odbc/lib/* /usr/local/lib + ``` + + 安装完unixODBC后,/etc目录下会有一个文件: odbcinst.ini ,接下来配置这个文件 + + ``` + vim odbcinst.ini + ``` + + 将下列内容添加进去: + + ``` + [openGauss] + Driver64=/usr/local/lib/psqlodbcw.so + setup=/usr/local/lib/psqlodbcw.so + ``` + + 另外在/etc下编辑odbc.ini\(如没有则新建\): + + ``` + vim /etc/odbc.ini + ``` + + 将下列内容拷贝进去: + + ``` + [openGaussODBC] + Driver=openGauss + Servername=192.168.183.28 + Database=postgres + Username=gem + Password=huawei@1234 + Port=26000 + DatabaseName=postgres + ``` + + 注,上面要填自己的IP,数据库用户名和口令,且不能是omm初始用户。 + + 到此配置基本完成了,下面验证odbc是否可以openGauss(数据库要开启): + + ``` + isql -v openGaussODBC + ``` + + 若出现以下内容代表配置ok,连接没有问题: + + ![](figures/连接没有问题.png) + + +- 3.3 编写go语言通过ODBC连接openGauss的测试脚本并执行验证 + + ``` + vim test_gauss.go : + package main + import ( + "database/sql" + "fmt" + _ "odbc/driver" + ) + + func main() { + fmt.Printf("%s\n", "创建数据库链接") + conn, err := sql.Open("odbc","DSN=openGaussODBC;UID=gem;PWD=huawei@1234") + if err != nil { + fmt.Println("链接错误") + return + } + defer conn.Close() + fmt.Printf("%s\n", "构建查询") + stmt, err := conn.Prepare("select 666;") + if err != nil { + fmt.Println("查询异常:", err) + return + } + defer stmt.Close() + row, err := stmt.Query() + if err != nil { + fmt.Println("查询错误:", err) + } + defer row.Close() + fmt.Printf("%s\n", "数据集显示") + for row.Next() { + var id int + if err := row.Scan(&id); err == nil { + fmt.Println(id) + } + ``` + + 在脚本路径下执行测试: + + ``` + go run test_gauss.go + ``` + + 若出现以下结果表明连接成功。 + + ![](figures/若出现以下结果表明连接成功.png) + + +## 4.总结 + +个人认为,比较容易出错的点是,虚拟机内存要大于等于8GB,unixODBC两个配置文件odbc.ini和odbcinst.ini的配置稍麻烦,最后,数据库一定要start才能连接成功,个人有一次犯了一次这个错误,虽然是很浅显的道理哈哈。 + diff --git "a/content/zh/post/2022/MOGDB-openGauss\346\225\260\346\215\256\345\272\223gs_dump\345\244\207\344\273\275\350\204\232\346\234\254\345\217\212\345\244\207\344\273\275\346\270\205\347\220\206.md" "b/content/zh/post/2022/MOGDB-openGauss\346\225\260\346\215\256\345\272\223gs_dump\345\244\207\344\273\275\350\204\232\346\234\254\345\217\212\345\244\207\344\273\275\346\270\205\347\220\206.md" new file mode 100644 index 
0000000000000000000000000000000000000000..9e6f28013c2980a52b453db77b1e24651613e0a1 --- /dev/null +++ "b/content/zh/post/2022/MOGDB-openGauss\346\225\260\346\215\256\345\272\223gs_dump\345\244\207\344\273\275\350\204\232\346\234\254\345\217\212\345\244\207\344\273\275\346\270\205\347\220\206.md" @@ -0,0 +1,130 @@ ++++ + +title = "MOGDB/openGauss数据库gs dump备份脚本及备份清理" + +date = "2022-01-07" + +tags = [ "MOGDB/openGauss数据库gs dump备份脚本及备份清理"] + +archives = "2022-01" + +author = "阎书利" + +summary = "MOGDB/openGauss数据库gs dump备份脚本及备份清理" + +img = "/zh/post/2022/title/img8.png" + +times = "12:30" + ++++ + +# MOGDB/openGauss数据库gs\_dump备份脚本及备份清理 + +需要对MOGDB/openGauss进行每天逻辑备份。如下脚本分享给大家。 + +## 一、备份脚本 + +- 1.脚本 + + ``` + c.sh (可以改名字) + + # database dump shell + # you should change the GAUSSHOME GAUSSPORT GAUSSDATA DUMP_USER DUMP_PASSWORD + #!/bin/bash + source /etc/profile + source /home/omm/.bash_profile + export GAUSSHOME=/opt/gaussdb/app + export GAUSSPORT=26000 + export GAUSSDATA=/gaussdb/data/dn1 + export PATH=$PGHOME/bin:$PATH + DUMP_USER=ysla + DUMP_PASSWORD='1qaz@WSX' + CUR_DATE=`date "+%Y-%m-%d-%H%M"` + dbnamelist=`cat oo.txt` + + #Loading DBLIST + gsql -p ${GAUSSPORT} postgres -c "select datname from pg_database where datname not in ('template1','template0','postgres')" -t | grep -v '^$' >oo.txt + + #save directory + SAVE_BASE_DIR="/gaussdb/dump_dir" + DAT_FILE_DIR="${SAVE_BASE_DIR}/${CUR_DATE}" + if [ -d ${DAT_FILE_DIR} ] + then : + else + mkdir -p ${DAT_FILE_DIR} + fi + # The real backup step! + echo "`date "+%Y-%m-%d-%H%M"` begin backup db " + for dbname in ${dbnamelist} + do + gs_dump -E UTF8 ${dbname} -U ${DUMP_USER} -W ${DUMP_PASSWORD} -p ${GAUSSPORT} -F p -f ${DAT_FILE_DIR}/${dbname}_${CUR_DATE}.sql + gs_dumpall -l ${dbname} -U ${DUMP_USER} -W ${DUMP_PASSWORD} -p ${GAUSSPORT} -g -f ${DAT_FILE_DIR}/global_data_${dbname}_${CUR_DATE}.sql + done + tar -cjvf ${DAT_FILE_DIR}.tar.gz /${DAT_FILE_DIR} --remove-files + echo "`date "+%Y-%m-%d-%H%M"` end backup db " + ``` + + 这个脚本需要修改GAUSSHOME GAUSSPORT GAUSSDATA DUMP\_USER DUMP\_PASSWORD这几个(数据库家目录,端口,数据目录,做dump的用户,以及密码),之后直接执行就可以。脚本会自动查询数据库,并把’template1’,'template0’和’postgres’数据库排除掉,然后把其他数据库的数据和全局对象各自备份一份,最后把备份的.sql文件进行压缩。 + +- 2,脚本执行 + + 执行结果如下: + + ![](figures/20220107-39368262-8b82-4c5d-973d-c268dab99042.png) + + 去备份保存的目录下 + + ![](figures/20220107-154aa7ec-6a24-41aa-8fe4-0eee137d0982.png) + + 解压一个文件 + + ![](figures/20220107-2073c9b3-0749-4d3d-a577-cf9467225d37.png) + + 如下数据库的数据保存的文件名格式为(数据库名\_日期时间.sql) + + 全局对象保存的文件名格式为(global\_data\_数据库名\_日期时间.sql) + + ![](figures/20220107-6c96183e-8ed2-4eac-840d-6de2b6c9e746.png) + + ![](figures/20220107-62164f26-2335-4465-ad23-47148ecae8a1.png) + + 查看数据库对应的数据备份 + + ``` + [omm@node1 2022-01-07-1634]$ vim ysla_2022-01-07-1634.sql + ``` + + ![](figures/20220107-ee45e332-8e56-4b07-a765-b1e5ce6df6b9.png) + + 查看数据库对应的全局对象备份 + + ``` + [omm@node1 2022-01-07-1634]$ vim global_data_ysla_2022-01-07-1634.sql + ``` + + ![](figures/20220107-7b9036fd-66fb-44ff-9ad6-61a878d5940b.png) + + +## 二、备份脚本加到crontab + +将如下一行加入crontab,让脚本每天凌晨12:30执行,这里的flock是为了防止一个脚本没执行完就到了这个脚本下一次执行的周期,可能会导致并发问题,严重时会导致出现脏数据性能瓶颈等恶性循环,所以使用flock建立排它锁 + +``` +-x 独占锁 -n 直接失败 如发现被锁如就失败不等待,除非解锁 +``` + +``` +30 00 * * * /usr/bin/flock -xn /tmp/test.lock -c 'sh /home/omm/c.sh >> c.log' +``` + +## 三、备份保留,清理 + +每天都进行备份,如果备份天数过多不清理,可能使目录打满,因此需要添加备份清理策略,我这里用的是crontab的方式,每天凌晨3:30清理过期数据。 + +如下一行加入到crontab里边,也是用了flock,这一行命令会找备份目录下的文件,排除掉隐藏文件,并把30天之前的.tar.gz文件删除。 + +``` +30 03 * * * /usr/bin/flock -xn /tmp/test1.lock -c "find /gaussdb/dump_dir -not -path '*/\.*' -mtime 
+30 -type f -name *.tar.gz -exec rm -rf {} \;" +``` + diff --git "a/content/zh/post/2022/MOGDB-openGauss\347\232\204txid_snapshot-\346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\347\233\270\345\205\263\345\207\275\346\225\260.md" "b/content/zh/post/2022/MOGDB-openGauss\347\232\204txid_snapshot-\346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\347\233\270\345\205\263\345\207\275\346\225\260.md" new file mode 100644 index 0000000000000000000000000000000000000000..361dc89bc030dfccaf0d39a100ddc60010de3543 --- /dev/null +++ "b/content/zh/post/2022/MOGDB-openGauss\347\232\204txid_snapshot-\346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\347\233\270\345\205\263\345\207\275\346\225\260.md" @@ -0,0 +1,257 @@ ++++ + +title = "MOGDB/openGauss的txid snapshot 数据类型和相关函数" + +date = "2021-12-20" + +tags = [ "MOGDB/openGauss的txid snapshot 数据类型和相关函数"] + +archives = "2021-12" + +author = "阎书利" + +summary = "MOGDB/openGauss的txid snapshot 数据类型和相关函数" + +img = "/zh/post/2022/title/img5.png" + +times = "12:30" + ++++ + +# MOGDB/openGauss的txid\_snapshot 数据类型和相关函数 + +txid\_snapshot的文本表示为:xmin:xmax:xip\_list。 + +``` + 名称 描述 + xmin 最早的事务ID(txid)仍然活动。所有较早事务将是已经提交可见的,或者是直接回滚。 + xmax 作为尚未分配的txid。所有大于或等于此txids的都是尚未开始的快照时间,因此不可见。 + xip_list 当前快照中活动的txids。这个列表只包含在xmin和xmax之间活动的txids;有可能活动的txids高于xmax。 介于大于等于xmin、小于xmax,并且不在这个列表中的txid,在这个时间快照已经完成的,因此按照提交状态查看他是可见还是回滚。这个列表不包含子事务的txids。 +``` + +示例:10:20:10,13,15意思为:xmin=10, xmax=20, xip\_list=10, 13, 15。 + +测试如下: + +## 1.通过设置强制对临时对象使用COMMIT而不是2PC + +``` +SET enforce_two_phase_commit TO off; +``` + +## 2.正常案例演示 + +``` + postgres=# select '12:13:'::txid_snapshot; + ## txid_snapshot + 12:13: + (1 row) + + postgres=# select '12:18:14,16'::txid_snapshot; + ## txid_snapshot + 12:18:14,16 + (1 row) +``` + +## 3.错误案例演示 + +``` + postgres=# select '31:12:'::txid_snapshot; + ERROR: invalid input for txid_snapshot: "31:12:" + LINE 1: select '31:12:'::txid_snapshot; + ^ + CONTEXT: referenced column: txid_snapshot +------------------------------------------------------------------------------- + postgres=# select '0:1:'::txid_snapshot; + ERROR: invalid input for txid_snapshot: "0:1:" + LINE 1: select '0:1:'::txid_snapshot; + ^ + CONTEXT: referenced column: txid_snapshot +------------------------------------------------------------------------------- +postgres=# select '12:13:0'::txid_snapshot; + ERROR: invalid input for txid_snapshot: "12:13:0" + LINE 1: select '12:13:0'::txid_snapshot; + ^ + CONTEXT: referenced column: txid_snapshot +------------------------------------------------------------------------------- + postgres=# select '12:16:14,13'::txid_snapshot; + ERROR: invalid input for txid_snapshot: "12:16:14,13" + LINE 1: select '12:16:14,13'::txid_snapshot; + ^ + CONTEXT: referenced column: txid_snapshot +------------------------------------------------------------------------------- +postgres=# select '12:16:14,14'::txid_snapshot; + ERROR: invalid input for txid_snapshot: "12:16:14,14" + LINE 1: select '12:16:14,14'::txid_snapshot; + ^ + CONTEXT: referenced column: txid_snapshot +``` + +通过测试看出xmax应该大于xmin,不可为0,tixds应该按增序排列,且不为0,并且不能有重复的tixds,在使用的时候应当尽量避免。 + +## 4.创建测试表及测试数据导入 + +``` +postgres=# insert into snapshot_test values (4, '100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131'); +postgres=# create temp table snapshot_test(nr integer,snap txid_snapshot); + CREATE TABLE + postgres=# insert into snapshot_test values (1, '12:13:'); + INSERT 0 1 + postgres=# insert 
into snapshot_test values (2, '12:20:13,15,18'); + INSERT 0 1 + postgres=# insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); + INSERT 0 1 + postgres=# insert into snapshot_test values (4, '100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131'); + INSERT 0 1 +``` + +查询数据情况: + +``` +postgres=# select snap from snapshot_test order by nr; + snap + ------------------------------------------------------------------------------------------------------- + ------------------------------ + 12:13: + 12:20:13,15,18 + 100001:100009:100005,100007,100008 + 100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,12 + 4,125,126,127,128,129,130,131 + (4 rows) +``` + +## 5.函数测试 + +txid\_snapshot\_xmin\(\)为会返回快照的xmin, + +txid\_snapshot\_xmax\(\)会返回快照的xmax, + +txid\_snapshot\_xip\(\)获取正在进行的事务ip,即txids。 + +``` +postgres=# select txid_snapshot_xmin(snap), + postgres-# txid_snapshot_xmax(snap), + postgres-# txid_snapshot_xip(snap) + postgres-# from snapshot_test order by nr, 1, 2, 3; + txid_snapshot_xmin | txid_snapshot_xmax | txid_snapshot_xip + --------------------+--------------------+------------------- + 12 | 20 | 13 + 12 | 20 | 15 + 12 | 20 | 18 + 100001 | 100009 | 100005 + 100001 | 100009 | 100007 + 100001 | 100009 | 100008 + 100 | 150 | 101 + 100 | 150 | 102 + 100 | 150 | 103 + 100 | 150 | 104 + 100 | 150 | 105 +``` + +txid\_visible\_in\_snapshot\(\)会查看在快照中事务ID是否可见\(不使用子事务ID\) + +``` +postgres=# select id, txid_visible_in_snapshot(id, snap) + postgres-# from snapshot_test, generate_series(11, 21) id + postgres-# where nr = 2; + id | txid_visible_in_snapshot + ----+-------------------------- + 11 | t + 12 | t + 13 | f + 14 | t + 15 | f + 16 | t + 17 | t + 18 | f + 19 | t + 20 | f + 21 | f + (11 rows) +``` + +## 6.其他测试 + +- 测试二分查找 + + ``` + postgres=# select id, txid_visible_in_snapshot(id, snap) + postgres-# from snapshot_test, generate_series(90, 160) id + postgres-# where nr = 4; + id | txid_visible_in_snapshot + -----+-------------------------- + 90 | t + 91 | t + 92 | t + 93 | t + 94 | t + 95 | t + 96 | t + 97 | t + 98 | t + 99 | t + 100 | t + 101 | f + ``` + + +- 测试当前值 + + ``` + postgres=# select txid_current() >= txid_snapshot_xmin(txid_current_snapshot()); + ## ?column? 
+ t + (1 row) + ``` + + 我们不能假设当前值总是小于xmax + + ``` + postgres=# select txid_visible_in_snapshot(txid_current(), txid_current_snapshot()); + ## txid_visible_in_snapshot + f + (1 row) + ``` + + 测试64bitness(MOGDB/openGauss将transactionid由int32改为了int64,64位的xid永远不可能耗尽,虽然xid改为了64位,但是过期的xid依旧需要freeze清理,只是永远不用担心会发生xid回卷宕机的风险。 ) + + ``` + postgres=# select txid_snapshot '1000100010001000:1000100010001100:1000100010001012,1000100010001013'; + + ## txid_snapshot + 1000100010001000:1000100010001100:1000100010001012,1000100010001013 + (1 row) + + postgres=# select txid_visible_in_snapshot('1000100010001012', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + + ## txid_visible_in_snapshot + + f + (1 row) + + + + postgres=# select txid_visible_in_snapshot('1000100010001015', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + + ## txid_visible_in_snapshot + + t + (1 row) + ``` + + 测试溢出64bit,9223372036854775807是是263-1,是乘方 也就是63位的最大二进制数字 。 + + ``` + postgres=# SELECT txid_snapshot '1:9223372036854775807:3'; + ## txid_snapshot + 1:9223372036854775807:3 + (1 row) + + postgres=# SELECT txid_snapshot '1:9223372036854775808:3'; + ERROR: invalid input for txid_snapshot: "1:9223372036854775808:3" + LINE 1: SELECT txid_snapshot '1:9223372036854775808:3'; + ^ + CONTEXT: referenced column: txid_snapshot + ``` + + diff --git "a/content/zh/post/2022/MogDB-openGauss-\350\207\252\345\256\232\344\271\211snmptrapd\345\221\212\350\255\246\344\277\241\346\201\257.md" "b/content/zh/post/2022/MogDB-openGauss-\350\207\252\345\256\232\344\271\211snmptrapd\345\221\212\350\255\246\344\277\241\346\201\257.md" new file mode 100644 index 0000000000000000000000000000000000000000..b54366f0a0bfd7677cd2b01d4a5df08cbdd994c1 --- /dev/null +++ "b/content/zh/post/2022/MogDB-openGauss-\350\207\252\345\256\232\344\271\211snmptrapd\345\221\212\350\255\246\344\277\241\346\201\257.md" @@ -0,0 +1,78 @@ ++++ + +title = "MogDB/openGauss 自定义snmptrapd告警信息" + +date = "2022-01-06" + +tags = [ "MogDB/openGauss 自定义snmptrapd告警信息"] + +archives = "2022-01" + +author = "高云龙" + +summary = "MogDB/openGauss 自定义snmptrapd告警信息" + +img = "/zh/post/2022/title/img9.png" + +times = "12:30" + ++++ + +# MogDB/openGauss 自定义snmptrapd告警信息 + +在实际使用中,默认的报警规则信息并不能很好的满足snmp服务端的需求,需要定制化报警信息,这里以添加ip为例,看似一个简单的将IP一行信息单独在报警展示出来,涉及到的配置文件修改还是挺多的。 + +![](figures/20220106-03097507-8fce-424c-8c74-969e1fb06f16.png) + +## 修改prometheus.yml文件 + +首先需要修改prometheus.yml文件,在对应的实例下添加ip标签 + +![](figures/20220106-36068d2a-eccf-45ff-89df-c994c6331802.png) + +## 修改规则报警文件 + +对应的报警规则文件也同样需要修改,我这里使用的是服务器磁盘使用率做报警测试,所以直接修改node\_rules.yml文件,添加ip信息。 + +![](figures/20220106-a00fdef7-cefb-4775-bd75-f6bfb0952b8d.png) + +## 查看alertmanager web界面 + +现在我们就可以在alertmanager的界面上看到我们新添加的ip标签了,同时报警信息里也带了ip。 + +![](figures/20220106-fec3c37b-f253-4aa9-a986-96012785126a.png) + +## 修改snmp\_notifier模版 + +altermanager模块将报警消息推送到snmp\_notifier后,还需要需改snmp\_notifier的描述模版description-template.tpl。 + +![](figures/20220106-e39ed7ff-add2-4ef5-9b4a-45edddfe74ff.png) + +## snmptrapd服务器接受报警信息 + +![](figures/20220106-02524930-39ff-4c6d-898e-4070ab278009.png) + +## 添加额外的模版 + +默认情况下,snmptrapd只会输出三行模版信息,要添加额外的版本信息需要使用–snmp.extra-field-template参数,比如添加第4行模版则在snmp\_notifier启动时指定参数–snmp.extra-field-template=4=/opt/snmp\_notifier/extra-field-template.tpl,模版extra-field-template.tpl可以参考description-template.tpl的格式 + +``` +{{- if .Alerts -}} +{{- range $severity, $alerts := (groupAlertsByLabel .Alerts "severity") -}} +{{- range $index, $alert := $alerts }} +{{ $alert.Annotations.ip }} +{{ end }} +{{ end }} 
+{{- end -}} +``` + +## 启动snmp\_notifier + +``` +nohup /opt/snmp_notifier/snmp_notifier --snmp.trap-description-template=/opt/snmp_notifier/description-template.tpl --snmp.extra-field-template=4=/opt/snmp_notifier/extra-field-template.tpl > /opt/snmp_notifier/snmp_notifier.log 2>&1 & +``` + +## 再次查看snmptrapd服务器接受报警信息 + +![](figures/20220106-a510566b-e8dc-4b21-b5df-974e4bac5cd4.png) + diff --git "a/content/zh/post/2022/MogDB-openGauss\345\205\263\344\272\216PL-SQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" "b/content/zh/post/2022/MogDB-openGauss\345\205\263\344\272\216PL-SQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..33db45f9cbad61a9e69001a04da87484e72cd448 --- /dev/null +++ "b/content/zh/post/2022/MogDB-openGauss\345\205\263\344\272\216PL-SQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" @@ -0,0 +1,226 @@ ++++ + +title = "MogDB/openGauss关于PL/SQL匿名块调用测试" + +date = "2021-12-24" + +tags = [ "MogDB/openGauss关于PL/SQL匿名块调用测试"] + +archives = "2021-12" + +author = "lmj" + +summary = "MogDB/openGauss关于PL/SQL匿名块调用测试" + +img = "/zh/post/2022/title/img7.png" + +times = "12:30" + ++++ + +# MogDB/openGauss关于PL/SQL匿名块调用测试 + +## 一、原理介绍 + +PL/SQL\(Procedure Language/Structure Query Language\)是标准SQL语言添加了过程化功能的一门程序设计语言。 + +单一的SQL语句只能进行数据操作,没有流程控制,无法开发复杂的应用。PL/SQL语言是结合了结构化查询与数据库自身过程控制为一体的强大语言。 + +- 1.PL/SQL原理 + + PL/SQL是一种块结构的语言,它将一组语句放在一个块中,一次性发送给服务器。 + + PL/SQL引擎分析收到PL/SQL语句块中的内容,把其中的过程控制语句由PL/SQL引擎自身去执行,把PL/SQL块中的SQL语句交给服务器的SQL语句执行器执行。 + + PL/SQL块发送给服务器后,先被编译然后执行,对于有名称的PL/SQL块(如子程序)可以单独编译,永久的存储在数据库中,随时准备执行。 + + PL/SQL是一种块结构的语言,一个PL/SQL程序包含了一个或者多个逻辑块,逻辑块中可以声明变量,变量在使用之前必须先声明。 + +- 2.PL/SQL特点 + + –与SQL紧密结合 + + –支持面向对象编程 + + –更好的性能 + + –可移植性 + + –安全性 + +- 3.语法结构 + + 除了正常的执行程序外,PL/SQL还提供了专门的异常处理部分进行异常处理 + + ``` + [DECLARE + --declaration statements] ① + BEGIN + --executable statements ② + [EXCEPTION + --exception statements] ③ + END; + ``` + + **语法解析** + + ①声明部分:声明部分包含了变量和常量的定义。在此声明PL/SQL用到的变量,类型及游标,以及局部的存储过程和函数, + + 这个部分由关键字DECLARE开始,如果不声明变量或者常量,可以省略这部分。 + + ②执行部分:执行部分是 PL/SQL块的指令部分,由关键字BEGIN开始,关键字END结尾。 + + 所有的可执行PL/SQL语句都放在这一部分,该部分执行命令并操作变量。其他的PL/SQL块可以作为子块嵌套在该部分。 + + PL/SQL块的执行部分是必选的。注意END关键字后面用分号结尾。 + + ③异常处理部分:该部分是可选的,该部分用EXCEPTION关键字把可执行部分分成两个小部分,之前的程序是正常运行的程序, + + 一旦出现异常就跳转到异常部分执行。 + +- 4.PL/SQL语句块的类型 + + 1、匿名块 + + 2、命名块 + + –①procedure 存储过程 + + –②function 函数 + + –③package 包 + + –④trigger 触发器 + + 原本大家可能一提到PL/SQL就会想到ORACLE,ORACLE的PL/SQL很强大,它的匿名块调用以及有名块调用可以解决很多问题,在MOGDB/openGauss中,其实也有这样的功能,如下,是我针对MOGDB/openGauss匿名块的一些测试。 + + +## 二、匿名块测试 + +- 1.普通匿名块调用 + + ``` + openGauss=# create table t1(a int ,b text); + CREATE TABLE + + openGauss=# DECLARE + openGauss-# PRAGMA AUTONOMOUS_TRANSACTION; + openGauss-# BEGIN + openGauss$# raise notice 'Normal anonymous block printing.'; + openGauss$# insert into t1 values(1,'I am lmj!'); + openGauss$# END; + openGauss$# / + NOTICE: Normal anonymous block printing. + + ANONYMOUS BLOCK EXECUTE + openGauss=# select * from t1; + a | b + ---+----------- + 1 | I am lmj! 
+ (1 row) + ``` + + +- 2.匿名块和事务影响 + + 启动一个事务后,执行一个自治事务匿名块,如果事务回滚,则匿名块不回滚。 + + ``` + openGauss=# truncate table t1; + TRUNCATE TABLE + + openGauss=# START TRANSACTION; + START TRANSACTION + openGauss=# DECLARE + openGauss-# PRAGMA AUTONOMOUS_TRANSACTION; + openGauss-# BEGIN + openGauss$# raise notice 'an autonomous transaction anonymous block.'; + openGauss$# insert into t1 values(1,'it will commit!'); + openGauss$# END; + openGauss$# / + NOTICE: an autonomous transaction anonymous block. + + ANONYMOUS BLOCK EXECUTE + openGauss=# insert into t1 values(1,'you will rollback!'); + INSERT 0 1 + openGauss=# rollback; + ROLLBACK + openGauss=# select * from t1; + a | b + ---+----------------- + 1 | it will commit! + (1 row) + ``` + +- 3.外部匿名块和内部匿名块 + + 其中外部匿名块是一个公共匿名块,而内部匿名块是一个自治事务匿名块,可以根据如下例子和第二个例子对比事务回滚和匿名块回滚 + + ``` + openGauss=# truncate table t1; + TRUNCATE TABLE + + openGauss=# DECLARE + openGauss-# BEGIN + openGauss$# DECLARE + openGauss$# PRAGMA AUTONOMOUS_TRANSACTION; + openGauss$# BEGIN + openGauss$# raise notice 'just use call.'; + openGauss$# insert into t1 values(1,'can you rollback!'); + openGauss$# END; + openGauss$# insert into t1 values(2,'I will rollback!'); + openGauss$# rollback; + openGauss$# END; + openGauss$# / + NOTICE: just use call. + ANONYMOUS BLOCK EXECUTE + openGauss=# select * from t1; + a | b + ---+--- + (0 rows) + ``` + +- 4.匿名块直接执行自治事务匿名块并引发异常 + + ``` + openGauss=# DECLARE + openGauss-# PRAGMA AUTONOMOUS_TRANSACTION; + openGauss-# res int := 0; + openGauss-# res2 int := 1; + openGauss-# BEGIN + openGauss$# raise notice 'just use call.'; + openGauss$# res2 = res2/res; + openGauss$# END; + openGauss$# / + NOTICE: just use call. + + ERROR: ERROR: division by zero + CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment + ``` + + 匿名块执行错误,会报出异常 + +- 5.异常捕获 + + 在执行期间引发异常后,将捕获匿名块,如下所示,在执行错误后,抛出autonomous throw exception提示 + + ``` + openGauss=# DECLARE + openGauss-# PRAGMA AUTONOMOUS_TRANSACTION; + openGauss-# res int := 0; + openGauss-# res2 int := 1; + openGauss-# BEGIN + openGauss$# raise notice 'error catch.'; + openGauss$# res2 = res2/res; + openGauss$# EXCEPTION + openGauss$# WHEN division_by_zero THEN + openGauss$# raise notice 'autonomous throw exception.'; + openGauss$# END; + openGauss$# / + NOTICE: error catch. + + NOTICE: autonomous throw exception. 
+ ANONYMOUS BLOCK EXECUTE + ``` + + diff --git "a/content/zh/post/2022/MogDB-opengauss\346\233\264\346\224\271\346\225\260\346\215\256\345\272\223\347\233\256\345\275\225\344\275\215\347\275\256(\345\273\272\350\256\256\346\265\213\350\257\225\347\216\257\345\242\203).md" "b/content/zh/post/2022/MogDB-opengauss\346\233\264\346\224\271\346\225\260\346\215\256\345\272\223\347\233\256\345\275\225\344\275\215\347\275\256(\345\273\272\350\256\256\346\265\213\350\257\225\347\216\257\345\242\203).md" new file mode 100644 index 0000000000000000000000000000000000000000..cbaf0164b0f3451423aabeaf93f0ecd1f49eeaf7 --- /dev/null +++ "b/content/zh/post/2022/MogDB-opengauss\346\233\264\346\224\271\346\225\260\346\215\256\345\272\223\347\233\256\345\275\225\344\275\215\347\275\256(\345\273\272\350\256\256\346\265\213\350\257\225\347\216\257\345\242\203).md" @@ -0,0 +1,102 @@ ++++ + +title = "MogDB/opengauss更改数据库目录位置建议测试环境" + +date = "2021-12-15" + +tags = [ "MogDB/opengauss更改数据库目录位置建议测试环境"] + +archives = "2021-12" + +author = "张凡" + +summary = "MogDB/opengauss更改数据库目录位置建议测试环境" + +img = "/zh/post/2022/title/img6.png" + +times = "12:30" + ++++ + +# MogDB/opengauss更改数据库目录位置\(建议测试环境\) + +有时我们部署完数据库,发现随着数据量的不断增加,数据目录所在的磁盘大小不能够满足我们的需求,需要更大的磁盘空间,这时选择重新部署数据库会很麻烦,之前所使用的数据库还需要重新导入,这里介绍将数据库目录更改到别的位置的方法,不建议生产环境使用,建议测试环境使用。 + +## 一、环境说明 + +``` +[root@node1 ~]# cat /etc/redhat-release +CentOS Linux release 7.6.1810 (Core) +``` + +## 二、查看数据目录位置 + +``` +[omm@node1 ~]$ gsql -d postgres -p26000 -r +gsql ((MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:12:59 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +postgres=# show data_directory ; + data_directory +----------------- + /opt/mogdb/data +(1 row) +``` + +## 三、更改数据库位置 + +``` +postgres=# alter system set data_directory='/opt/data'; ==>更改数据目录位置 +NOTICE: please restart the database for the POSTMASTER level parameter to take effect. +ALTER SYSTEM SET +postgres=# show data_directory ; ==>这个参数需要重启数据库生效 + data_directory +----------------- + /opt/mogdb/data +(1 row) + +postgres=# +``` + +## 四、停止数据库,拷贝数据目录 + +``` +[omm@node1 ~]$ gs_ctl stop -D /opt/mogdb/data/ ==>本机采取二进制方式部署,采用标准安装可的使用gs_om -t stop 停止 +[2021-12-15 16:05:07.505][22522][][gs_ctl]: gs_ctl stopped ,datadir is /opt/mogdb/data +waiting for server to shut down........ done +server stopped +[omm@node1 mogdb]$ mkdir -p /opt/data ==>创建新的数据目录 +[omm@node1 mogdb]$ cd /opt/data/ +[omm@node1 data]$ cp -r /opt/mogdb/data/* /opt/data/ ==>将原来数据目录的数据拷贝到新的数据目录下 +[omm@node1 data]$ chmod 0700 /opt/data ==>将新的数据目录赋予0700的权限,否则重启数据库会报错 +``` + +## 五、启动数据库,查看数据目录位置 + +``` +[omm@node1 data]$ gs_ctl start -D /opt/data +[2021-12-15 16:09:17.271][22740][][gs_ctl]: gs_ctl started,datadir is /opt/data +[2021-12-15 16:09:17.569][22740][][gs_ctl]: waiting for server to start... +.0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. +....... +..... +.... +[2021-12-15 16:09:18.632][22740][][gs_ctl]: done +[2021-12-15 16:09:18.632][22740][][gs_ctl]: server started (/opt/data) +[omm@node1 data]$ gsql -d postgres -p26000 -r +gsql ((MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:12:59 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. 
+
+## 6. Summary
+
+This is how to change the data directory of a MogDB/openGauss database. The operation itself is simple, but it is recommended for test environments only, not for production.
+
diff --git "a/content/zh/post/2022/centos7-\345\256\211\350\243\205openGauss\346\236\201\347\256\200\347\211\210\346\234\254.md" "b/content/zh/post/2022/centos7-\345\256\211\350\243\205openGauss\346\236\201\347\256\200\347\211\210\346\234\254.md"
new file mode 100644
index 0000000000000000000000000000000000000000..021d88a21bb39cc579d5d5e2cd43d23dcf47a251
--- /dev/null
+++ "b/content/zh/post/2022/centos7-\345\256\211\350\243\205openGauss\346\236\201\347\256\200\347\211\210\346\234\254.md"
@@ -0,0 +1,162 @@
++++
+
+title = "Installing the openGauss Minimal Edition on CentOS 7"
+
+date = "2021-12-14"
+
+tags = [ "Installing the openGauss Minimal Edition on CentOS 7"]
+
+archives = "2021-12"
+
+author = "雪狼sunny"
+
+summary = "Installing the openGauss Minimal Edition on CentOS 7"
+
+img = "/zh/post/2022/title/img3.png"
+
+times = "12:30"
+
++++
+
+# Installing the openGauss Minimal Edition on CentOS 7
+
+## 1 Basic environment preparation
+
+- System:
+
+    ```
+    [root@bogon ~]# cat /etc/redhat-release
+    CentOS Linux release 7.9.2009 (Core)
+    [root@bogon ~]#
+    ```
+
+- Required software:
+
+    Install net-tools, because CentOS 7 does not ship the netstat command by default:
+
+    ```
+    yum install net-tools -y
+    ```
+
+    Install bzip2, because the official installation package is a .tar.bz2 archive and the extraction command fails without it:
+
+    ```
+    yum install bzip2 -y
+    ```
+
+    Adjust the kernel configuration, because the install script would otherwise fail with the following error:
+
+    ```
+    On systemwide basis, the maximum number of SEMMNI is not correct. the current SEMMNI value is: 128. Please check it.
+    ```
+
+    **Fix:** add the line kernel.sem = 250 32000 100 999 to /etc/sysctl.conf, then run sysctl -p.
+
+    Download the openGauss package with wget:
+
+    ```
+    wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.1.0/x86/openGauss-2.1.0-CentOS-64bit.tar.bz2
+    ```
+
+- Disable the firewall and SELinux:
+
+    ```
+    ## disable the firewall
+
+    systemctl status firewalld
+
+    systemctl disable firewalld.service
+
+    systemctl stop firewalld.service
+
+
+    ## disable SELinux
+
+    sed -i '/SELINUX=/d' /etc/selinux/config
+
+    echo "SELINUX=disabled" >> /etc/selinux/config
+
+    cat /etc/selinux/config|grep -v ^#|grep -v '^$'
+    ```
+
+- Run this command to turn SELinux off immediately, without a reboot:
+
+    ```
+    setenforce 0
+    ```
+
+## 2 Installation environment preparation
+
+```
+groupadd -g 1001 dbgrp
+useradd -u 2001 -g dbgrp omm
+mkdir -p /opt/software/openGauss
+chown -R omm:dbgrp /opt/software/openGauss
+```
+
+- Switch to the omm user and unpack the package:
+
+    ```
+    [root@db1 ~]# su - omm
+    [omm@db1 ~]$ cd /opt/software/openGauss/
+    [omm@db1 openGauss]$ tar -jxf openGauss-2.1.0-CentOS-64bit.tar.bz2 -C /opt/software/openGauss/
+    ```
+
+- Install:
+
+    ```
+    [omm@bogon ~]$ cd /opt/software/openGauss/simpleInstall/
+    [omm@bogon simpleInstall]$ ls
+    finance.sql install.sh README.md school.sql
+    [omm@bogon simpleInstall]$ sh install.sh -w gauss#123
+    -w: initial database password (passed to gs_initdb); it must be set for security reasons.
+    -p: port number for openGauss; defaults to 5432 if not specified.
+    -h|--help: print usage information.
+    ```
+
+    After installation, the database node is named sgnode (set by gs\_initdb).
+
+    If the error "the maximum number of SEMMNI is not correct, the current SEMMNI is xxx. Please check it." appears during execution, switch to a user with root privileges and apply the kernel.sem fix described above (edit /etc/sysctl.conf, then run sysctl -p).
+
+    A successful installation ends with the following screen:
+
+    ![](figures/安装成功会出现如下界面.png)
+
+- Start and log in (note that the first two gsql attempts below fail because -p requires a port number; a sample-schema sketch follows this list):
+
+    ```
+    [omm@bogon ~]$ gs_ctl start -D $GAUSSHOME/data/single_node -Z single_node
+    [2021-12-14 15:32:45.083][11887][][gs_ctl]: gs_ctl started,datadir is /opt/software/openGauss/data/single_node
+    [2021-12-14 15:32:45.089][11887][][gs_ctl]: another server might be running; Please use the restart command
+    [omm@bogon ~]$ gsql -d postgres -p -r
+    failed to connect Unknown:-r.
+    [omm@bogon ~]$ gsql -d postgres -p
+    gsql: option requires an argument -- p
+    Try "gsql --help" for more information.
+    [omm@bogon ~]$ gsql -d postgres -p 5432 -r
+    gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr )
+    Non-SSL connection (SSL connection is recommended when requiring high-security)
+    Type "help" for help.
+
+    openGauss=# \l
+                             List of databases
+       Name    | Owner | Encoding |   Collate   |    Ctype    | Access privileges
+    -----------+-------+----------+-------------+-------------+-------------------
+     postgres  | omm   | UTF8     | en_US.UTF-8 | en_US.UTF-8 |
+     template0 | omm   | UTF8     | en_US.UTF-8 | en_US.UTF-8 | =c/omm          +
+               |       |          |             |             | omm=CTc/omm
+     template1 | omm   | UTF8     | en_US.UTF-8 | en_US.UTF-8 | =c/omm          +
+               |       |          |             |             | omm=CTc/omm
+    (3 rows)
+
+    openGauss=# \q
+    ```
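+
+To give the fresh instance a quick functional check, you can load one of the sample schemas that ship in the simpleInstall directory (a minimal sketch; it assumes school.sql only creates objects inside the database it is run against):
+
+```
+# create a database and load the bundled sample schema into it (sketch)
+gsql -d postgres -p 5432 -c "CREATE DATABASE school;"
+gsql -d school -p 5432 -f /opt/software/openGauss/simpleInstall/school.sql
+```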
+
+This write-up is rough, but the resulting installation works without any problems.
+
+Reference: https://opengauss.org/zh/docs/2.1.0/docs/installation/%E5%8D%95%E8%8A%82%E7%82%B9%E5%AE%89%E8%A3%85.html
+
diff --git a/content/zh/post/2022/figures/0497eb639cb14b5182dc5b2aff97a757.png b/content/zh/post/2022/figures/0497eb639cb14b5182dc5b2aff97a757.png
new file mode 100644
index 0000000000000000000000000000000000000000..c93afc07ec11bfe5abbec34781a1a3eb66c9ac17
Binary files /dev/null and b/content/zh/post/2022/figures/0497eb639cb14b5182dc5b2aff97a757.png differ
diff --git a/content/zh/post/2022/figures/05476910e9e44c9fb0723d26b0f467f4.png b/content/zh/post/2022/figures/05476910e9e44c9fb0723d26b0f467f4.png
new file mode 100644
index 0000000000000000000000000000000000000000..864e3b8fa9fd25d6932059fc55a8d11b81942ae4
Binary files /dev/null and b/content/zh/post/2022/figures/05476910e9e44c9fb0723d26b0f467f4.png differ
diff --git a/content/zh/post/2022/figures/0bacb67d8b9d4ff6b786b2b734458b10.png b/content/zh/post/2022/figures/0bacb67d8b9d4ff6b786b2b734458b10.png
new file mode 100644
index 0000000000000000000000000000000000000000..0d355ada50f4bcd081dbeb6b1cb4af7061f02187
Binary files /dev/null and b/content/zh/post/2022/figures/0bacb67d8b9d4ff6b786b2b734458b10.png differ
diff --git a/content/zh/post/2022/figures/0feab0d29d324acc9c4e87ffc7a3e826.png b/content/zh/post/2022/figures/0feab0d29d324acc9c4e87ffc7a3e826.png
new file mode 100644
index 0000000000000000000000000000000000000000..8971898569210edc1c45c92abb552934261c407a
Binary files /dev/null and b/content/zh/post/2022/figures/0feab0d29d324acc9c4e87ffc7a3e826.png differ
diff --git a/content/zh/post/2022/figures/128f20b65c554c85bbcda62acad5616e.png b/content/zh/post/2022/figures/128f20b65c554c85bbcda62acad5616e.png
new file mode 100644
index 0000000000000000000000000000000000000000..7303cf2f06a86e61bb2fc3c9a2057cb04c826531
Binary files /dev/null and b/content/zh/post/2022/figures/128f20b65c554c85bbcda62acad5616e.png differ
diff --git a/content/zh/post/2022/figures/17fb09d479354307b7e2a8b27cbd2f7e.png b/content/zh/post/2022/figures/17fb09d479354307b7e2a8b27cbd2f7e.png
new file mode 100644
index 0000000000000000000000000000000000000000..f18a32d614e60fb688c2548479183c3234f62bd9
Binary files /dev/null and b/content/zh/post/2022/figures/17fb09d479354307b7e2a8b27cbd2f7e.png differ
diff --git a/content/zh/post/2022/figures/1e185faf72d14f6bb07e527d753614ed.png b/content/zh/post/2022/figures/1e185faf72d14f6bb07e527d753614ed.png
new file mode 100644
index 0000000000000000000000000000000000000000..43ff9ab8a42250d3894c24a05d70de149eafe466
Binary files /dev/null and b/content/zh/post/2022/figures/1e185faf72d14f6bb07e527d753614ed.png differ
diff --git a/content/zh/post/2022/figures/1e1aea950edc44d99adc91c658a9e14a.png b/content/zh/post/2022/figures/1e1aea950edc44d99adc91c658a9e14a.png
new file mode 100644
index
0000000000000000000000000000000000000000..838c524603941c7931801e6ed4f852b847381fc8 Binary files /dev/null and b/content/zh/post/2022/figures/1e1aea950edc44d99adc91c658a9e14a.png differ diff --git a/content/zh/post/2022/figures/20211015-225127-update.png b/content/zh/post/2022/figures/20211015-225127-update.png new file mode 100644 index 0000000000000000000000000000000000000000..481f898e5a451c2c1c502123a623cf0fddcc6a74 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225127-update.png differ diff --git a/content/zh/post/2022/figures/20211015-225510-fig-5-03.png b/content/zh/post/2022/figures/20211015-225510-fig-5-03.png new file mode 100644 index 0000000000000000000000000000000000000000..1d574e2e56d3cee1653f7b71038c3495c1fa32d0 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225510-fig-5-03.png differ diff --git a/content/zh/post/2022/figures/20211015-225511-fig-5-04.png b/content/zh/post/2022/figures/20211015-225511-fig-5-04.png new file mode 100644 index 0000000000000000000000000000000000000000..448edebc0ca928085083b41490c7f6ce5537c7d2 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225511-fig-5-04.png differ diff --git a/content/zh/post/2022/figures/20211015-225511-fig-5-05.png b/content/zh/post/2022/figures/20211015-225511-fig-5-05.png new file mode 100644 index 0000000000000000000000000000000000000000..ba5a91d3a741191fece09420d0e528d0b793661a Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225511-fig-5-05.png differ diff --git a/content/zh/post/2022/figures/20211015-225511-fig-5-06.png b/content/zh/post/2022/figures/20211015-225511-fig-5-06.png new file mode 100644 index 0000000000000000000000000000000000000000..ec42630e6ab731d2cdff5c682c3b68f2858b5f41 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225511-fig-5-06.png differ diff --git a/content/zh/post/2022/figures/20211015-225512-72285f7db5051f38a7940e7f235f49df.png b/content/zh/post/2022/figures/20211015-225512-72285f7db5051f38a7940e7f235f49df.png new file mode 100644 index 0000000000000000000000000000000000000000..a31040b940a3ceb94c49369163d8032ce291b8a1 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225512-72285f7db5051f38a7940e7f235f49df.png differ diff --git a/content/zh/post/2022/figures/20211015-225512-d34f1a911a8804c0b1f8d791a65f175e.png b/content/zh/post/2022/figures/20211015-225512-d34f1a911a8804c0b1f8d791a65f175e.png new file mode 100644 index 0000000000000000000000000000000000000000..89b8d017aef8c37174214e17653996811cd43b77 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225512-d34f1a911a8804c0b1f8d791a65f175e.png differ diff --git a/content/zh/post/2022/figures/20211015-225513-64eaedd1d1501b104652b104bd3152b2.png b/content/zh/post/2022/figures/20211015-225513-64eaedd1d1501b104652b104bd3152b2.png new file mode 100644 index 0000000000000000000000000000000000000000..90415e2cefa209883e57263aa0a06970060faa41 Binary files /dev/null and b/content/zh/post/2022/figures/20211015-225513-64eaedd1d1501b104652b104bd3152b2.png differ diff --git a/content/zh/post/2022/figures/20211017-204222-dc83a9cc72803e849caa49dae027369f.png b/content/zh/post/2022/figures/20211017-204222-dc83a9cc72803e849caa49dae027369f.png new file mode 100644 index 0000000000000000000000000000000000000000..ce18bb6706250da0e6adabdf825ab2ada4cf242f Binary files /dev/null and b/content/zh/post/2022/figures/20211017-204222-dc83a9cc72803e849caa49dae027369f.png differ diff --git 
a/content/zh/post/2022/figures/20211017-210839-v2-58a3a0df18e1a92b9cc209036fb149ab_b.jpg b/content/zh/post/2022/figures/20211017-210839-v2-58a3a0df18e1a92b9cc209036fb149ab_b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2eb470c3661d574785df9ae4c30d4c0c6c808097 Binary files /dev/null and b/content/zh/post/2022/figures/20211017-210839-v2-58a3a0df18e1a92b9cc209036fb149ab_b.jpg differ diff --git a/content/zh/post/2022/figures/20211203-7294cdd5-5b8a-41dd-9558-468c56d0e49d.png b/content/zh/post/2022/figures/20211203-7294cdd5-5b8a-41dd-9558-468c56d0e49d.png new file mode 100644 index 0000000000000000000000000000000000000000..6297ff5616df65adac566d3353e0fde2baa6b1aa Binary files /dev/null and b/content/zh/post/2022/figures/20211203-7294cdd5-5b8a-41dd-9558-468c56d0e49d.png differ diff --git a/content/zh/post/2022/figures/20211203-8632d683-5aa7-4e1f-907c-3952796968f4.png b/content/zh/post/2022/figures/20211203-8632d683-5aa7-4e1f-907c-3952796968f4.png new file mode 100644 index 0000000000000000000000000000000000000000..d16ccb9c68401d34c58af39b0a07bdf898d402fe Binary files /dev/null and b/content/zh/post/2022/figures/20211203-8632d683-5aa7-4e1f-907c-3952796968f4.png differ diff --git a/content/zh/post/2022/figures/20211204-00e0901d-e71f-46d3-95ed-9e14cb28b1ac.png b/content/zh/post/2022/figures/20211204-00e0901d-e71f-46d3-95ed-9e14cb28b1ac.png new file mode 100644 index 0000000000000000000000000000000000000000..4babb78798c586c2bd1305d1a7b5d31731623c7c Binary files /dev/null and b/content/zh/post/2022/figures/20211204-00e0901d-e71f-46d3-95ed-9e14cb28b1ac.png differ diff --git a/content/zh/post/2022/figures/20211204-10f40098-2578-4da8-83c9-dd493f7d3111.png b/content/zh/post/2022/figures/20211204-10f40098-2578-4da8-83c9-dd493f7d3111.png new file mode 100644 index 0000000000000000000000000000000000000000..0eca84c53c45a78748b2ac30441d3c2ae0a0625e Binary files /dev/null and b/content/zh/post/2022/figures/20211204-10f40098-2578-4da8-83c9-dd493f7d3111.png differ diff --git a/content/zh/post/2022/figures/20211204-17ff081b-5a00-4c19-974a-69a531902983.png b/content/zh/post/2022/figures/20211204-17ff081b-5a00-4c19-974a-69a531902983.png new file mode 100644 index 0000000000000000000000000000000000000000..7d1317b2efd2630911aa1b6cae4f05c017440437 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-17ff081b-5a00-4c19-974a-69a531902983.png differ diff --git a/content/zh/post/2022/figures/20211204-183e159b-ef0f-4134-b134-71f99ba6e89a.png b/content/zh/post/2022/figures/20211204-183e159b-ef0f-4134-b134-71f99ba6e89a.png new file mode 100644 index 0000000000000000000000000000000000000000..a604acac42f8692befc2b70a6314a4fd1f6177ed Binary files /dev/null and b/content/zh/post/2022/figures/20211204-183e159b-ef0f-4134-b134-71f99ba6e89a.png differ diff --git a/content/zh/post/2022/figures/20211204-25c40a97-f135-48be-af18-f1fe9986db5b.png b/content/zh/post/2022/figures/20211204-25c40a97-f135-48be-af18-f1fe9986db5b.png new file mode 100644 index 0000000000000000000000000000000000000000..b789b3c71b71b411361b06b72b11cba51b752cb8 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-25c40a97-f135-48be-af18-f1fe9986db5b.png differ diff --git a/content/zh/post/2022/figures/20211204-32f1f188-106d-4627-8b7c-c939ddcb1c59.png b/content/zh/post/2022/figures/20211204-32f1f188-106d-4627-8b7c-c939ddcb1c59.png new file mode 100644 index 0000000000000000000000000000000000000000..f315d94a03a2ebb05fd0d3c91c5530385b98a7a0 Binary files /dev/null and 
b/content/zh/post/2022/figures/20211204-32f1f188-106d-4627-8b7c-c939ddcb1c59.png differ diff --git a/content/zh/post/2022/figures/20211204-41c59db9-f61d-4dae-b29d-7036223ba567.png b/content/zh/post/2022/figures/20211204-41c59db9-f61d-4dae-b29d-7036223ba567.png new file mode 100644 index 0000000000000000000000000000000000000000..487c06b27ef1fe62cb3c40c4245821e8c9b3430a Binary files /dev/null and b/content/zh/post/2022/figures/20211204-41c59db9-f61d-4dae-b29d-7036223ba567.png differ diff --git a/content/zh/post/2022/figures/20211204-7e5f33ac-8420-463d-9639-f67586ad76ed.png b/content/zh/post/2022/figures/20211204-7e5f33ac-8420-463d-9639-f67586ad76ed.png new file mode 100644 index 0000000000000000000000000000000000000000..0bb2ba7ea4b90d596a24c7689585895c01adddd1 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-7e5f33ac-8420-463d-9639-f67586ad76ed.png differ diff --git a/content/zh/post/2022/figures/20211204-a73fa928-786e-406e-8289-c87c4275ab5f.png b/content/zh/post/2022/figures/20211204-a73fa928-786e-406e-8289-c87c4275ab5f.png new file mode 100644 index 0000000000000000000000000000000000000000..8305a75178d74b1c68fe0780afedb5feef90083a Binary files /dev/null and b/content/zh/post/2022/figures/20211204-a73fa928-786e-406e-8289-c87c4275ab5f.png differ diff --git a/content/zh/post/2022/figures/20211204-ae369c99-359e-419f-a4c2-9dba1f855cd5.png b/content/zh/post/2022/figures/20211204-ae369c99-359e-419f-a4c2-9dba1f855cd5.png new file mode 100644 index 0000000000000000000000000000000000000000..1fa6d5fd5a8424d17193f8e3b3b3d27220d83043 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-ae369c99-359e-419f-a4c2-9dba1f855cd5.png differ diff --git a/content/zh/post/2022/figures/20211204-aec67dd0-2b24-4f75-8d74-9ea4b2a22edd.png b/content/zh/post/2022/figures/20211204-aec67dd0-2b24-4f75-8d74-9ea4b2a22edd.png new file mode 100644 index 0000000000000000000000000000000000000000..1dac8aa838a69cb66a31b4c373562e280854139f Binary files /dev/null and b/content/zh/post/2022/figures/20211204-aec67dd0-2b24-4f75-8d74-9ea4b2a22edd.png differ diff --git a/content/zh/post/2022/figures/20211204-b6e374da-906c-4f47-bc31-96f0ca3037fa.png b/content/zh/post/2022/figures/20211204-b6e374da-906c-4f47-bc31-96f0ca3037fa.png new file mode 100644 index 0000000000000000000000000000000000000000..e428d5b76562c8a83be7dbc84548bc7801b89a69 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-b6e374da-906c-4f47-bc31-96f0ca3037fa.png differ diff --git a/content/zh/post/2022/figures/20211204-ba7b78a2-3978-45b4-b868-61334e4087f2.png b/content/zh/post/2022/figures/20211204-ba7b78a2-3978-45b4-b868-61334e4087f2.png new file mode 100644 index 0000000000000000000000000000000000000000..75a4863733bb6105fbfab011fdd7e5b1fe1d5c72 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-ba7b78a2-3978-45b4-b868-61334e4087f2.png differ diff --git a/content/zh/post/2022/figures/20211204-c0cfe4c4-d76b-4a8c-bd04-7a2f81f603a6.png b/content/zh/post/2022/figures/20211204-c0cfe4c4-d76b-4a8c-bd04-7a2f81f603a6.png new file mode 100644 index 0000000000000000000000000000000000000000..3a39eb7614216719a8d22fd4b5e28d7176103804 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-c0cfe4c4-d76b-4a8c-bd04-7a2f81f603a6.png differ diff --git a/content/zh/post/2022/figures/20211204-c6e730da-357c-4f03-92d7-95d47f015284.png b/content/zh/post/2022/figures/20211204-c6e730da-357c-4f03-92d7-95d47f015284.png new file mode 100644 index 0000000000000000000000000000000000000000..ac85a5955ef6ad9191312a316a794268f2412fef 
Binary files /dev/null and b/content/zh/post/2022/figures/20211204-c6e730da-357c-4f03-92d7-95d47f015284.png differ diff --git a/content/zh/post/2022/figures/20211204-c8674984-9927-4b9d-bdde-fb9725ea88ee.png b/content/zh/post/2022/figures/20211204-c8674984-9927-4b9d-bdde-fb9725ea88ee.png new file mode 100644 index 0000000000000000000000000000000000000000..48ade05b02fa82a89e09b316994df74fcb82baca Binary files /dev/null and b/content/zh/post/2022/figures/20211204-c8674984-9927-4b9d-bdde-fb9725ea88ee.png differ diff --git a/content/zh/post/2022/figures/20211204-cf9d6243-d31c-4e37-aa26-953e2822e0c1.png b/content/zh/post/2022/figures/20211204-cf9d6243-d31c-4e37-aa26-953e2822e0c1.png new file mode 100644 index 0000000000000000000000000000000000000000..c84230d44dba9cc68f2c5fd6e69ec5aff2cb5912 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-cf9d6243-d31c-4e37-aa26-953e2822e0c1.png differ diff --git a/content/zh/post/2022/figures/20211204-cfc47e9a-4272-48e2-9fba-ab5a17c9b323.png b/content/zh/post/2022/figures/20211204-cfc47e9a-4272-48e2-9fba-ab5a17c9b323.png new file mode 100644 index 0000000000000000000000000000000000000000..a9ca333867019fe50cb57edf09a2af4926fb3ef7 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-cfc47e9a-4272-48e2-9fba-ab5a17c9b323.png differ diff --git a/content/zh/post/2022/figures/20211204-eb905549-76da-4976-aaa6-dfef16877d00.png b/content/zh/post/2022/figures/20211204-eb905549-76da-4976-aaa6-dfef16877d00.png new file mode 100644 index 0000000000000000000000000000000000000000..dd724e0d88a9bef64bd751b6002aeda6152169e3 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-eb905549-76da-4976-aaa6-dfef16877d00.png differ diff --git a/content/zh/post/2022/figures/20211204-ec617df5-639c-43a2-a45e-5d84738909c5.png b/content/zh/post/2022/figures/20211204-ec617df5-639c-43a2-a45e-5d84738909c5.png new file mode 100644 index 0000000000000000000000000000000000000000..be9ec6d3d2d82baad20fef36377952702c579549 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-ec617df5-639c-43a2-a45e-5d84738909c5.png differ diff --git a/content/zh/post/2022/figures/20211204-f08b84a5-2be7-4bc4-826a-397c9ad77d79.png b/content/zh/post/2022/figures/20211204-f08b84a5-2be7-4bc4-826a-397c9ad77d79.png new file mode 100644 index 0000000000000000000000000000000000000000..e06701ead547264e1127a5d42b26dd2c0c1fa35e Binary files /dev/null and b/content/zh/post/2022/figures/20211204-f08b84a5-2be7-4bc4-826a-397c9ad77d79.png differ diff --git a/content/zh/post/2022/figures/20211204-fc1c14b8-f666-4600-b21e-b73aec582740.png b/content/zh/post/2022/figures/20211204-fc1c14b8-f666-4600-b21e-b73aec582740.png new file mode 100644 index 0000000000000000000000000000000000000000..5a87c69729318bfa7ddc75ddab30207bb2e02e09 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-fc1c14b8-f666-4600-b21e-b73aec582740.png differ diff --git a/content/zh/post/2022/figures/20211204-ffad91b6-007a-441c-8af8-835a9c0e0597.png b/content/zh/post/2022/figures/20211204-ffad91b6-007a-441c-8af8-835a9c0e0597.png new file mode 100644 index 0000000000000000000000000000000000000000..7bd74a94450fa999a7c30d94b030df1f1284d101 Binary files /dev/null and b/content/zh/post/2022/figures/20211204-ffad91b6-007a-441c-8af8-835a9c0e0597.png differ diff --git a/content/zh/post/2022/figures/20211216-05611555-f74d-47d5-8057-a86a6fd5e38f.png b/content/zh/post/2022/figures/20211216-05611555-f74d-47d5-8057-a86a6fd5e38f.png new file mode 100644 index 
0000000000000000000000000000000000000000..70bdc3f06fc1d17ee17642c4866ac81dfe8d2e8d Binary files /dev/null and b/content/zh/post/2022/figures/20211216-05611555-f74d-47d5-8057-a86a6fd5e38f.png differ diff --git a/content/zh/post/2022/figures/20211216-2e9cd439-b92e-4fcd-8180-ef7096c80a16.png b/content/zh/post/2022/figures/20211216-2e9cd439-b92e-4fcd-8180-ef7096c80a16.png new file mode 100644 index 0000000000000000000000000000000000000000..54f565eafb318182382b5b0ae491739477b244d8 Binary files /dev/null and b/content/zh/post/2022/figures/20211216-2e9cd439-b92e-4fcd-8180-ef7096c80a16.png differ diff --git a/content/zh/post/2022/figures/20211216-b9c6b9ce-6a77-4ce0-a064-291015801db2.png b/content/zh/post/2022/figures/20211216-b9c6b9ce-6a77-4ce0-a064-291015801db2.png new file mode 100644 index 0000000000000000000000000000000000000000..ae10b7c699123e524d996fb2475763192a8b6517 Binary files /dev/null and b/content/zh/post/2022/figures/20211216-b9c6b9ce-6a77-4ce0-a064-291015801db2.png differ diff --git a/content/zh/post/2022/figures/20211216-cd0ca2d6-dd3c-41d5-9643-775edc3e9035.png b/content/zh/post/2022/figures/20211216-cd0ca2d6-dd3c-41d5-9643-775edc3e9035.png new file mode 100644 index 0000000000000000000000000000000000000000..b0efe89242bc30ca5904b71d3eff765eecffd614 Binary files /dev/null and b/content/zh/post/2022/figures/20211216-cd0ca2d6-dd3c-41d5-9643-775edc3e9035.png differ diff --git a/content/zh/post/2022/figures/20211223-01cf061e-a19f-4516-9ddf-d38eb5bbbc86.png b/content/zh/post/2022/figures/20211223-01cf061e-a19f-4516-9ddf-d38eb5bbbc86.png new file mode 100644 index 0000000000000000000000000000000000000000..ed878049a36441975138595a7ab0079c4baff4d2 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-01cf061e-a19f-4516-9ddf-d38eb5bbbc86.png differ diff --git a/content/zh/post/2022/figures/20211223-453c2df5-151d-4333-a812-732e1a32313b.png b/content/zh/post/2022/figures/20211223-453c2df5-151d-4333-a812-732e1a32313b.png new file mode 100644 index 0000000000000000000000000000000000000000..e258154c1d0b52f79be87fd4570da1bb1721657a Binary files /dev/null and b/content/zh/post/2022/figures/20211223-453c2df5-151d-4333-a812-732e1a32313b.png differ diff --git a/content/zh/post/2022/figures/20211223-60e81928-181c-4964-b0ec-abdd2acc7da7.png b/content/zh/post/2022/figures/20211223-60e81928-181c-4964-b0ec-abdd2acc7da7.png new file mode 100644 index 0000000000000000000000000000000000000000..a43926f0230c4ba8a6523e5429e0dda4e5885908 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-60e81928-181c-4964-b0ec-abdd2acc7da7.png differ diff --git a/content/zh/post/2022/figures/20211223-7afbf443-21c5-4855-8ed7-c264abaf9ff0.png b/content/zh/post/2022/figures/20211223-7afbf443-21c5-4855-8ed7-c264abaf9ff0.png new file mode 100644 index 0000000000000000000000000000000000000000..4fc27ca5c1058a159d5cd750b934ca1c468dd057 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-7afbf443-21c5-4855-8ed7-c264abaf9ff0.png differ diff --git a/content/zh/post/2022/figures/20211223-83e9cf25-6bbc-4e0e-a24d-963d9050ae73.png b/content/zh/post/2022/figures/20211223-83e9cf25-6bbc-4e0e-a24d-963d9050ae73.png new file mode 100644 index 0000000000000000000000000000000000000000..727b87b2bbfaed3e445b8ac80a90d79681e94390 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-83e9cf25-6bbc-4e0e-a24d-963d9050ae73.png differ diff --git a/content/zh/post/2022/figures/20211223-8c6710da-e8ba-4c22-a1dd-dc76ecaec07a.png b/content/zh/post/2022/figures/20211223-8c6710da-e8ba-4c22-a1dd-dc76ecaec07a.png 
new file mode 100644 index 0000000000000000000000000000000000000000..0b280a7401e075154afd043494ad3be826854d0a Binary files /dev/null and b/content/zh/post/2022/figures/20211223-8c6710da-e8ba-4c22-a1dd-dc76ecaec07a.png differ diff --git a/content/zh/post/2022/figures/20211223-8e28c064-237c-4c48-8d6d-7498b11f1c3b.png b/content/zh/post/2022/figures/20211223-8e28c064-237c-4c48-8d6d-7498b11f1c3b.png new file mode 100644 index 0000000000000000000000000000000000000000..d98e87e855ed813ce9b1dc511e7eefb14681f5f7 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-8e28c064-237c-4c48-8d6d-7498b11f1c3b.png differ diff --git a/content/zh/post/2022/figures/20211223-92cb0889-6352-4ae6-a73f-1ec772e8a730.png b/content/zh/post/2022/figures/20211223-92cb0889-6352-4ae6-a73f-1ec772e8a730.png new file mode 100644 index 0000000000000000000000000000000000000000..2801ceb245952612ff8d74ab6b0f3320fa4bdce9 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-92cb0889-6352-4ae6-a73f-1ec772e8a730.png differ diff --git a/content/zh/post/2022/figures/20211223-9c55c807-e30b-44a9-8810-4d2b70db10a9.png b/content/zh/post/2022/figures/20211223-9c55c807-e30b-44a9-8810-4d2b70db10a9.png new file mode 100644 index 0000000000000000000000000000000000000000..5b63821ac2a1bc0c7f714290fc67b9c826221a77 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-9c55c807-e30b-44a9-8810-4d2b70db10a9.png differ diff --git a/content/zh/post/2022/figures/20211223-ae44972c-4cc6-49b7-94c5-5b507039a686.png b/content/zh/post/2022/figures/20211223-ae44972c-4cc6-49b7-94c5-5b507039a686.png new file mode 100644 index 0000000000000000000000000000000000000000..fad7bb7c8ea49982827d5d58f4f4e380147a0f03 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-ae44972c-4cc6-49b7-94c5-5b507039a686.png differ diff --git a/content/zh/post/2022/figures/20211223-c49e9596-383a-41c4-8057-77cdfd9e8f5e.png b/content/zh/post/2022/figures/20211223-c49e9596-383a-41c4-8057-77cdfd9e8f5e.png new file mode 100644 index 0000000000000000000000000000000000000000..3c9a607823ebe007dc21c165618f1d321f48df73 Binary files /dev/null and b/content/zh/post/2022/figures/20211223-c49e9596-383a-41c4-8057-77cdfd9e8f5e.png differ diff --git a/content/zh/post/2022/figures/20211223-ef70cfd4-da07-4c1d-aabe-cc867cedbc80.png b/content/zh/post/2022/figures/20211223-ef70cfd4-da07-4c1d-aabe-cc867cedbc80.png new file mode 100644 index 0000000000000000000000000000000000000000..31ffa5881dc342ca0466ded20706002b17b637ab Binary files /dev/null and b/content/zh/post/2022/figures/20211223-ef70cfd4-da07-4c1d-aabe-cc867cedbc80.png differ diff --git a/content/zh/post/2022/figures/20220106-02524930-39ff-4c6d-898e-4070ab278009.png b/content/zh/post/2022/figures/20220106-02524930-39ff-4c6d-898e-4070ab278009.png new file mode 100644 index 0000000000000000000000000000000000000000..aebbd447a1e0820bdc56d66313358a0b9961a20f Binary files /dev/null and b/content/zh/post/2022/figures/20220106-02524930-39ff-4c6d-898e-4070ab278009.png differ diff --git a/content/zh/post/2022/figures/20220106-03097507-8fce-424c-8c74-969e1fb06f16.png b/content/zh/post/2022/figures/20220106-03097507-8fce-424c-8c74-969e1fb06f16.png new file mode 100644 index 0000000000000000000000000000000000000000..09afd5367d5e1a8e05ea157143908b81c0a5cd69 Binary files /dev/null and b/content/zh/post/2022/figures/20220106-03097507-8fce-424c-8c74-969e1fb06f16.png differ diff --git a/content/zh/post/2022/figures/20220106-36068d2a-eccf-45ff-89df-c994c6331802.png 
b/content/zh/post/2022/figures/20220106-36068d2a-eccf-45ff-89df-c994c6331802.png new file mode 100644 index 0000000000000000000000000000000000000000..f532a04bf18ba67f375e04f67e060fe2a5de1ef9 Binary files /dev/null and b/content/zh/post/2022/figures/20220106-36068d2a-eccf-45ff-89df-c994c6331802.png differ diff --git a/content/zh/post/2022/figures/20220106-a00fdef7-cefb-4775-bd75-f6bfb0952b8d.png b/content/zh/post/2022/figures/20220106-a00fdef7-cefb-4775-bd75-f6bfb0952b8d.png new file mode 100644 index 0000000000000000000000000000000000000000..b57fbe34f283ebc026f154c4e1b8719742d92690 Binary files /dev/null and b/content/zh/post/2022/figures/20220106-a00fdef7-cefb-4775-bd75-f6bfb0952b8d.png differ diff --git a/content/zh/post/2022/figures/20220106-a510566b-e8dc-4b21-b5df-974e4bac5cd4.png b/content/zh/post/2022/figures/20220106-a510566b-e8dc-4b21-b5df-974e4bac5cd4.png new file mode 100644 index 0000000000000000000000000000000000000000..d385047ab0c808c0287c97cdd9ebbbdd1fc51735 Binary files /dev/null and b/content/zh/post/2022/figures/20220106-a510566b-e8dc-4b21-b5df-974e4bac5cd4.png differ diff --git a/content/zh/post/2022/figures/20220106-e39ed7ff-add2-4ef5-9b4a-45edddfe74ff.png b/content/zh/post/2022/figures/20220106-e39ed7ff-add2-4ef5-9b4a-45edddfe74ff.png new file mode 100644 index 0000000000000000000000000000000000000000..cafc81772fe9e11227290703a20f6bc9f1738846 Binary files /dev/null and b/content/zh/post/2022/figures/20220106-e39ed7ff-add2-4ef5-9b4a-45edddfe74ff.png differ diff --git a/content/zh/post/2022/figures/20220106-fec3c37b-f253-4aa9-a986-96012785126a.png b/content/zh/post/2022/figures/20220106-fec3c37b-f253-4aa9-a986-96012785126a.png new file mode 100644 index 0000000000000000000000000000000000000000..5d29f51e53a200a7107138126c21fea57af716ae Binary files /dev/null and b/content/zh/post/2022/figures/20220106-fec3c37b-f253-4aa9-a986-96012785126a.png differ diff --git a/content/zh/post/2022/figures/20220107-154aa7ec-6a24-41aa-8fe4-0eee137d0982.png b/content/zh/post/2022/figures/20220107-154aa7ec-6a24-41aa-8fe4-0eee137d0982.png new file mode 100644 index 0000000000000000000000000000000000000000..c5b803e5863889bc79fa9c2651c4742c4bc40191 Binary files /dev/null and b/content/zh/post/2022/figures/20220107-154aa7ec-6a24-41aa-8fe4-0eee137d0982.png differ diff --git a/content/zh/post/2022/figures/20220107-2073c9b3-0749-4d3d-a577-cf9467225d37.png b/content/zh/post/2022/figures/20220107-2073c9b3-0749-4d3d-a577-cf9467225d37.png new file mode 100644 index 0000000000000000000000000000000000000000..997aad3e04ca89b0e750bfb5e2050ee0d95df7e4 Binary files /dev/null and b/content/zh/post/2022/figures/20220107-2073c9b3-0749-4d3d-a577-cf9467225d37.png differ diff --git a/content/zh/post/2022/figures/20220107-39368262-8b82-4c5d-973d-c268dab99042.png b/content/zh/post/2022/figures/20220107-39368262-8b82-4c5d-973d-c268dab99042.png new file mode 100644 index 0000000000000000000000000000000000000000..4a6ebf97eabe536833d4afbc63770db130de9e89 Binary files /dev/null and b/content/zh/post/2022/figures/20220107-39368262-8b82-4c5d-973d-c268dab99042.png differ diff --git a/content/zh/post/2022/figures/20220107-62164f26-2335-4465-ad23-47148ecae8a1.png b/content/zh/post/2022/figures/20220107-62164f26-2335-4465-ad23-47148ecae8a1.png new file mode 100644 index 0000000000000000000000000000000000000000..f9d2972e242530ecda89e2c1cc80d54897cb45c3 Binary files /dev/null and b/content/zh/post/2022/figures/20220107-62164f26-2335-4465-ad23-47148ecae8a1.png differ diff --git 
a/content/zh/post/2022/figures/20220107-6c96183e-8ed2-4eac-840d-6de2b6c9e746.png b/content/zh/post/2022/figures/20220107-6c96183e-8ed2-4eac-840d-6de2b6c9e746.png new file mode 100644 index 0000000000000000000000000000000000000000..517cae679afecb33b52b338e7e22834a9d2613ab Binary files /dev/null and b/content/zh/post/2022/figures/20220107-6c96183e-8ed2-4eac-840d-6de2b6c9e746.png differ diff --git a/content/zh/post/2022/figures/20220107-7b9036fd-66fb-44ff-9ad6-61a878d5940b.png b/content/zh/post/2022/figures/20220107-7b9036fd-66fb-44ff-9ad6-61a878d5940b.png new file mode 100644 index 0000000000000000000000000000000000000000..7ebb2bed99bdac96204c2cf30ce6f5b21ae87376 Binary files /dev/null and b/content/zh/post/2022/figures/20220107-7b9036fd-66fb-44ff-9ad6-61a878d5940b.png differ diff --git a/content/zh/post/2022/figures/20220107-ee45e332-8e56-4b07-a765-b1e5ce6df6b9.png b/content/zh/post/2022/figures/20220107-ee45e332-8e56-4b07-a765-b1e5ce6df6b9.png new file mode 100644 index 0000000000000000000000000000000000000000..d2651489047259b818fae49599920ae17522208f Binary files /dev/null and b/content/zh/post/2022/figures/20220107-ee45e332-8e56-4b07-a765-b1e5ce6df6b9.png differ diff --git a/content/zh/post/2022/figures/22b37a0e95ea4472b4d331064192382c.png b/content/zh/post/2022/figures/22b37a0e95ea4472b4d331064192382c.png new file mode 100644 index 0000000000000000000000000000000000000000..972721176606947baac4e685f09068ea3506520a Binary files /dev/null and b/content/zh/post/2022/figures/22b37a0e95ea4472b4d331064192382c.png differ diff --git a/content/zh/post/2022/figures/2775a3f24eb44c02931d63e302a4bf9c.png b/content/zh/post/2022/figures/2775a3f24eb44c02931d63e302a4bf9c.png new file mode 100644 index 0000000000000000000000000000000000000000..250d290fc508170e1ac9723bb4cf6088d100efbb Binary files /dev/null and b/content/zh/post/2022/figures/2775a3f24eb44c02931d63e302a4bf9c.png differ diff --git a/content/zh/post/2022/figures/27b944a22e1d45b39a0167b83e4d55a0.png b/content/zh/post/2022/figures/27b944a22e1d45b39a0167b83e4d55a0.png new file mode 100644 index 0000000000000000000000000000000000000000..3eb015ad3b77e040959a2e3e0db25dcb135393c9 Binary files /dev/null and b/content/zh/post/2022/figures/27b944a22e1d45b39a0167b83e4d55a0.png differ diff --git a/content/zh/post/2022/figures/2c62c125feb04ff89234abf76991601e.png b/content/zh/post/2022/figures/2c62c125feb04ff89234abf76991601e.png new file mode 100644 index 0000000000000000000000000000000000000000..301c2097e14c6982c8493543632391e08ecd8f39 Binary files /dev/null and b/content/zh/post/2022/figures/2c62c125feb04ff89234abf76991601e.png differ diff --git "a/content/zh/post/2022/figures/2\346\265\213\350\257\225\351\252\214\350\257\201.png" "b/content/zh/post/2022/figures/2\346\265\213\350\257\225\351\252\214\350\257\201.png" new file mode 100644 index 0000000000000000000000000000000000000000..77ad0591e79c4fd3a0863d0fd143abe569ed7873 Binary files /dev/null and "b/content/zh/post/2022/figures/2\346\265\213\350\257\225\351\252\214\350\257\201.png" differ diff --git a/content/zh/post/2022/figures/3507d173b3e24d9f94dd543947ae33ef.png b/content/zh/post/2022/figures/3507d173b3e24d9f94dd543947ae33ef.png new file mode 100644 index 0000000000000000000000000000000000000000..9629817a5cfe4f13b6f138cc4ecfecf513acb450 Binary files /dev/null and b/content/zh/post/2022/figures/3507d173b3e24d9f94dd543947ae33ef.png differ diff --git a/content/zh/post/2022/figures/356c385d615b442e951be7d27f00702e.png b/content/zh/post/2022/figures/356c385d615b442e951be7d27f00702e.png new file mode 
100644 index 0000000000000000000000000000000000000000..bfcf116a8b809e831ac311125c29b4f97e4ae7c8 Binary files /dev/null and b/content/zh/post/2022/figures/356c385d615b442e951be7d27f00702e.png differ diff --git a/content/zh/post/2022/figures/387c8fc827e34000936c977270c10f22.png b/content/zh/post/2022/figures/387c8fc827e34000936c977270c10f22.png new file mode 100644 index 0000000000000000000000000000000000000000..2a91b909d5369a398eabeb078d61bf6304bdbcc2 Binary files /dev/null and b/content/zh/post/2022/figures/387c8fc827e34000936c977270c10f22.png differ diff --git a/content/zh/post/2022/figures/480ae4bbdd664652af43663f061aae84.png b/content/zh/post/2022/figures/480ae4bbdd664652af43663f061aae84.png new file mode 100644 index 0000000000000000000000000000000000000000..dd7de840d5a3ad685b488d21d0e4d64ad85f87ee Binary files /dev/null and b/content/zh/post/2022/figures/480ae4bbdd664652af43663f061aae84.png differ diff --git a/content/zh/post/2022/figures/591c2725601c492cbccf312e9b2a7a11.png b/content/zh/post/2022/figures/591c2725601c492cbccf312e9b2a7a11.png new file mode 100644 index 0000000000000000000000000000000000000000..535a1dc77b07db4595203b1d22b35f7962279226 Binary files /dev/null and b/content/zh/post/2022/figures/591c2725601c492cbccf312e9b2a7a11.png differ diff --git a/content/zh/post/2022/figures/5d3d9f82ce164b08a6866a606fd7e03d.png b/content/zh/post/2022/figures/5d3d9f82ce164b08a6866a606fd7e03d.png new file mode 100644 index 0000000000000000000000000000000000000000..ea9ef563d157560da1046da4b0813307c7e5d949 Binary files /dev/null and b/content/zh/post/2022/figures/5d3d9f82ce164b08a6866a606fd7e03d.png differ diff --git a/content/zh/post/2022/figures/5e12f329abe74ed38ae99d8828adaa5d.png b/content/zh/post/2022/figures/5e12f329abe74ed38ae99d8828adaa5d.png new file mode 100644 index 0000000000000000000000000000000000000000..eb0f7f0ac649f2604727a2e1fa3ade2dadc775f6 Binary files /dev/null and b/content/zh/post/2022/figures/5e12f329abe74ed38ae99d8828adaa5d.png differ diff --git a/content/zh/post/2022/figures/61364d2741cc46f7802cb48cc75571fe.png b/content/zh/post/2022/figures/61364d2741cc46f7802cb48cc75571fe.png new file mode 100644 index 0000000000000000000000000000000000000000..02ad3da2b101e84845e7bbe583e7c2c1b94ce07d Binary files /dev/null and b/content/zh/post/2022/figures/61364d2741cc46f7802cb48cc75571fe.png differ diff --git a/content/zh/post/2022/figures/614036c6b5d84a0c86de61b3cbf88b78.png b/content/zh/post/2022/figures/614036c6b5d84a0c86de61b3cbf88b78.png new file mode 100644 index 0000000000000000000000000000000000000000..3b9462f3b7abe01bf2509cadc04c3b954c20e09f Binary files /dev/null and b/content/zh/post/2022/figures/614036c6b5d84a0c86de61b3cbf88b78.png differ diff --git a/content/zh/post/2022/figures/615c11832ab3f51d914222dd.png b/content/zh/post/2022/figures/615c11832ab3f51d914222dd.png new file mode 100644 index 0000000000000000000000000000000000000000..15653719bf489da0437c7da5b7e89d6f0cc8f7ce Binary files /dev/null and b/content/zh/post/2022/figures/615c11832ab3f51d914222dd.png differ diff --git a/content/zh/post/2022/figures/615c11832ab3f51d914222e9.png b/content/zh/post/2022/figures/615c11832ab3f51d914222e9.png new file mode 100644 index 0000000000000000000000000000000000000000..b54e5ca6e7aced964332ccffebd61fd47b631009 Binary files /dev/null and b/content/zh/post/2022/figures/615c11832ab3f51d914222e9.png differ diff --git a/content/zh/post/2022/figures/615c11832ab3f51d914222f4.png b/content/zh/post/2022/figures/615c11832ab3f51d914222f4.png new file mode 100644 index 
0000000000000000000000000000000000000000..c03cc7e87395a6da0108687d529dc20849c08ac6 Binary files /dev/null and b/content/zh/post/2022/figures/615c11832ab3f51d914222f4.png differ diff --git a/content/zh/post/2022/figures/615c11832ab3f51d91422301.png b/content/zh/post/2022/figures/615c11832ab3f51d91422301.png new file mode 100644 index 0000000000000000000000000000000000000000..30e8fdff5d050a36ced441b4ef515cb00df61e99 Binary files /dev/null and b/content/zh/post/2022/figures/615c11832ab3f51d91422301.png differ diff --git a/content/zh/post/2022/figures/615c13152ab3f51d91446977.png b/content/zh/post/2022/figures/615c13152ab3f51d91446977.png new file mode 100644 index 0000000000000000000000000000000000000000..d9272f7d33de4b4c7e5b8542808e63adb7278fc5 Binary files /dev/null and b/content/zh/post/2022/figures/615c13152ab3f51d91446977.png differ diff --git a/content/zh/post/2022/figures/615c14052ab3f51d9145c371.png b/content/zh/post/2022/figures/615c14052ab3f51d9145c371.png new file mode 100644 index 0000000000000000000000000000000000000000..c437be002001bc930fd48e01e2d178fbf51f9fbe Binary files /dev/null and b/content/zh/post/2022/figures/615c14052ab3f51d9145c371.png differ diff --git a/content/zh/post/2022/figures/615c14052ab3f51d9145c37e.png b/content/zh/post/2022/figures/615c14052ab3f51d9145c37e.png new file mode 100644 index 0000000000000000000000000000000000000000..d6803af00b4b54c2b5a13a403922aaa7e2ddcf83 Binary files /dev/null and b/content/zh/post/2022/figures/615c14052ab3f51d9145c37e.png differ diff --git a/content/zh/post/2022/figures/615c14052ab3f51d9145c394.png b/content/zh/post/2022/figures/615c14052ab3f51d9145c394.png new file mode 100644 index 0000000000000000000000000000000000000000..49ac43ab32753958e9ada19b8820b9154100ac1f Binary files /dev/null and b/content/zh/post/2022/figures/615c14052ab3f51d9145c394.png differ diff --git a/content/zh/post/2022/figures/615c14052ab3f51d9145c3a9.png b/content/zh/post/2022/figures/615c14052ab3f51d9145c3a9.png new file mode 100644 index 0000000000000000000000000000000000000000..c9223fbf01f4f981267768f0a46fb63fde0d4ff2 Binary files /dev/null and b/content/zh/post/2022/figures/615c14052ab3f51d9145c3a9.png differ diff --git a/content/zh/post/2022/figures/615c14a32ab3f51d9146b955.png b/content/zh/post/2022/figures/615c14a32ab3f51d9146b955.png new file mode 100644 index 0000000000000000000000000000000000000000..d71abeee3556049623218a9414e2c7a530abfe45 Binary files /dev/null and b/content/zh/post/2022/figures/615c14a32ab3f51d9146b955.png differ diff --git a/content/zh/post/2022/figures/615c14a32ab3f51d9146b960.png b/content/zh/post/2022/figures/615c14a32ab3f51d9146b960.png new file mode 100644 index 0000000000000000000000000000000000000000..f1f00ff4e7ed548b43e101b660e87d4624899762 Binary files /dev/null and b/content/zh/post/2022/figures/615c14a32ab3f51d9146b960.png differ diff --git a/content/zh/post/2022/figures/615c14a32ab3f51d9146b96f.png b/content/zh/post/2022/figures/615c14a32ab3f51d9146b96f.png new file mode 100644 index 0000000000000000000000000000000000000000..b2498e1aedf27793ae36e4758d8dcdde32e3779d Binary files /dev/null and b/content/zh/post/2022/figures/615c14a32ab3f51d9146b96f.png differ diff --git a/content/zh/post/2022/figures/615c14a32ab3f51d9146b994.png b/content/zh/post/2022/figures/615c14a32ab3f51d9146b994.png new file mode 100644 index 0000000000000000000000000000000000000000..03ccb8b753dcf761fa600ac6e1fc500ca240709c Binary files /dev/null and b/content/zh/post/2022/figures/615c14a32ab3f51d9146b994.png differ diff --git 
a/content/zh/post/2022/figures/615c15482ab3f51d9147a2aa.png b/content/zh/post/2022/figures/615c15482ab3f51d9147a2aa.png new file mode 100644 index 0000000000000000000000000000000000000000..f47be217bfa8fa59421baa8aa4435159595d0993 Binary files /dev/null and b/content/zh/post/2022/figures/615c15482ab3f51d9147a2aa.png differ diff --git a/content/zh/post/2022/figures/615c15482ab3f51d9147a2b3.png b/content/zh/post/2022/figures/615c15482ab3f51d9147a2b3.png new file mode 100644 index 0000000000000000000000000000000000000000..1a4b4ea30a0cf948ac6d7321af263b8d3e0eb015 Binary files /dev/null and b/content/zh/post/2022/figures/615c15482ab3f51d9147a2b3.png differ diff --git a/content/zh/post/2022/figures/615c15482ab3f51d9147a2ba.png b/content/zh/post/2022/figures/615c15482ab3f51d9147a2ba.png new file mode 100644 index 0000000000000000000000000000000000000000..33f0ad4668b3817e3147804996eca7fc2fbc7bab Binary files /dev/null and b/content/zh/post/2022/figures/615c15482ab3f51d9147a2ba.png differ diff --git a/content/zh/post/2022/figures/615c15c42ab3f51d91484e93.png b/content/zh/post/2022/figures/615c15c42ab3f51d91484e93.png new file mode 100644 index 0000000000000000000000000000000000000000..343eda20df9a14b2dc79c66e833ae3af2cb0f531 Binary files /dev/null and b/content/zh/post/2022/figures/615c15c42ab3f51d91484e93.png differ diff --git a/content/zh/post/2022/figures/615c15c42ab3f51d91484e9e.png b/content/zh/post/2022/figures/615c15c42ab3f51d91484e9e.png new file mode 100644 index 0000000000000000000000000000000000000000..f0ecb1dc524fe925c879ed344ce7f8250ae7f70f Binary files /dev/null and b/content/zh/post/2022/figures/615c15c42ab3f51d91484e9e.png differ diff --git a/content/zh/post/2022/figures/615c15c42ab3f51d91484ead.png b/content/zh/post/2022/figures/615c15c42ab3f51d91484ead.png new file mode 100644 index 0000000000000000000000000000000000000000..619671b7c931cc39da11eee4b5d9d3aedded25a9 Binary files /dev/null and b/content/zh/post/2022/figures/615c15c42ab3f51d91484ead.png differ diff --git a/content/zh/post/2022/figures/615c15c42ab3f51d91484ec6.png b/content/zh/post/2022/figures/615c15c42ab3f51d91484ec6.png new file mode 100644 index 0000000000000000000000000000000000000000..162882467f532bb80b153bd6c206f919c717437c Binary files /dev/null and b/content/zh/post/2022/figures/615c15c42ab3f51d91484ec6.png differ diff --git a/content/zh/post/2022/figures/615c15c42ab3f51d91484ed6.png b/content/zh/post/2022/figures/615c15c42ab3f51d91484ed6.png new file mode 100644 index 0000000000000000000000000000000000000000..bf4deb15222f2ef72f6393c3fe76bd9e72ca7686 Binary files /dev/null and b/content/zh/post/2022/figures/615c15c42ab3f51d91484ed6.png differ diff --git a/content/zh/post/2022/figures/615c16922ab3f51d914979b2.png b/content/zh/post/2022/figures/615c16922ab3f51d914979b2.png new file mode 100644 index 0000000000000000000000000000000000000000..d6ddb96060ab61f3361f332cc7216576f5911c42 Binary files /dev/null and b/content/zh/post/2022/figures/615c16922ab3f51d914979b2.png differ diff --git a/content/zh/post/2022/figures/615c16922ab3f51d914979bf.png b/content/zh/post/2022/figures/615c16922ab3f51d914979bf.png new file mode 100644 index 0000000000000000000000000000000000000000..48a7a21cad2c81ddc9379db0f3c8d92789da0b81 Binary files /dev/null and b/content/zh/post/2022/figures/615c16922ab3f51d914979bf.png differ diff --git a/content/zh/post/2022/figures/615c16922ab3f51d914979c5.png b/content/zh/post/2022/figures/615c16922ab3f51d914979c5.png new file mode 100644 index 
0000000000000000000000000000000000000000..7687af7b39dccb9eece9763f782fefcc73faf810 Binary files /dev/null and b/content/zh/post/2022/figures/615c16922ab3f51d914979c5.png differ diff --git a/content/zh/post/2022/figures/615c16932ab3f51d914979dd.png b/content/zh/post/2022/figures/615c16932ab3f51d914979dd.png new file mode 100644 index 0000000000000000000000000000000000000000..100c3f1d34c739439d7c458d30732852af1df008 Binary files /dev/null and b/content/zh/post/2022/figures/615c16932ab3f51d914979dd.png differ diff --git a/content/zh/post/2022/figures/615c16932ab3f51d914979e7.png b/content/zh/post/2022/figures/615c16932ab3f51d914979e7.png new file mode 100644 index 0000000000000000000000000000000000000000..b42168b735293fb2f4251ee28d98616cfd099e76 Binary files /dev/null and b/content/zh/post/2022/figures/615c16932ab3f51d914979e7.png differ diff --git a/content/zh/post/2022/figures/615c16f62ab3f51d914a1b6d.png b/content/zh/post/2022/figures/615c16f62ab3f51d914a1b6d.png new file mode 100644 index 0000000000000000000000000000000000000000..5bbcf3bb2d2866c28bad8c11f180706be246a311 Binary files /dev/null and b/content/zh/post/2022/figures/615c16f62ab3f51d914a1b6d.png differ diff --git a/content/zh/post/2022/figures/615c16f62ab3f51d914a1b7d.png b/content/zh/post/2022/figures/615c16f62ab3f51d914a1b7d.png new file mode 100644 index 0000000000000000000000000000000000000000..1c48312f6a679222b78da0b7bb23b14b3864ac07 Binary files /dev/null and b/content/zh/post/2022/figures/615c16f62ab3f51d914a1b7d.png differ diff --git a/content/zh/post/2022/figures/615c16f62ab3f51d914a1b92.png b/content/zh/post/2022/figures/615c16f62ab3f51d914a1b92.png new file mode 100644 index 0000000000000000000000000000000000000000..2df830b26a6325c7ac7935b38b49466e60326472 Binary files /dev/null and b/content/zh/post/2022/figures/615c16f62ab3f51d914a1b92.png differ diff --git a/content/zh/post/2022/figures/615c16f62ab3f51d914a1ba8.png b/content/zh/post/2022/figures/615c16f62ab3f51d914a1ba8.png new file mode 100644 index 0000000000000000000000000000000000000000..80c76ef7af4c163b07e637efbf03ce131a3c015a Binary files /dev/null and b/content/zh/post/2022/figures/615c16f62ab3f51d914a1ba8.png differ diff --git a/content/zh/post/2022/figures/615c183c2ab3f51d914bfbaf.png b/content/zh/post/2022/figures/615c183c2ab3f51d914bfbaf.png new file mode 100644 index 0000000000000000000000000000000000000000..209022bb80a6179af2b98a09b5ef0234f18b3575 Binary files /dev/null and b/content/zh/post/2022/figures/615c183c2ab3f51d914bfbaf.png differ diff --git a/content/zh/post/2022/figures/615c183c2ab3f51d914bfbb6.png b/content/zh/post/2022/figures/615c183c2ab3f51d914bfbb6.png new file mode 100644 index 0000000000000000000000000000000000000000..352339817226447df02352a3374a865a94d730af Binary files /dev/null and b/content/zh/post/2022/figures/615c183c2ab3f51d914bfbb6.png differ diff --git a/content/zh/post/2022/figures/615c191d2ab3f51d914d3f1b.png b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f1b.png new file mode 100644 index 0000000000000000000000000000000000000000..cd87ece05a26e3786c758cd784a346698cd32eea Binary files /dev/null and b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f1b.png differ diff --git a/content/zh/post/2022/figures/615c191d2ab3f51d914d3f25.png b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f25.png new file mode 100644 index 0000000000000000000000000000000000000000..6b2d1cc7fdd5eaeee8bf36bbdf362dfcc0f477f1 Binary files /dev/null and b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f25.png differ diff --git 
a/content/zh/post/2022/figures/615c191d2ab3f51d914d3f32.png b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f32.png new file mode 100644 index 0000000000000000000000000000000000000000..ae45a4c5b3ea2e928e25f9ee41ccaf2ea752e430 Binary files /dev/null and b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f32.png differ diff --git a/content/zh/post/2022/figures/615c191d2ab3f51d914d3f43.png b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f43.png new file mode 100644 index 0000000000000000000000000000000000000000..5f9989edc0a4b61b0df65c28672004a27f656429 Binary files /dev/null and b/content/zh/post/2022/figures/615c191d2ab3f51d914d3f43.png differ diff --git a/content/zh/post/2022/figures/615c19272ab3f51d914d4e90.png b/content/zh/post/2022/figures/615c19272ab3f51d914d4e90.png new file mode 100644 index 0000000000000000000000000000000000000000..491c3c2dba52acdf764e71524307235728ac1563 Binary files /dev/null and b/content/zh/post/2022/figures/615c19272ab3f51d914d4e90.png differ diff --git a/content/zh/post/2022/figures/615c19272ab3f51d914d4e97.png b/content/zh/post/2022/figures/615c19272ab3f51d914d4e97.png new file mode 100644 index 0000000000000000000000000000000000000000..7b38491cafc1d387f2c40b2520c06eddaee04654 Binary files /dev/null and b/content/zh/post/2022/figures/615c19272ab3f51d914d4e97.png differ diff --git a/content/zh/post/2022/figures/615c19272ab3f51d914d4e9d.png b/content/zh/post/2022/figures/615c19272ab3f51d914d4e9d.png new file mode 100644 index 0000000000000000000000000000000000000000..004c5dcdcf6eac988077eb915a2405591374fc24 Binary files /dev/null and b/content/zh/post/2022/figures/615c19272ab3f51d914d4e9d.png differ diff --git a/content/zh/post/2022/figures/615c19272ab3f51d914d4ea8.png b/content/zh/post/2022/figures/615c19272ab3f51d914d4ea8.png new file mode 100644 index 0000000000000000000000000000000000000000..29987cbe2b7cf0d5878f481349870afe8b16ec93 Binary files /dev/null and b/content/zh/post/2022/figures/615c19272ab3f51d914d4ea8.png differ diff --git a/content/zh/post/2022/figures/615c19272ab3f51d914d4eaf.png b/content/zh/post/2022/figures/615c19272ab3f51d914d4eaf.png new file mode 100644 index 0000000000000000000000000000000000000000..051ecde9986a3bba65c06144f9d7690fc993ea5c Binary files /dev/null and b/content/zh/post/2022/figures/615c19272ab3f51d914d4eaf.png differ diff --git a/content/zh/post/2022/figures/615c19302ab3f51d914d5dd8.png b/content/zh/post/2022/figures/615c19302ab3f51d914d5dd8.png new file mode 100644 index 0000000000000000000000000000000000000000..08238474333c93fc84cc3b258621d0fee17d2c6d Binary files /dev/null and b/content/zh/post/2022/figures/615c19302ab3f51d914d5dd8.png differ diff --git a/content/zh/post/2022/figures/615c19302ab3f51d914d5de4.png b/content/zh/post/2022/figures/615c19302ab3f51d914d5de4.png new file mode 100644 index 0000000000000000000000000000000000000000..2aa479db903a2395fc8e3dc6fb78975125c27581 Binary files /dev/null and b/content/zh/post/2022/figures/615c19302ab3f51d914d5de4.png differ diff --git a/content/zh/post/2022/figures/615c19302ab3f51d914d5df7.png b/content/zh/post/2022/figures/615c19302ab3f51d914d5df7.png new file mode 100644 index 0000000000000000000000000000000000000000..a3a949ef10f22ffd6102470de7605a03cee48b02 Binary files /dev/null and b/content/zh/post/2022/figures/615c19302ab3f51d914d5df7.png differ diff --git a/content/zh/post/2022/figures/615c19302ab3f51d914d5e02.png b/content/zh/post/2022/figures/615c19302ab3f51d914d5e02.png new file mode 100644 index 
0000000000000000000000000000000000000000..8f88b2f335cd2f62b906e348081712c565b3690d Binary files /dev/null and b/content/zh/post/2022/figures/615c19302ab3f51d914d5e02.png differ diff --git a/content/zh/post/2022/figures/615c19302ab3f51d914d5e11.png b/content/zh/post/2022/figures/615c19302ab3f51d914d5e11.png new file mode 100644 index 0000000000000000000000000000000000000000..0f2e8cc62c89db71a928ba79e954630f8392860d Binary files /dev/null and b/content/zh/post/2022/figures/615c19302ab3f51d914d5e11.png differ diff --git a/content/zh/post/2022/figures/615c193f2ab3f51d914d72ba.png b/content/zh/post/2022/figures/615c193f2ab3f51d914d72ba.png new file mode 100644 index 0000000000000000000000000000000000000000..bc90ddd5235827dfa7250548ec42a6f34ee5c7f6 Binary files /dev/null and b/content/zh/post/2022/figures/615c193f2ab3f51d914d72ba.png differ diff --git a/content/zh/post/2022/figures/615c193f2ab3f51d914d72c2.png b/content/zh/post/2022/figures/615c193f2ab3f51d914d72c2.png new file mode 100644 index 0000000000000000000000000000000000000000..1994b49f03cdf89bfa7b0cc13ae12bb8309c56a9 Binary files /dev/null and b/content/zh/post/2022/figures/615c193f2ab3f51d914d72c2.png differ diff --git a/content/zh/post/2022/figures/615c193f2ab3f51d914d72e9.png b/content/zh/post/2022/figures/615c193f2ab3f51d914d72e9.png new file mode 100644 index 0000000000000000000000000000000000000000..349f61aced9156de9023b82fccce89edb6b7c2a5 Binary files /dev/null and b/content/zh/post/2022/figures/615c193f2ab3f51d914d72e9.png differ diff --git a/content/zh/post/2022/figures/615c193f2ab3f51d914d72fc.png b/content/zh/post/2022/figures/615c193f2ab3f51d914d72fc.png new file mode 100644 index 0000000000000000000000000000000000000000..f2fedb67cb1d2ceeb5105a36fd7c06e8d515f1fe Binary files /dev/null and b/content/zh/post/2022/figures/615c193f2ab3f51d914d72fc.png differ diff --git a/content/zh/post/2022/figures/615c19492ab3f51d914d811b.png b/content/zh/post/2022/figures/615c19492ab3f51d914d811b.png new file mode 100644 index 0000000000000000000000000000000000000000..b09136e5c909402c8e04fddecb64331b4ec8b493 Binary files /dev/null and b/content/zh/post/2022/figures/615c19492ab3f51d914d811b.png differ diff --git a/content/zh/post/2022/figures/615c19492ab3f51d914d8137.png b/content/zh/post/2022/figures/615c19492ab3f51d914d8137.png new file mode 100644 index 0000000000000000000000000000000000000000..8f896b99705a54eb554acc6568bef41c9ce08d61 Binary files /dev/null and b/content/zh/post/2022/figures/615c19492ab3f51d914d8137.png differ diff --git a/content/zh/post/2022/figures/615c19492ab3f51d914d8153.png b/content/zh/post/2022/figures/615c19492ab3f51d914d8153.png new file mode 100644 index 0000000000000000000000000000000000000000..d2c1072e2248dcd89006effe6b88d2b834d3f8bd Binary files /dev/null and b/content/zh/post/2022/figures/615c19492ab3f51d914d8153.png differ diff --git a/content/zh/post/2022/figures/615c19492ab3f51d914d8161.png b/content/zh/post/2022/figures/615c19492ab3f51d914d8161.png new file mode 100644 index 0000000000000000000000000000000000000000..dee08fb5bc1df54d112060aeec8374a9e1e050a2 Binary files /dev/null and b/content/zh/post/2022/figures/615c19492ab3f51d914d8161.png differ diff --git a/content/zh/post/2022/figures/615c533b2ab3f51d91a72523.jpg b/content/zh/post/2022/figures/615c533b2ab3f51d91a72523.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b54b725d38b5c112212ffd6e022548a88134a7df Binary files /dev/null and b/content/zh/post/2022/figures/615c533b2ab3f51d91a72523.jpg differ diff --git 
a/content/zh/post/2022/figures/615c53892ab3f51d91a7b1e6.png b/content/zh/post/2022/figures/615c53892ab3f51d91a7b1e6.png new file mode 100644 index 0000000000000000000000000000000000000000..bf5923c491b733359aef9a1806234efeb41f6a13 Binary files /dev/null and b/content/zh/post/2022/figures/615c53892ab3f51d91a7b1e6.png differ diff --git a/content/zh/post/2022/figures/615ffa572ab3f51d91af9b67.jpg b/content/zh/post/2022/figures/615ffa572ab3f51d91af9b67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20754d3e984a08d9c0dde9614e333fe60466b4e7 Binary files /dev/null and b/content/zh/post/2022/figures/615ffa572ab3f51d91af9b67.jpg differ diff --git a/content/zh/post/2022/figures/615ffb2b2ab3f51d91b0c00c.jpg b/content/zh/post/2022/figures/615ffb2b2ab3f51d91b0c00c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f412a1cd789f19faf6383481b5715051a330a4a8 Binary files /dev/null and b/content/zh/post/2022/figures/615ffb2b2ab3f51d91b0c00c.jpg differ diff --git a/content/zh/post/2022/figures/615ffbbb2ab3f51d91b187c6.jpg b/content/zh/post/2022/figures/615ffbbb2ab3f51d91b187c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd29bf8b4e78704a87bb6b6a97f0fcbaf738d6ea Binary files /dev/null and b/content/zh/post/2022/figures/615ffbbb2ab3f51d91b187c6.jpg differ diff --git a/content/zh/post/2022/figures/615ffdad2ab3f51d91b42898.jpg b/content/zh/post/2022/figures/615ffdad2ab3f51d91b42898.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5447098b2acbfb8066511f2ae695178ba07140dc Binary files /dev/null and b/content/zh/post/2022/figures/615ffdad2ab3f51d91b42898.jpg differ diff --git a/content/zh/post/2022/figures/615ffeef2ab3f51d91b5bb72.jpg b/content/zh/post/2022/figures/615ffeef2ab3f51d91b5bb72.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46b53c33151eafdd08e8ce06c460bdb258a84c3d Binary files /dev/null and b/content/zh/post/2022/figures/615ffeef2ab3f51d91b5bb72.jpg differ diff --git a/content/zh/post/2022/figures/615fff622ab3f51d91b644eb.jpg b/content/zh/post/2022/figures/615fff622ab3f51d91b644eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cf62be780a130c39f37b8b0002200e1f542a22f Binary files /dev/null and b/content/zh/post/2022/figures/615fff622ab3f51d91b644eb.jpg differ diff --git a/content/zh/post/2022/figures/641abf7f6c9642b188ade66b1c8d25ee.png b/content/zh/post/2022/figures/641abf7f6c9642b188ade66b1c8d25ee.png new file mode 100644 index 0000000000000000000000000000000000000000..c3b32759df73c016ee54b2da091d97db20b1615a Binary files /dev/null and b/content/zh/post/2022/figures/641abf7f6c9642b188ade66b1c8d25ee.png differ diff --git a/content/zh/post/2022/figures/721e491c70e948abadf18b2eda7ce76f.png b/content/zh/post/2022/figures/721e491c70e948abadf18b2eda7ce76f.png new file mode 100644 index 0000000000000000000000000000000000000000..22ff947110d2b5333cdb107521037d1b537f57f4 Binary files /dev/null and b/content/zh/post/2022/figures/721e491c70e948abadf18b2eda7ce76f.png differ diff --git a/content/zh/post/2022/figures/7294465883ce45ac80a371f63dfe9659.png b/content/zh/post/2022/figures/7294465883ce45ac80a371f63dfe9659.png new file mode 100644 index 0000000000000000000000000000000000000000..25091e59d461f1ee0983c6723b4d45c6c52140b2 Binary files /dev/null and b/content/zh/post/2022/figures/7294465883ce45ac80a371f63dfe9659.png differ diff --git a/content/zh/post/2022/figures/7a7b1fc98317411a9a18982e944ba5c2.png 
b/content/zh/post/2022/figures/7a7b1fc98317411a9a18982e944ba5c2.png new file mode 100644 index 0000000000000000000000000000000000000000..54a1d1951071d9b4ac9b1b8341dbba395bd85367 Binary files /dev/null and b/content/zh/post/2022/figures/7a7b1fc98317411a9a18982e944ba5c2.png differ diff --git a/content/zh/post/2022/figures/816de1e0a8c04796a4f3478eff37baed.png b/content/zh/post/2022/figures/816de1e0a8c04796a4f3478eff37baed.png new file mode 100644 index 0000000000000000000000000000000000000000..4448f7deec2eb697af199860dcdb83f25c70bc29 Binary files /dev/null and b/content/zh/post/2022/figures/816de1e0a8c04796a4f3478eff37baed.png differ diff --git a/content/zh/post/2022/figures/a662d9a9a96b40d089a6d9c68788bf3d.png b/content/zh/post/2022/figures/a662d9a9a96b40d089a6d9c68788bf3d.png new file mode 100644 index 0000000000000000000000000000000000000000..2558cc4508684ab3bd7c593cf1a30bbf68750bc2 Binary files /dev/null and b/content/zh/post/2022/figures/a662d9a9a96b40d089a6d9c68788bf3d.png differ diff --git a/content/zh/post/2022/figures/a6d0fc02a8c948f2b43e4ef47cecd731.png b/content/zh/post/2022/figures/a6d0fc02a8c948f2b43e4ef47cecd731.png new file mode 100644 index 0000000000000000000000000000000000000000..2ced577820ebc2b01a6c4e1e28cbfc5fe7ff2ab2 Binary files /dev/null and b/content/zh/post/2022/figures/a6d0fc02a8c948f2b43e4ef47cecd731.png differ diff --git a/content/zh/post/2022/figures/ba1ea7c4485b4830b21538d56ecac309.png b/content/zh/post/2022/figures/ba1ea7c4485b4830b21538d56ecac309.png new file mode 100644 index 0000000000000000000000000000000000000000..549ec34af5c51d2af09e1fcf61fe56085763ec23 Binary files /dev/null and b/content/zh/post/2022/figures/ba1ea7c4485b4830b21538d56ecac309.png differ diff --git a/content/zh/post/2022/figures/c726f71fc88c4015b1d89f4586dfe290.png b/content/zh/post/2022/figures/c726f71fc88c4015b1d89f4586dfe290.png new file mode 100644 index 0000000000000000000000000000000000000000..9543723be927f77404ea82abeb0aee472e56e1d2 Binary files /dev/null and b/content/zh/post/2022/figures/c726f71fc88c4015b1d89f4586dfe290.png differ diff --git a/content/zh/post/2022/figures/cb8039252a6b45e99d8ff682fb9df992.png b/content/zh/post/2022/figures/cb8039252a6b45e99d8ff682fb9df992.png new file mode 100644 index 0000000000000000000000000000000000000000..f03b1407774c9f30845331fb12310da9936b1fbe Binary files /dev/null and b/content/zh/post/2022/figures/cb8039252a6b45e99d8ff682fb9df992.png differ diff --git a/content/zh/post/2022/figures/cd094375c2b44a8383694267e492fc63.png b/content/zh/post/2022/figures/cd094375c2b44a8383694267e492fc63.png new file mode 100644 index 0000000000000000000000000000000000000000..bc5bd1795fb97edbe7a23c5371f388fea99cfb7f Binary files /dev/null and b/content/zh/post/2022/figures/cd094375c2b44a8383694267e492fc63.png differ diff --git a/content/zh/post/2022/figures/d21813079e7b40a1b9edde6b9298d2f3.png b/content/zh/post/2022/figures/d21813079e7b40a1b9edde6b9298d2f3.png new file mode 100644 index 0000000000000000000000000000000000000000..b17148ae18deac8611e111ff18c3c27e16f51103 Binary files /dev/null and b/content/zh/post/2022/figures/d21813079e7b40a1b9edde6b9298d2f3.png differ diff --git a/content/zh/post/2022/figures/dbc89373c5734638a51add74523f640c.png b/content/zh/post/2022/figures/dbc89373c5734638a51add74523f640c.png new file mode 100644 index 0000000000000000000000000000000000000000..7d1f151cef0851276b848059877c33552f628fe1 Binary files /dev/null and b/content/zh/post/2022/figures/dbc89373c5734638a51add74523f640c.png differ diff --git 
a/content/zh/post/2022/figures/dc1c632c7c0f49f2ab7ebd57f78915d6.png b/content/zh/post/2022/figures/dc1c632c7c0f49f2ab7ebd57f78915d6.png new file mode 100644 index 0000000000000000000000000000000000000000..5900e1e846cd8422fbe466206be036b297c82ef5 Binary files /dev/null and b/content/zh/post/2022/figures/dc1c632c7c0f49f2ab7ebd57f78915d6.png differ diff --git a/content/zh/post/2022/figures/ee22045a1dca446b925881137106db5c.png b/content/zh/post/2022/figures/ee22045a1dca446b925881137106db5c.png new file mode 100644 index 0000000000000000000000000000000000000000..b5cd86b6651ab9d74de07e63db4e000e2f57cdfa Binary files /dev/null and b/content/zh/post/2022/figures/ee22045a1dca446b925881137106db5c.png differ diff --git a/content/zh/post/2022/figures/f569229a746940cba90ed0cda6fd1d2f.png b/content/zh/post/2022/figures/f569229a746940cba90ed0cda6fd1d2f.png new file mode 100644 index 0000000000000000000000000000000000000000..2b8f2a8381c9854c7ae1d8d60735b8c3c065b7cb Binary files /dev/null and b/content/zh/post/2022/figures/f569229a746940cba90ed0cda6fd1d2f.png differ diff --git a/content/zh/post/2022/figures/faa8002b28d94f5b9408f0e251daebc7.png b/content/zh/post/2022/figures/faa8002b28d94f5b9408f0e251daebc7.png new file mode 100644 index 0000000000000000000000000000000000000000..8c2cd7d12d8921b2212de6e2e8dc6d53dca4c916 Binary files /dev/null and b/content/zh/post/2022/figures/faa8002b28d94f5b9408f0e251daebc7.png differ diff --git "a/content/zh/post/2022/figures/openGauss\345\206\205\345\255\230\344\274\230\345\214\226\345\255\230\345\202\250\345\274\225\346\223\216\347\273\223\346\236\204\345\233\276.png" "b/content/zh/post/2022/figures/openGauss\345\206\205\345\255\230\344\274\230\345\214\226\345\255\230\345\202\250\345\274\225\346\223\216\347\273\223\346\236\204\345\233\276.png" new file mode 100644 index 0000000000000000000000000000000000000000..8d353fb3803fad934e57f5a228d3657235c5a608 Binary files /dev/null and "b/content/zh/post/2022/figures/openGauss\345\206\205\345\255\230\344\274\230\345\214\226\345\255\230\345\202\250\345\274\225\346\223\216\347\273\223\346\236\204\345\233\276.png" differ diff --git a/content/zh/post/2022/figures/outerBatch1.png b/content/zh/post/2022/figures/outerBatch1.png new file mode 100644 index 0000000000000000000000000000000000000000..2800c3a5a21a2181fe346b9c47430777a7c5f51a Binary files /dev/null and b/content/zh/post/2022/figures/outerBatch1.png differ diff --git a/content/zh/post/2022/figures/vi.png b/content/zh/post/2022/figures/vi.png new file mode 100644 index 0000000000000000000000000000000000000000..ca214d6c2b02532a6a1854465b32ac71a4099eff Binary files /dev/null and b/content/zh/post/2022/figures/vi.png differ diff --git a/content/zh/post/2022/figures/zh-cn_image_0000001197508006.png b/content/zh/post/2022/figures/zh-cn_image_0000001197508006.png new file mode 100644 index 0000000000000000000000000000000000000000..485878c43220a940a965b797966a4ebcacf81ccf Binary files /dev/null and b/content/zh/post/2022/figures/zh-cn_image_0000001197508006.png differ diff --git a/content/zh/post/2022/figures/zh-cn_image_0000001197720014.png b/content/zh/post/2022/figures/zh-cn_image_0000001197720014.png new file mode 100644 index 0000000000000000000000000000000000000000..b19a89f588b61a3f9a08e5ce016d9f3e7b821fdc Binary files /dev/null and b/content/zh/post/2022/figures/zh-cn_image_0000001197720014.png differ diff --git "a/content/zh/post/2022/figures/\344\270\213\347\274\226\350\276\221pg_hba-conf\346\226\207\344\273\266.png" 
"b/content/zh/post/2022/figures/\344\270\213\347\274\226\350\276\221pg_hba-conf\346\226\207\344\273\266.png" new file mode 100644 index 0000000000000000000000000000000000000000..77b7fb411e79de94a1a219622108ce7fca8b5840 Binary files /dev/null and "b/content/zh/post/2022/figures/\344\270\213\347\274\226\350\276\221pg_hba-conf\346\226\207\344\273\266.png" differ diff --git "a/content/zh/post/2022/figures/\344\270\273\350\246\201\347\224\2613\344\270\252\346\225\260\347\273\204\345\234\250\350\241\250\347\244\272.png" "b/content/zh/post/2022/figures/\344\270\273\350\246\201\347\224\2613\344\270\252\346\225\260\347\273\204\345\234\250\350\241\250\347\244\272.png" new file mode 100644 index 0000000000000000000000000000000000000000..0334790b6035793d50ab3702ef616bd25e9a060c Binary files /dev/null and "b/content/zh/post/2022/figures/\344\270\273\350\246\201\347\224\2613\344\270\252\346\225\260\347\273\204\345\234\250\350\241\250\347\244\272.png" differ diff --git "a/content/zh/post/2022/figures/\344\277\256\346\224\271\345\246\202\344\270\213.png" "b/content/zh/post/2022/figures/\344\277\256\346\224\271\345\246\202\344\270\213.png" new file mode 100644 index 0000000000000000000000000000000000000000..5cb43620f8f7278111c24044ee5662640b95226c Binary files /dev/null and "b/content/zh/post/2022/figures/\344\277\256\346\224\271\345\246\202\344\270\213.png" differ diff --git "a/content/zh/post/2022/figures/\345\256\211\350\243\205\346\210\220\345\212\237\344\274\232\345\207\272\347\216\260\345\246\202\344\270\213\347\225\214\351\235\242.png" "b/content/zh/post/2022/figures/\345\256\211\350\243\205\346\210\220\345\212\237\344\274\232\345\207\272\347\216\260\345\246\202\344\270\213\347\225\214\351\235\242.png" new file mode 100644 index 0000000000000000000000000000000000000000..571a8e81fd7c118c18a5c3367138621cfca0e9b4 Binary files /dev/null and "b/content/zh/post/2022/figures/\345\256\211\350\243\205\346\210\220\345\212\237\344\274\232\345\207\272\347\216\260\345\246\202\344\270\213\347\225\214\351\235\242.png" differ diff --git "a/content/zh/post/2022/figures/\346\210\221\344\273\254\350\207\252\345\256\232\344\271\211\347\232\204\346\214\207\346\240\207\345\267\262\347\273\217\351\207\207\351\233\206\345\210\260.png" "b/content/zh/post/2022/figures/\346\210\221\344\273\254\350\207\252\345\256\232\344\271\211\347\232\204\346\214\207\346\240\207\345\267\262\347\273\217\351\207\207\351\233\206\345\210\260.png" new file mode 100644 index 0000000000000000000000000000000000000000..7078b664720dec9ac98d5018024e246818f8c958 Binary files /dev/null and "b/content/zh/post/2022/figures/\346\210\221\344\273\254\350\207\252\345\256\232\344\271\211\347\232\204\346\214\207\346\240\207\345\267\262\347\273\217\351\207\207\351\233\206\345\210\260.png" differ diff --git "a/content/zh/post/2022/figures/\346\210\221\350\243\205\347\232\204\346\230\2571-16\347\211\210\346\234\254.png" "b/content/zh/post/2022/figures/\346\210\221\350\243\205\347\232\204\346\230\2571-16\347\211\210\346\234\254.png" new file mode 100644 index 0000000000000000000000000000000000000000..aaa8af6a0439336d61d0018a46db907ec4b2f684 Binary files /dev/null and "b/content/zh/post/2022/figures/\346\210\221\350\243\205\347\232\204\346\230\2571-16\347\211\210\346\234\254.png" differ diff --git "a/content/zh/post/2022/figures/\346\265\213\350\257\225\351\252\214\350\257\201.png" "b/content/zh/post/2022/figures/\346\265\213\350\257\225\351\252\214\350\257\201.png" new file mode 100644 index 
0000000000000000000000000000000000000000..52401301383786f79a7562aaf351c36f41f0b6fb Binary files /dev/null and "b/content/zh/post/2022/figures/\346\265\213\350\257\225\351\252\214\350\257\201.png" differ diff --git "a/content/zh/post/2022/figures/\346\265\213\350\257\225\351\252\214\350\257\2011.png" "b/content/zh/post/2022/figures/\346\265\213\350\257\225\351\252\214\350\257\2011.png" new file mode 100644 index 0000000000000000000000000000000000000000..a0141bfac0eadab7abd4fb5c22f116fad574bf87 Binary files /dev/null and "b/content/zh/post/2022/figures/\346\265\213\350\257\225\351\252\214\350\257\2011.png" differ diff --git "a/content/zh/post/2022/figures/\347\254\2541\351\230\266\346\256\265.png" "b/content/zh/post/2022/figures/\347\254\2541\351\230\266\346\256\265.png" new file mode 100644 index 0000000000000000000000000000000000000000..54c38384d8cf113ab5528885cfbcc329f520be91 Binary files /dev/null and "b/content/zh/post/2022/figures/\347\254\2541\351\230\266\346\256\265.png" differ diff --git "a/content/zh/post/2022/figures/\347\254\2542\351\230\266\346\256\265.png" "b/content/zh/post/2022/figures/\347\254\2542\351\230\266\346\256\265.png" new file mode 100644 index 0000000000000000000000000000000000000000..cd6bd728aa9f5aa374a21e0be2cbde45a102f252 Binary files /dev/null and "b/content/zh/post/2022/figures/\347\254\2542\351\230\266\346\256\265.png" differ diff --git "a/content/zh/post/2022/figures/\347\274\226\350\257\221\346\211\247\350\241\214\347\250\213\345\272\217\345\220\216.png" "b/content/zh/post/2022/figures/\347\274\226\350\257\221\346\211\247\350\241\214\347\250\213\345\272\217\345\220\216.png" new file mode 100644 index 0000000000000000000000000000000000000000..a175c0287ccf70f3c55edf63d99e354ba1a5a456 Binary files /dev/null and "b/content/zh/post/2022/figures/\347\274\226\350\257\221\346\211\247\350\241\214\347\250\213\345\272\217\345\220\216.png" differ diff --git "a/content/zh/post/2022/figures/\350\213\245\345\207\272\347\216\260\344\273\245\344\270\213\347\273\223\346\236\234\350\241\250\346\230\216\350\277\236\346\216\245\346\210\220\345\212\237.png" "b/content/zh/post/2022/figures/\350\213\245\345\207\272\347\216\260\344\273\245\344\270\213\347\273\223\346\236\234\350\241\250\346\230\216\350\277\236\346\216\245\346\210\220\345\212\237.png" new file mode 100644 index 0000000000000000000000000000000000000000..971ed5c1a70d56cc8fe57ca853b866db7621d4af Binary files /dev/null and "b/content/zh/post/2022/figures/\350\213\245\345\207\272\347\216\260\344\273\245\344\270\213\347\273\223\346\236\234\350\241\250\346\230\216\350\277\236\346\216\245\346\210\220\345\212\237.png" differ diff --git "a/content/zh/post/2022/figures/\350\277\220\350\241\214gsom\345\220\216\346\212\245\351\224\231\345\246\202\344\270\213.png" "b/content/zh/post/2022/figures/\350\277\220\350\241\214gsom\345\220\216\346\212\245\351\224\231\345\246\202\344\270\213.png" new file mode 100644 index 0000000000000000000000000000000000000000..bf0af9fbe22086f18b7f881a71668c04b348e7c8 Binary files /dev/null and "b/content/zh/post/2022/figures/\350\277\220\350\241\214gsom\345\220\216\346\212\245\351\224\231\345\246\202\344\270\213.png" differ diff --git "a/content/zh/post/2022/figures/\350\277\231\346\227\266\345\217\257\350\203\275\351\201\207\345\210\260failed-to.png" "b/content/zh/post/2022/figures/\350\277\231\346\227\266\345\217\257\350\203\275\351\201\207\345\210\260failed-to.png" new file mode 100644 index 0000000000000000000000000000000000000000..05d13029ea1094f5587bb7194b7f2e7f8699861a Binary files /dev/null 
and "b/content/zh/post/2022/figures/\350\277\231\346\227\266\345\217\257\350\203\275\351\201\207\345\210\260failed-to.png" differ diff --git "a/content/zh/post/2022/figures/\350\277\231\351\207\214\344\270\27226000.png" "b/content/zh/post/2022/figures/\350\277\231\351\207\214\344\270\27226000.png" new file mode 100644 index 0000000000000000000000000000000000000000..bcbacff4517158209f0c0e94d14b968194d7acf4 Binary files /dev/null and "b/content/zh/post/2022/figures/\350\277\231\351\207\214\344\270\27226000.png" differ diff --git "a/content/zh/post/2022/figures/\350\277\231\351\207\214\344\270\272260001.png" "b/content/zh/post/2022/figures/\350\277\231\351\207\214\344\270\272260001.png" new file mode 100644 index 0000000000000000000000000000000000000000..18cf6daa85b2fd6a06e46d3648bcd3abf4396a44 Binary files /dev/null and "b/content/zh/post/2022/figures/\350\277\231\351\207\214\344\270\272260001.png" differ diff --git "a/content/zh/post/2022/figures/\350\277\236\346\216\245\346\262\241\346\234\211\351\227\256\351\242\230.png" "b/content/zh/post/2022/figures/\350\277\236\346\216\245\346\262\241\346\234\211\351\227\256\351\242\230.png" new file mode 100644 index 0000000000000000000000000000000000000000..8e747ebb6b64fe95708b78a315e381307c7788fe Binary files /dev/null and "b/content/zh/post/2022/figures/\350\277\236\346\216\245\346\262\241\346\234\211\351\227\256\351\242\230.png" differ diff --git "a/content/zh/post/2022/figures/\351\207\207\347\224\250JDBC\345\274\200\345\217\221\345\272\224\347\224\250\347\250\213\345\272\217\347\232\204\346\265\201\347\250\213.png" "b/content/zh/post/2022/figures/\351\207\207\347\224\250JDBC\345\274\200\345\217\221\345\272\224\347\224\250\347\250\213\345\272\217\347\232\204\346\265\201\347\250\213.png" new file mode 100644 index 0000000000000000000000000000000000000000..884b9343d59ad1a297845d7c097bc6c9e3743f51 Binary files /dev/null and "b/content/zh/post/2022/figures/\351\207\207\347\224\250JDBC\345\274\200\345\217\221\345\272\224\347\224\250\347\250\213\345\272\217\347\232\204\346\265\201\347\250\213.png" differ diff --git "a/content/zh/post/2022/node_exporter-\350\207\252\345\256\232\344\271\211\347\233\221\346\216\247\346\214\207\346\240\207.md" "b/content/zh/post/2022/node_exporter-\350\207\252\345\256\232\344\271\211\347\233\221\346\216\247\346\214\207\346\240\207.md" new file mode 100644 index 0000000000000000000000000000000000000000..3065f36a2c3ed90ecf2fc9d039c3df26323e8d23 --- /dev/null +++ "b/content/zh/post/2022/node_exporter-\350\207\252\345\256\232\344\271\211\347\233\221\346\216\247\346\214\207\346\240\207.md" @@ -0,0 +1,127 @@ ++++ + +title = "node exporter 自定义监控指标" + +date = "2021-12-16" + +tags = [ "node exporter 自定义监控指标"] + +archives = "2021-12" + +author = "高云龙" + +summary = "node exporter 自定义监控指标" + +img = "/zh/post/2022/title/img10.png" + +times = "12:30" + ++++ + +# node\_exporter 自定义监控指标 + +## 概述 + +node\_exporter除了可以收集系统指标外,还可以采集我们自定义的监控指标。采集自定义监控指标是通过textfile模块来完成的,textfile模块默认会随着node\_exporter启动而启动,如果想要采集自定义指标,还需要在启动node\_exporter的时候,添加–collector.textfile.directory=""参数,这个参数是自定义的采集路径,所有自定义监控指标文件都放在这个目录下,且文件名都以.prom结尾。 + +## 自定义指标 + +- 启动node\_exporter + + ``` + --创建目录 + # mkdir -p /opt/node_exporter/prom + --以指定采集路径的方式启动 + # nohup /opt/node_exporter/node_exporter --collector.textfile.directory="/opt/node_exporter/prom" > /opt/node_exporter/node_exporter.log 2>&1 & + ``` + +- 创建监控指标文件 + + ``` + # cd /opt/node_exporter/prom + # vi db_heartbeat.prom + + --HELP 和 TYPE 如果没有制定,node_exporter会自动添加 + # HELP db_select Metric read from 
/opt/node_exporter/prom/db_heartbeat.prom
+    # TYPE db_select untyped
+    db_select{database="172.16.3.90:5432"} 1
+    db_select{database="172.16.3.90:7432"} 0
+    ```
+
+    在浏览器中可以看到,我们自定义的指标已经采集到
+
+    ![](figures/我们自定义的指标已经采集到.png)
+
+
+## 定时任务
+
+自定义监控指标大多数需要与crontab结合,按照需求设置采集指标的时间。
+
+- flock命令
+
+    为了防止某个任务的执行时间超过了 crontab 中为此任务设定的执行周期,使用flock命令将crontab串行化:
+
+    flock -xn /tmp/flock.lock -c 'xxx.sh' --如果/tmp/flock.lock不存在,flock会自动创建
+
+    ```
+    Usage:
+     flock [options] <file|directory> [command args]
+     flock [options] <file|directory> -c <command>
+     flock [options] <file descriptor number>
+
+    Options:
+     -s  --shared             get a shared lock
+     -x  --exclusive          get an exclusive lock (default)
+     -u  --unlock             remove a lock
+     -n  --nonblock           fail rather than wait
+     -w  --timeout <secs>     wait for a limited amount of time
+     -E  --conflict-exit-code <number>  exit code after conflict or timeout
+     -o  --close              close file descriptor before running command
+     -c  --command <command>  run a single command string through the shell
+
+     -h, --help     display this help and exit
+     -V, --version  output version information and exit
+
+    For more details see flock(1).
+    ```
+
+- shell脚本
+
+    这里以查询MogDB为例,通过sql(select 1;)进行探活
+
+    ```
+    vi /opt/scripts/db_heartbeat.sh
+
+    #!/bin/bash
+
+    source /home/omm/.bashrc
+
+    nums=(
+    172.16.3.90:5432:opengauss_exporter:opengauss_exporter123
+    172.16.3.90:7432:opengauss_exporter:opengauss_exporter123
+    )
+
+    for i in $(seq 0 $[${#nums[*]}-1])
+    do
+      ip=`echo ${nums[$i]}|awk -F ':' '{print $1}'`
+      port=`echo ${nums[$i]}|awk -F ':' '{print $2}'`
+      username=`echo ${nums[$i]}|awk -F ':' '{print $3}'`
+      password=`echo ${nums[$i]}|awk -F ':' '{print $4}'`
+
+      result=`gsql "host=$ip port=$port user=$username password=$password dbname=postgres" -t -c "select 1"`
+      if [ $? -eq 0 ]; then
+        echo "db_select{database=\"$ip:$port\"} 1" >> /opt/node_exporter/prom/db_heartbeat.prom
+      else
+        echo "db_select{database=\"$ip:$port\"} 0" >> /opt/node_exporter/prom/db_heartbeat.prom
+      fi
+    done
+    ```
+
+- crontab
+
+    ```
+    --执行脚本之前,先清理.prom文件,防止监控指标重复
+    * * * * * /usr/bin/flock -xn /tmp/flock.lock -c ">/opt/node_exporter/prom/db_heartbeat.prom && /usr/bin/bash /opt/scripts/db_heartbeat.sh >> /opt/scripts/db_heartbeat.log"
+    ```
+
+
diff --git "a/content/zh/post/2022/openGauss-MOT\345\255\230\345\202\250\345\274\225\346\223\216.md" "b/content/zh/post/2022/openGauss-MOT\345\255\230\345\202\250\345\274\225\346\223\216.md"
new file mode 100644
index 0000000000000000000000000000000000000000..dca8a3f793fba23afa9b06fed18046576230e26f
--- /dev/null
+++ "b/content/zh/post/2022/openGauss-MOT\345\255\230\345\202\250\345\274\225\346\223\216.md"
@@ -0,0 +1,51 @@
++++
+
+title = "openGauss MOT存储引擎"
+
+date = "2022-01-07"
+
+tags = [ "openGauss MOT存储引擎"]
+
+archives = "2022-01"
+
+author = "ORA-DBA"
+
+summary = "openGauss MOT存储引擎"
+
+img = "/zh/post/2022/title/img15.jpg"
+
+times = "12:30"
+
++++
+
+# openGauss MOT存储引擎
+
+## 介绍
+
+MOT存储引擎,是一种事务性行存储,针对多核和大内存服务器进行了优化。MOT为事务性工作负载提供更高的性能。
+
+MOT支持ACID特性,并包括严格的持久性和高可用性支持。企业可以在关键任务、性能敏感的在线事务处理(OLTP)中使用MOT,以实现高性能、高吞吐、可预测低延迟以及多核服务器的高利用率。
+
+MOT适合在多路和多核处理器的现代服务器上运行。
+
+## openGauss内存优化存储引擎结构
+
+openGauss内存优化存储引擎结构图
+
+![](figures/openGauss内存优化存储引擎结构图.png)
+
+openGauss 内存优化存储引擎组件负责管理MOT和事务。
+
+MOT与基于磁盘的普通表并排创建。MOT实现了几乎完全的SQL覆盖,并且支持完整的数据库功能集,如存储过程和自定义函数。
+
+通过完全存储在内存中的数据和索引、非统一内存访问感知(NUMA-aware)设计、消除锁和锁存争用的算法以及查询原生编译,MOT可提供更快的数据访问和更高效的事务执行。
+
+MOT有效的几乎无锁的设计和高度调优的实现,使其在多核服务器上实现了卓越的近线性吞吐量扩展。
+
+**MOT完全支持ACID特性:**
+
+- 原子性(Atomicity):原子事务是一系列不可分割的数据库操作。在事务完成(分别提交或中止)之后,这些操作要么全部发生,要么全部不发生。
+- 一致性(Consistency):事务结束后,数据库处于一致状态,保留数据完整性。
+- 隔离性(Isolation):事务之间不能相互干扰。MOT支持不同的隔离级别,包括可重复读和读已提交。在下一个版本中,MOT还将支持可序列化隔离。
+- 持久性(Durability):即使发生崩溃和失败,成功完成(提交)的事务效果持久保存。MOT完全集成了openGauss的基于WAL的日志记录。同时支持同步和异步日志记录选项。MOT还支持同步+面向NUMA优化的组提交。
+
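+下面给出一个最小的使用示意:MOT表通过mot\_fdw外部数据包装器以外部表(FOREIGN TABLE)的形式创建,对应用来说增删改查与普通表一致。示例中的表名、字段与端口均为假设,具体建表语法以openGauss官方文档为准。
+
+```
+# 假设性示例:创建并访问一张MOT内存表(SERVER名称mot_server与端口26000均为示意)
+gsql -p 26000 -d postgres -c "CREATE FOREIGN TABLE mot_test (id INT, info VARCHAR(32)) SERVER mot_server;"
+gsql -p 26000 -d postgres -c "INSERT INTO mot_test VALUES (1, 'in-memory row');"
+gsql -p 26000 -d postgres -c "SELECT * FROM mot_test;"
+```
+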
diff --git "a/content/zh/post/2022/openGauss-MogDB-TPCH\346\200\247\350\203\275\346\265\213\350\257\225\346\255\245\351\252\244.md" "b/content/zh/post/2022/openGauss-MogDB-TPCH\346\200\247\350\203\275\346\265\213\350\257\225\346\255\245\351\252\244.md"
new file mode 100644
index 0000000000000000000000000000000000000000..b0e9a1296e2beabf316f592394c083140e5126da
--- /dev/null
+++ "b/content/zh/post/2022/openGauss-MogDB-TPCH\346\200\247\350\203\275\346\265\213\350\257\225\346\255\245\351\252\244.md"
@@ -0,0 +1,106 @@
++++
+
+title = "openGauss/MogDB TPCH性能测试步骤"
+
+date = "2021-12-28"
+
+tags = [ "openGauss/MogDB TPCH性能测试步骤"]
+
+archives = "2021-12"
+
+author = "Seven"
+
+summary = "openGauss/MogDB TPCH性能测试步骤"
+
+img = "/zh/post/2022/title/img13.png"
+
+times = "12:30"
+
++++
+
+# openGauss/MogDB TPCH性能测试步骤
+
+TPCH官网直接下载的包无法直接兼容openGauss/MogDB/PostgreSQL,为了兼容pg/og的语法,总结了测试步骤供大家参考。
+
+- 建表
+
+    ```
+    gsql -p 26000 -d postgres -U tpch < createtab_og.sql
+    ```
+
+- 生成数据
+
+    例:
+
+    生成100G数据
+
+    ```
+    ./dbgen -s 100
+    ```
+
+    例:
+
+    8线程生成500G数据
+
+    ```
+    #!/bin/sh
+    ./dbgen -vf -s 500 -S 1 -C 8 &
+    ./dbgen -vf -s 500 -S 2 -C 8 &
+    ./dbgen -vf -s 500 -S 3 -C 8 &
+    ./dbgen -vf -s 500 -S 4 -C 8 &
+    ./dbgen -vf -s 500 -S 5 -C 8 &
+    ./dbgen -vf -s 500 -S 6 -C 8 &
+    ./dbgen -vf -s 500 -S 7 -C 8 &
+    ./dbgen -vf -s 500 -S 8 -C 8 &
+    ```
+
+- 数据转换
+
+    生成的数据文件格式为tbl,转为csv格式
+
+    ```
+    for i in `ls *.tbl`;do sed 's/|$//' $i > ${i/tbl/csv};echo $i;done;
+    ```
+
+- 导入数据
+
+    ```
+    dir=/TPCH/TPCH_gs/TPCH/dbgen/data
+    opts='-p 26000 -d postgres'
+    gsql $opts -c "COPY tpch.region FROM '$dir/region.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.nation FROM '$dir/nation.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.part FROM '$dir/part.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.supplier FROM '$dir/supplier.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.customer FROM '$dir/customer.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.partsupp FROM '$dir/partsupp.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.orders FROM '$dir/orders.csv' WITH (FORMAT csv,DELIMITER '|')"
+    gsql $opts -c "COPY tpch.lineitem FROM '$dir/lineitem.csv' WITH (FORMAT csv,DELIMITER '|')"
+    ```
+
+- 创建所需函数
+
+    ```
+    create or replace function NUMTOYMINTERVAL(float8, text) returns interval as $$ select ($1||' '||$2)::interval;
+    $$ language sql strict immutable;
+
+    create or replace function NUMTODSINTERVAL(float8, text) returns interval as $$ select ($1||' '||$2)::interval;
+    $$ language sql strict immutable;
+    ```
+
+- 执行SQL文件夹下sql
+
+    ```
+    #!/bin/bash
+    opts='-p 26000 -d postgres -U tpch -W password'
+    for i in `seq 1 22`
+    do
+        echo $i"'s result"
+        gsql ${opts} -f ${i}.sql
+    done
+    ```
+
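+导入完成后,建议先核对各表行数再执行查询。下面是一个假设性的核对脚本:region和nation行数固定为5和25,其余表行数随-s参数等比例增长(如SF=1时lineitem约600万行),可对照TPC-H规范核对:
+
+```
+#!/bin/bash
+# 假设性脚本:逐表输出行数,便于与TPC-H规范中对应SF的期望行数核对
+opts='-p 26000 -d postgres'
+for t in region nation part supplier partsupp customer orders lineitem
+do
+    gsql $opts -t -c "select '$t' as tab, count(*) from tpch.$t;"
+done
+```
+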
"b/content/zh/post/2022/openGauss-MogDB\345\244\247\345\257\271\350\261\241LargeObject\345\255\230\345\217\226\346\265\213\350\257\225.md" @@ -0,0 +1,238 @@ ++++ + +title = "openGauss/MogDB大对象LargeObject存取测试" + +date = "2021-12-17" + +tags = [ "openGauss/MogDB大对象LargeObject存取测试"] + +archives = "2021-12" + +author = "多米爸比" + +summary = "openGauss/MogDB大对象LargeObject存取测试" + +img = "/zh/post/2022/title/img14.png" + +times = "12:30" + ++++ + +# openGauss/MogDB大对象LargeObject存取测试 + +openGauss/MogDB数据库里bytea二进制类型受segment size编译参数限制,默认不能超过1GB,如果字段存储数据超过1GB可以使用lo(Large Object)扩展类型。 + +## lo类型需要先创建lo extension + +``` +$ gsql -p5432 -Uomm postgres -r +gsql ((MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:15:21 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +postgres=# create extension lo; +CREATE EXTENSION +``` + +创建完lo扩展,我们新建test\_lo表,info字段使用lo类型。 + +``` +postgres=# create table test_lo(id int,info lo); +CREATE TABLE +``` + +创建test\_lo表管理触发器,对update和delete操作使用lo\_manage函数管理,不然会产生孤立大对象。 + +``` +postgres=# create trigger test_lo before UPDATE OR DELETE ON test_lo FOR EACH ROW EXECUTE procedure lo_manage(info); +WARNING: Trigger function with non-plpgsql type is not recommended. +DETAIL: Non-plpgsql trigger function are not shippable by default. +HINT: Unshippable trigger may lead to bad performance. +CREATE TRIGGER +``` + +使用dd生成2GB文件 + +``` +postgres=# \! dd if=/dev/zero of=test_lo bs=1M count=2048 && sync +记录了2048+0 的读入 +记录了2048+0 的写出 +2147483648字节(2.1 GB,2.0 GiB)已复制,0.805435 s,2.7 GB/s +``` + +## 测试lo\_import函数导入数据到数据表 + +``` +postgres=# insert into test_lo values(1,lo_import('/home/omm/test_lo')); +INSERT 0 1 +``` + +可以看到数据可以正常导入,如果不使用lo类型,使用bytea类型会提示下面的报错。 + +``` +ERROR: requested length too large +``` + +## 测试lo\_export函数导出数据表数据到文件 + +``` +postgres=# select lo_export(test_lo.info,'/home/omm/test_ext_lo') from test_lo where id=1; + lo_export +----------- + 1 +(1 row) +``` + +可以看到数据正常导出。 + +查看导入导出的数据文件,也可以使用diff命令进行比对。 + +``` +postgres=# \! 
+查看导入导出的数据文件,也可以使用diff命令进行比对。
+
+```
+postgres=# \! ls -lh test_*
+-rw-r--r-- 1 omm dbgrp 2.0G 12月 17 13:00 test_ext_lo
+-rw------- 1 omm dbgrp 2.0G 12月 17 12:58 test_lo
+```
+
+## 查看数据表大对象字段大小
+
+分两步进行,首先查大对象字段的oid(lo类型字段在用户表里面只存储一个oid引用指针,并不实际存数据)
+
+```
+postgres=# select * from test_lo;
+ id | info
+----+-------
+  1 | 16392
+(1 row)
+```
+
+实际数据使用多条bytea记录存储在pg\_largeobject表,可以根据oid查询统计字段的大小
+
+```
+postgres=# select loid,pg_size_pretty(sum(octet_length(data)))
+from pg_largeobject
+where loid =16392
+group by loid;
+ loid  | pg_size_pretty
+-------+----------------
+ 16392 | 2048 MB
+(1 row)
+```
+
+也可以使用如下函数来查询
+
+```
+create or replace function get_lo_size(oid)
+returns bigint
+volatile strict
+as $function$
+declare
+    fd integer;
+    sz bigint;
+begin
+    fd := lo_open($1, x'40000'::int);
+    perform lo_lseek64(fd, 0, 2);
+    sz := lo_tell64(fd);
+    perform lo_close(fd);
+    return sz;
+end;
+$function$ language plpgsql;
+```
+
+查询结果如下
+
+```
+postgres=# select pg_size_pretty(get_lo_size(16392));
+ pg_size_pretty
+----------------
+ 2048 MB
+(1 row)
+```
+
+再来测试JDBC应用层的使用
+
+## JDBC-Java文件入库
+
+```
+    public static void main(String[] args) throws Exception{
+        Class.forName("org.postgresql.Driver");
+
+        Connection conn = DriverManager.getConnection("jdbc:postgresql://ip:port/dbname","username","password");
+
+        // 大对象操作必须在事务中进行
+        conn.setAutoCommit(false);
+
+        LargeObjectManager lobj = conn.unwrap(org.postgresql.PGConnection.class).getLargeObjectAPI();
+
+        long oid = lobj.createLO(LargeObjectManager.READ | LargeObjectManager.WRITE);
+
+        LargeObject obj = lobj.open(oid, LargeObjectManager.WRITE);
+
+        File file = new File("c:/work/test_lo");
+        FileInputStream fis = new FileInputStream(file);
+
+        // 以buf为单位分块写入大对象
+        byte buf[] = new byte[10*1024*1024];
+        int s, tl = 0;
+        while ((s = fis.read(buf, 0, buf.length)) > 0)
+        {
+            obj.write(buf, 0, s);
+            tl += s;
+        }
+
+        obj.close();
+
+        PreparedStatement ps = conn.prepareStatement("INSERT INTO test_lo VALUES (?, ?)");
+        ps.setInt(1, 100);
+        ps.setLong(2, oid);
+        ps.executeUpdate();
+        ps.close();
+        fis.close();
+
+        conn.commit();
+        conn.close();
+
+    }
+```
+
+## JDBC-Java读数据输出到文件
+
+```
+    public static void main(String[] args) throws Exception{
+        Class.forName("org.postgresql.Driver");
+
+        Connection conn = DriverManager.getConnection("jdbc:postgresql://ip:port/dbname","username","password");
+
+        conn.setAutoCommit(false);
+
+        LargeObjectManager lobj = conn.unwrap(org.postgresql.PGConnection.class).getLargeObjectAPI();
+
+        PreparedStatement ps = conn.prepareStatement("SELECT info FROM test_lo WHERE id = ?");
+        ps.setInt(1, 100);
+        ResultSet rs = ps.executeQuery();
+
+        File file = new File("c:/work/test_out_lo");
+        FileOutputStream fos = new FileOutputStream(file);
+
+        while (rs.next())
+        {
+            long oid = rs.getLong(1);
+            LargeObject obj = lobj.open(oid, LargeObjectManager.READ);
+
+            // 以buf为单位分块读取大对象并写入文件
+            byte buf[] = new byte[10*1024*1024];
+            int s, tl = 0;
+            while ((s = obj.read(buf, 0, buf.length)) > 0)
+            {
+                fos.write(buf, 0, s);
+                tl += s;
+            }
+
+            obj.close();
+        }
+        rs.close();
+        ps.close();
+        fos.close();
+
+        conn.commit();
+        conn.close();
+
+    }
+```
+
diff --git "a/content/zh/post/2022/openGauss-gsql-\345\270\270\347\224\250\345\205\203\345\221\275\344\273\244-\344\270\200.md" "b/content/zh/post/2022/openGauss-gsql-\345\270\270\347\224\250\345\205\203\345\221\275\344\273\244-\344\270\200.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f0d867db9fe532c6774b8067e018ca66bfe57042
--- /dev/null
+++ "b/content/zh/post/2022/openGauss-gsql-\345\270\270\347\224\250\345\205\203\345\221\275\344\273\244-\344\270\200.md"
@@ -0,0 +1,472 @@
++++
+
+title = "openGauss
gsql 常用元命令 一" + +date = "2022-01-10" + +tags = [ "openGauss gsql 常用元命令 一"] + +archives = "2022-01" + +author = "晨辉" + +summary = "openGauss gsql 常用元命令 一" + +img = "/zh/post/2022/title/img12.png" + +times = "12:30" + ++++ + +# openGauss gsql 常用元命令 一 + +## 连接数据库 使用 -E参数可以显示元命令具体执行的SQL信息 + +``` +[omm@og1 ~]$ gsql -d postgres -p15400 -E +gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. +``` + +## \\l 显示数据库中数据库信息 + +``` +openGauss=# \l +********* QUERY ********** +SELECT d.datname as "Name", + pg_catalog.pg_get_userbyid(d.datdba) as "Owner", + pg_catalog.pg_encoding_to_char(d.encoding) as "Encoding", + d.datcollate as "Collate", + d.datctype as "Ctype", + pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" +FROM pg_catalog.pg_database d +ORDER BY 1; +************************** + + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+------------+------------+------------------- + mydb | omm | UTF8 | en_US.utf8 | en_US.utf8 | + postgres | omm | UTF8 | en_US.utf8 | en_US.utf8 | + studentdb | omm | UTF8 | en_US.utf8 | en_US.utf8 | + template0 | omm | UTF8 | en_US.utf8 | en_US.utf8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.utf8 | en_US.utf8 | =c/omm + + | | | | | omm=CTc/omm +(5 rows) +``` + +## \\du 同\\dg 显示数据库中所有用户和角色 + +``` +openGauss=# \du +********* QUERY ********** +SELECT r.rolname, r.rolsuper, r.rolinherit, + r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, + r.rolconnlimit, r.rolvalidbegin, r.rolvaliduntil, + ARRAY(SELECT b.rolname + FROM pg_catalog.pg_auth_members m + JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) + WHERE m.member = r.oid) as memberof +, r.rolreplication +, r.rolauditadmin +, r.rolsystemadmin +, r.rolmonitoradmin +, r.roloperatoradmin +, r.rolpolicyadmin +, r.roluseft +, r.rolkind +FROM pg_catalog.pg_roles r +WHERE r.rolname not in ('gs_role_copy_files', 'gs_role_signal_backend', 'gs_role_tablespace', 'gs_role_replication', 'gs_role_account_lock', 'gs_role_pldebugger') +ORDER BY 1; +************************** + + List of roles + Role name | Attributes | Member of +-----------+------------------------------------------------------------------------------------------------------------------+----------- + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} + student | Sysadmin | {} + +openGauss=# \dg +********* QUERY ********** +SELECT r.rolname, r.rolsuper, r.rolinherit, + r.rolcreaterole, r.rolcreatedb, r.rolcanlogin, + r.rolconnlimit, r.rolvalidbegin, r.rolvaliduntil, + ARRAY(SELECT b.rolname + FROM pg_catalog.pg_auth_members m + JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) + WHERE m.member = r.oid) as memberof +, r.rolreplication +, r.rolauditadmin +, r.rolsystemadmin +, r.rolmonitoradmin +, r.roloperatoradmin +, r.rolpolicyadmin +, r.roluseft +, r.rolkind +FROM pg_catalog.pg_roles r +WHERE r.rolname not in ('gs_role_copy_files', 'gs_role_signal_backend', 'gs_role_tablespace', 'gs_role_replication', 'gs_role_account_lock', 'gs_role_pldebugger') +ORDER BY 1; +************************** + + List of roles + Role name | Attributes | Member of +-----------+------------------------------------------------------------------------------------------------------------------+----------- + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, 
Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} + student | Sysadmin | {} +``` + +## \\db 显示数据库中所有表空间信息 + +``` +openGauss=# \db +********* QUERY ********** +SELECT spcname AS "Name", + pg_catalog.pg_get_userbyid(spcowner) AS "Owner", + pg_catalog.pg_tablespace_location(oid) AS "Location" +FROM pg_catalog.pg_tablespace +ORDER BY 1; +************************** + + List of tablespaces + Name | Owner | Location +------------+-------+------------------------ + pg_default | omm | + pg_global | omm | + student_ts | omm | tablespace/student_ts1 +(3 rows) +``` + +## \\dn 显示数据库中所有schema信息 + +``` +openGauss=# \dn +********* QUERY ********** +SELECT n.nspname AS "Name", + pg_catalog.pg_get_userbyid(n.nspowner) AS "Owner" +FROM pg_catalog.pg_namespace n +WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' +ORDER BY 1; +************************** + + List of schemas + Name | Owner +----------------+--------- + blockchain | omm + cstore | omm + db4ai | omm + dbe_perf | omm + dbe_pldebugger | omm + pkg_service | omm + pmk | omm + public | omm + snapshot | omm + sqladvisor | omm + student | student +(11 rows) +``` + +## \\d 显示当前数据库下相关数据库对象信息\(包含表、视图、物化视图、序列、外部表、stream\\ contview\) + +``` +openGauss=# \d +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c.reloptions as "Storage" +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind IN ('r','v','m','S','f','e','o','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Storage +--------+--------+-------+-------+---------------------------------- + public | test | table | omm | {orientation=row,compression=no} + public | v_test | view | omm | +(2 rows) +``` + +## \\d tablename 查看某个表的详细信息 + +``` +openGauss=# \d test +********* QUERY ********** +SELECT c.oid, + n.nspname, + c.relname +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relname ~ '^(test)$' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 2, 3; +************************** + +********* QUERY ********** +SELECT c.relchecks, c.relkind, c.relhasindex, c.relhasrules, c.relhastriggers, c.relhasoids, '', c.reltablespace, CASE WHEN c.reloftype = 0 THEN '' ELSE c.reloftype::pg_catalog.regtype::pg_catalog.text END, c.relpersistence,c.relhasclusterkey, c.relreplident, (select count(1) as haspolicy from pg_catalog.pg_class WHERE relname = 'pg_rlspolicy') +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_class tc ON (c.reltoastrelid = tc.oid) +WHERE c.oid = '16575'; +************************** + +********* QUERY ********** +SELECT * FROM pg_catalog.pg_class WHERE relname = 'gs_encrypted_columns' AND relnamespace = 11; +************************** + +********* QUERY ********** +SELECT a.attname, + pg_catalog.format_type(a.atttypid, a.atttypmod), + (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 176) + 
FROM pg_catalog.pg_attrdef d + WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), + a.attnotnull, a.attnum, + (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type t + WHERE c.oid = a.attcollation AND t.oid = a.atttypid AND a.attcollation <> t.typcollation) AS attcollation, + NULL AS indexdef, + NULL AS attfdwoptions, + (SELECT pg_catalog.format_type (a.atttypmod, g.data_type_original_mod) AS clientlogic_original_type FROM gs_encrypted_columns g WHERE g.column_name = a.attname AND g.rel_id = 16575group by g.data_type_original_oid, g.data_type_original_mod), +(SELECT g.data_type_original_oid AS clientlogic_original_type_oid FROM gs_encrypted_columns g WHERE g.column_name = a.attname AND g.rel_id = 16575group by g.data_type_original_oid, g.data_type_original_mod), + (SELECT h.adgencol + FROM pg_catalog.pg_attrdef h + WHERE h.adrelid = a.attrelid AND h.adnum = a.attnum AND a.atthasdef) AS generated_column +FROM pg_catalog.pg_attribute a +WHERE a.attrelid = '16575' AND a.attnum > 0 AND NOT a.attisdropped AND a.attkvtype != 4 AND a.attname <> 'tableoid' AND a.attname <> 'tablebucketid' +ORDER BY a.attnum; +************************** + +********* QUERY ********** +SELECT c2.relname, i.indisprimary, i.indisunique, i.indisclustered, i.indisvalid, pg_catalog.pg_get_indexdef(i.indexrelid, 0, true), + pg_catalog.pg_get_constraintdef(con.oid, true), contype, condeferrable, condeferred, i.indisreplident, c2.reltablespace, i.indisusable +FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i + LEFT JOIN pg_catalog.pg_constraint con ON (conrelid = i.indrelid AND conindid = i.indexrelid AND contype IN ('p','u','x')) +WHERE c.oid = '16575' AND c.oid = i.indrelid AND i.indexrelid = c2.oid +ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname; +************************** + +********* QUERY ********** +SELECT pol.policyname, pol.policypermissive, trim(pol.policyroles::text, '{}'), pol.policyqual, pol.policycmd +FROM pg_catalog.pg_rlspolicies pol +LEFT JOIN pg_catalog.pg_namespace N on (N.nspname = pol.schemaname) +LEFT JOIN pg_catalog.pg_class C on (pol.tablename = C.relname and C.relnamespace = N.oid) +WHERE C.oid = '16575' ORDER BY 1; +************************** + +********* QUERY ********** +SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i WHERE c.oid=i.inhparent AND i.inhrelid = '16575' ORDER BY inhseqno; +************************** + +********* QUERY ********** +SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i WHERE c.oid=i.inhrelid AND i.inhparent = '16575' ORDER BY c.oid::pg_catalog.regclass::pg_catalog.text; +************************** + +********* QUERY ********** +select partkey,partstrategy from pg_partition where parentid = 16575 order by partkey +************************** + + Table "public.test" + Column | Type | Modifiers +--------+---------+----------- + id | integer | +Indexes: + "idx_id_test" btree (id) TABLESPACE pg_default + +openGauss=# +``` + +## \\dt 显示当前数据库中所有的表 + +``` +openGauss=# \dt +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c.reloptions as "Storage" +FROM 
pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind IN ('r','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Storage +--------+------+-------+-------+---------------------------------- + public | test | table | omm | {orientation=row,compression=no} +(1 row) +``` + +## \\dt+ 以扩展方式显示当前数据库所有表信息,比起\\dt 多了最后一列描述信息 + +``` +openGauss=# \dt+ +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + pg_catalog.pg_size_pretty(pg_catalog.pg_table_size(c.oid)) as "Size", + c.reloptions as "Storage", + pg_catalog.obj_description(c.oid, 'pg_class') as "Description" +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind IN ('r','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Size | Storage | Description +--------+------+-------+-------+---------+----------------------------------+------------- + public | test | table | omm | 0 bytes | {orientation=row,compression=no} | +(1 row) +``` + +## \\di 查看当前数据库中索引信息 + +``` +openGauss=# \di +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c2.relname as "Table", + c.reloptions as "Storage" +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid + LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid +WHERE c.relkind IN ('i','I','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Table | Storage +--------+-------------+-------+-------+-------+--------- + public | idx_id_test | index | omm | test | +(1 row) +``` + +## \\di indexname 查看当前数据库某个索引的信息 + +``` +openGauss=# \di idx_id_test +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' 
THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c2.relname as "Table", + c.reloptions as "Storage" +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid + LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid +WHERE c.relkind IN ('i','I','s','') + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND c.relname ~ '^(idx_id_test)$' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Table | Storage +--------+-------------+-------+-------+-------+--------- + public | idx_id_test | index | omm | test | +(1 row) +``` + +## \\dv 查看当前数据库视图信息 + +``` +openGauss=# \dv +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c.reloptions as "Storage" +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind IN ('v','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Storage +--------+--------+------+-------+--------- + public | v_test | view | omm | +(1 row) +``` + +## \\ds 查看当前数据库序列信息 + +``` +openGauss=# \ds +********* QUERY ********** +SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'i' THEN 'index' WHEN 'I' THEN 'global partition index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'm' THEN 'materialized view' WHEN 'e' THEN 'stream' WHEN 'o' THEN 'contview' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c.reloptions as "Storage" +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind IN ('S','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND c.relname not like 'matviewmap_%' + AND c.relname not like 'mlog_%' + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 1,2; +************************** + + List of relations + Schema | Name | Type | Owner | Storage +--------+------+----------+-------+--------- + public | sq1 | sequence | omm | +(1 row) +``` + diff --git "a/content/zh/post/2022/openGauss-gsql-\345\270\270\347\224\250\345\205\203\345\221\275\344\273\244-\344\272\214.md" "b/content/zh/post/2022/openGauss-gsql-\345\270\270\347\224\250\345\205\203\345\221\275\344\273\244-\344\272\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..ac0e4ee6e47de9d96570da056b7a07f56cda69b7 --- /dev/null +++ "b/content/zh/post/2022/openGauss-gsql-\345\270\270\347\224\250\345\205\203\345\221\275\344\273\244-\344\272\214.md" @@ -0,0 +1,243 @@ ++++ + 
+title = "openGauss gsql 常用元命令 二" + +date = "2022-01-10" + +tags = ["openGauss gsql 常用元命令 二"] + +archives = "2022-01" + +author = "晨辉" + +summary = "openGauss gsql 常用元命令 二" + +img = "/zh/post/2022/title/img11.png" + +times = "12:30" + ++++ + +# openGauss gsql 常用元命令 二 + +## \\df 查看当前数据库函数信息 + +``` +openGauss=# \df +********* QUERY ********** +SELECT n.nspname as "Schema", + p.proname as "Name", + pg_catalog.pg_get_function_result(p.oid) as "Result data type", + pg_catalog.pg_get_function_arguments(p.oid) as "Argument data types", + CASE + WHEN p.proisagg THEN 'agg' + WHEN p.proiswindow THEN 'window' + WHEN p.prorettype = 'pg_catalog.trigger'::pg_catalog.regtype THEN 'trigger' + ELSE 'normal' +END as "Type" , + fencedmode as "fencedmode" + , + propackage as "propackage" + , + prokind as "prokind" + +FROM pg_catalog.pg_proc p + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace +WHERE pg_catalog.pg_function_is_visible(p.oid) + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'db4ai' + AND n.nspname <> 'information_schema' +ORDER BY 1, 2, 4; +************************** + + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+-----------+------------------+--------------------------------------------------+--------+------------+------------+--------- + public | fuc_worth | numeric | price numeric, amount integer, OUT worth numeric | normal | f | f | f +(1 row) +``` + +## \\dx 查看已安装的扩展程序信息 + +``` +openGauss=# \dx +********* QUERY ********** +SELECT e.extname AS "Name", e.extversion AS "Version", n.nspname AS "Schema", c.description AS "Description" +FROM pg_catalog.pg_extension e LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace LEFT JOIN pg_catalog.pg_description c ON c.objoid = e.oid AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass +ORDER BY 1; +************************** + + List of installed extensions + Name | Version | Schema | Description +-----------------+---------+------------+-------------------------------------------------- + dist_fdw | 1.0 | pg_catalog | foreign-data wrapper for distfs access + file_fdw | 1.0 | pg_catalog | foreign-data wrapper for flat file access + hdfs_fdw | 1.0 | pg_catalog | foreign-data wrapper for flat file access + hstore | 1.1 | pg_catalog | data type for storing sets of (key, value) pairs + log_fdw | 1.0 | pg_catalog | Foreign Data Wrapper for accessing logging data + mot_fdw | 1.0 | pg_catalog | foreign-data wrapper for MOT access + plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language + security_plugin | 1.0 | pg_catalog | provides security functionality +(8 rows) +``` + +## \\x 语法 \\x\[on|off|auto\] 设置语句的输出模式,模式为行的方式输出,执行 \\x on 切换为以列的方式来显示 + +``` +openGauss=# \x +Expanded display is on. 
+openGauss=# \dx +********* QUERY ********** +SELECT e.extname AS "Name", e.extversion AS "Version", n.nspname AS "Schema", c.description AS "Description" +FROM pg_catalog.pg_extension e LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace LEFT JOIN pg_catalog.pg_description c ON c.objoid = e.oid AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass +ORDER BY 1; +************************** + +List of installed extensions +-[ RECORD 1 ]------------------------------------------------- +Name | dist_fdw +Version | 1.0 +Schema | pg_catalog +Description | foreign-data wrapper for distfs access +-[ RECORD 2 ]------------------------------------------------- +Name | file_fdw +Version | 1.0 +Schema | pg_catalog +Description | foreign-data wrapper for flat file access +-[ RECORD 3 ]------------------------------------------------- +``` + +## \\timing 语法 \\timing \[on|off\] 控制显示SQL的执行时间,默认为off, on 为显示SQL语句的执行时间 + +``` +openGauss=# select * from test; + id +---- + 1 +(1 row) + +openGauss=# \timing on +Timing is on. +openGauss=# select * from test; + id +---- + 1 +(1 row) + +Time: 0.352 ms +``` + +## \\h 用于获取SQL语句的帮助,例如 \\h merge + +``` +openGauss=# \h merge +Command: MERGE +Description: insert, update, or delete rows of a table based upon source data +Syntax: +MERGE [/*+ plan_hint */] INTO table_name [ [ AS ] alias ] +USING { { table_name | view_name } | subquery } [ [ AS ] alias ] +ON ( condition ) +[ + WHEN MATCHED THEN + UPDATE SET { column_name = { expression | DEFAULT } | + ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...] + [ WHERE condition ] +] +[ + WHEN NOT MATCHED THEN + INSERT { DEFAULT VALUES | + [ ( column_name [, ...] ) ] VALUES ( { expression | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] } +]; +``` + +## ? 获取gsql的元命令的帮助 + +``` +openGauss=# \? +General + \copyright show openGauss usage and distribution terms + \g [FILE] or ; execute query (and send results to file or |pipe) + \h(\help) [NAME] help on syntax of SQL commands, * for all commands + \parallel [on [num]|off] toggle status of execute (currently off) + \q quit gsql + +Query Buffer + \e [FILE] [LINE] edit the query buffer (or file) with external editor + \ef [FUNCNAME [LINE]] edit function definition with external editor + \p show the contents of the query buffer + \r reset (clear) the query buffer + \w FILE write query buffer to file + +Input/Output + \copy ... perform SQL COPY with data stream to the client host + \echo [STRING] write string to standard output + \i FILE execute commands from file + \i+ FILE KEY execute commands from encrypted file + \ir FILE as \i, but relative to location of current script + \ir+ FILE KEY as \i+, but relative to location of current script + \o [FILE] send all query results to file or |pipe + \qecho [STRING] write string to query output stream (see \o) +``` + +## ! os\_command 用于执行操作系统命令,同oracle的 !,mysql的 system + +``` +openGauss-# \! pwd +/home/omm +openGauss-# \! ls +1.sh create_db_tables.sql test.sql +``` + +## \\o filename 用于重定向输出到文件,注意这个不是简单的将屏幕的内容输出到文本,而是将SQL语句正确执行的结果输出到文本 + +``` +openGauss-# \o test.out +openGauss-# select * from test; +WARNING: Session unused timeout. +FATAL: terminating connection due to administrator command +could not send data to server: Broken pipe +The connection to the server was lost. Attempting reset: Succeeded. +openGauss=# select * from test; +openGauss=# \! cat test.out + id +---- + 1 +(1 row) + +openGauss=# select * from pg_tables; +openGauss=# \! 
cat test.out + id +---- + 1 +(1 row) + + schemaname | tablename | tableowner | tablespace | hasindexes | hasrules | hastriggers | tablecreator | created | last_ddl_time +--------------------+-------------------------------+------------+------------+------------+----------+-------------+--------------+-------------------------------+------------------------------- + pg_catalog | pg_statistic | omm | | t | f | f | | | + +\i file.sql +``` + +## \\conninfo 显示gsql中显示会话的连接信息 + +## \\c\[onnect\] \[DBNAME\] 切换数据库 + +``` +openGauss=# \conninfo +You are connected to database "postgres" as user "omm" via socket in "/opt/huawei/tmp" at port "15400". +openGauss=# \c mydb +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "mydb" as user "omm". +mydb=# \conninfo +You are connected to database "mydb" as user "omm" via socket in "/opt/huawei/tmp" at port "15400". +``` + +## \\echo \[string\] 打印字符串 + +``` +mydb=# \echo Hello World! +Hello World! +``` + +## \\q 退出gsql + diff --git "a/content/zh/post/2022/openGauss\344\270\255\347\232\204SQL\345\274\225\346\223\216\344\273\213\347\273\215.md" "b/content/zh/post/2022/openGauss\344\270\255\347\232\204SQL\345\274\225\346\223\216\344\273\213\347\273\215.md" new file mode 100644 index 0000000000000000000000000000000000000000..ab04f5c4cd4eafbe886c8ef010a48e05917edfb1 --- /dev/null +++ "b/content/zh/post/2022/openGauss\344\270\255\347\232\204SQL\345\274\225\346\223\216\344\273\213\347\273\215.md" @@ -0,0 +1,46 @@ ++++ + +title = "openGauss中的SQL引擎介绍" + +date = "2021-12-23" + +tags = [ "openGauss中的SQL引擎介绍"] + +archives = "2021-12" + +author = "ccgo" + +summary = "openGauss中的SQL引擎介绍" + +img = "/zh/post/2022/title/img2.png" + +times = "12:30" + ++++ + +# openGauss中的SQL引擎介绍 + +![](figures/20211223-8c6710da-e8ba-4c22-a1dd-dc76ecaec07a.png) + +![](figures/20211223-01cf061e-a19f-4516-9ddf-d38eb5bbbc86.png) + +![](figures/20211223-92cb0889-6352-4ae6-a73f-1ec772e8a730.png) + +![](figures/20211223-453c2df5-151d-4333-a812-732e1a32313b.png) + +![](figures/20211223-60e81928-181c-4964-b0ec-abdd2acc7da7.png) + +![](figures/20211223-9c55c807-e30b-44a9-8810-4d2b70db10a9.png) + +![](figures/20211223-7afbf443-21c5-4855-8ed7-c264abaf9ff0.png) + +![](figures/20211223-ef70cfd4-da07-4c1d-aabe-cc867cedbc80.png) + +![](figures/20211223-ae44972c-4cc6-49b7-94c5-5b507039a686.png) + +![](figures/20211223-c49e9596-383a-41c4-8057-77cdfd9e8f5e.png) + +![](figures/20211223-83e9cf25-6bbc-4e0e-a24d-963d9050ae73.png) + +![](figures/20211223-8e28c064-237c-4c48-8d6d-7498b11f1c3b.png) + diff --git "a/content/zh/post/2022/openGauss\344\272\213\345\212\241\346\234\272\345\210\266\344\270\255MVCC\346\212\200\346\234\257\347\232\204\345\256\236\347\216\260\345\210\206\346\236\220.md" "b/content/zh/post/2022/openGauss\344\272\213\345\212\241\346\234\272\345\210\266\344\270\255MVCC\346\212\200\346\234\257\347\232\204\345\256\236\347\216\260\345\210\206\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..94e23564b066e59db4f776c63ed9a673ffd5b6ba --- /dev/null +++ "b/content/zh/post/2022/openGauss\344\272\213\345\212\241\346\234\272\345\210\266\344\270\255MVCC\346\212\200\346\234\257\347\232\204\345\256\236\347\216\260\345\210\206\346\236\220.md" @@ -0,0 +1,477 @@ ++++ + +title = "openGauss事务机制中MVCC技术的实现分析" + +date = "2021-12-27" + +tags = [ "openGauss事务机制中MVCC技术的实现分析"] + +archives = "2021-12" + +author = "luooofan" + +summary = "openGauss事务机制中MVCC技术的实现分析" + +img = "/zh/post/2022/title/img16.png" + +times = "12:30" + 
++++ + +# openGauss事务机制中MVCC技术的实现分析 + +## 概述 + +1. **事务** + + 事务是为用户提供的最核心、最具吸引力的数据库功能之一。简单地说,事务是用户定义的一系列数据库操作\(如查询、插入、修改或删除等\)的集合,从数据库内部保证了该操作集合作为一个整体的原子性\(Atomicity\)、一致性\(Consistency\)、隔离性\(Isolation\)和持久性\(Durability\),这些特性统称事务的ACID特性。 + +2. **DBMS中的并发控制** + + 并发控制旨在针对数据库中对事务并行的场景,保证 ACID 中的一致性(Consistency)与隔离性(Isolation)。数据库技术中主流的三种并发控制技术分别是: Multi-version Concurrency Control \(MVCC\), Strict Two-Phase Locking \(S2PL\), 以及 Optimistic Concurrency Control \(OCC\),每种技术也都有很多的变种。 + +3. **MVCC** + + MVCC的基本机制是:写事务不会原地修改元组内容,每次写操作都会在旧的版本之上创建新的版本,并且会保留旧的版本。当某个事务需要读取数据时,数据库系统会从所有的版本中选取出符合该事务隔离级别要求的版本。 + + MVCC 的主要优点是读数据的锁请求与写数据的锁请求不冲突,以此来实现读不阻塞写,写也不阻塞读。 + + +- openGauss事务整体架构 + + ![](figures/20211017-210839-v2-58a3a0df18e1a92b9cc209036fb149ab_b.jpg) + + 在openGauss中,事务的实现与存储引擎的实现有很强关联,代码主要集中在src/gausskernel/storage/access/transam及src/gausskernel/storage/lmgr下,关键文件如图所示。 + + - (1) 事务管理器:事务系统的中枢,它的实现是一个有限循环状态机,通过接受外部系统的命令并根据当前事务所处的状态决定事务的下一步执行过程。 + - (2) 日志管理器:用来记录事务执行的状态以及数据变化的过程,包括事务提交日志\(CLOG\)、事务提交序列日志(CSNLOG)以及事务日志(XLOG)。其中CLOG日志只用来记录事务执行的结果状态,CSNLOG记录日志提交的顺序,用于可见性判断;XLOG是数据的redo日志,用于恢复及持久化。 + - (3) 线程管理机制:通过一片内存区域记录所有线程的事务信息,任何一个线程可以通过访问该区域获取其他事务的状态信息。 + - (4) MVCC机制:openGauss系统中,事务执行读流程结合各事务提交的CSN序列号,采用了多版本并发控制机制,实现了元组的读和写互不阻塞。 + - (5) 锁管理器:实现系统的写并发控制,通过锁机制来保证事务写流程的隔离性。 + + +## MVCC的实现 + +- 我们需要关注: + - 元组版本号的实现 + - 快照的实现 + - 判断数据有效性、可见性、可更新性的算法的实现 + - 不同的隔离级别的实现 + +- 多版本元组存储结构 + - src/include/access/htup.h + + 为了定义MVCC 中不同版本的数据,Opengauss在每个元组的头部信息HeapTupleHeaderData中引入了一些字段如下: + + ``` + typedef struct HeapTupleHeaderData { + union { + HeapTupleFields t_heap; /* 存储该元组的一些描述信息 */ + DatumTupleFields t_datum; + } t_choice; + + ItemPointerData t_ctid; /* (块号,块内偏移) 存储用来记录当前元组或新元组的物理位置 */ + + /* Fields below here must match MinimalTupleData! */ + + uint16 t_infomask2; + + uint16 t_infomask; /* various flag bits, see below */ + + uint8 t_hoff; + + /* ^ - 23 bytes - ^ */ + + bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; + + /* MORE DATA FOLLOWS AT END OF STRUCT */ + } HeapTupleHeaderData; + typedef HeapTupleHeaderData* HeapTupleHeader + ``` + + - HeapTupleFields + + ``` + typedef struct HeapTupleFields { + ShortTransactionId t_xmin; /* 存放插入该 Tuple 时的 txid */ + ShortTransactionId t_xmax; /* 存放删除或者更新该 Tuple 时的 txid,如果还没更新或者删除,那么置 0,表示无效 */ + + union { + CommandId t_cid; /* 创建或更新/删除该 Tuple 的命令在该事务内执行的所有 SQL 命令中的编号 */ + ShortTransactionId t_xvac; /* old-style VACUUM FULL xact ID */ + } t_field3; + } HeapTupleFields; + ``` + + - t\_infomask + + ``` + #define HEAP_HASNULL 0x0001 /* has null attribute(s) */ + #define HEAP_HASVARWIDTH 0x0002 /* has variable-width attribute(s) */ + #define HEAP_HASEXTERNAL 0x0004 /* has external stored attribute(s) */ + #define HEAP_HASOID 0x0008 /* has an object-id field */ + #define HEAP_COMPRESSED 0x0010 /* has compressed data */ + #define HEAP_COMBOCID 0x0020 /* t_cid is a combo cid */ + #define HEAP_XMAX_EXCL_LOCK 0x0040 /* xmax is exclusive locker */ + #define HEAP_XMAX_SHARED_LOCK 0x0080 /* xmax is shared locker */ + /* if either LOCK bit is set, xmax hasn't deleted the tuple, only locked it */ + #define HEAP_IS_LOCKED (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_SHARED_LOCK) + #define HEAP_XMIN_COMMITTED 0x0100 /* t_xmin committed */ + #define HEAP_XMIN_INVALID 0x0200 /* t_xmin invalid/aborted */ + #define HEAP_XMIN_FROZEN (HEAP_XMIN_INVALID | HEAP_XMIN_COMMITTED) + #define HEAP_XMAX_COMMITTED 0x0400 /* t_xmax committed */ + #define HEAP_XMAX_INVALID 0x0800 /* t_xmax invalid/aborted */ + ... 
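+    /*
+     * 注:上述 HEAP_XMIN_COMMITTED / HEAP_XMIN_INVALID、HEAP_XMAX_COMMITTED /
+     * HEAP_XMAX_INVALID 等标志位即所谓的 hint bit。可见性判断函数在确认事务的
+     * 最终状态后,会通过 SetHintBits() 将结果回填到元组头上,后续扫描即可凭
+     * 这些标志位短路掉对 CSNLOG 的重复查询(见下文 HeapTupleSatisfiesMVCC)。
+     */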
+ ``` + + +- 插入、删除、更新元组 + - 元组在页中是如何存放的 + + ![](figures/20211015-225510-fig-5-03.png) + + ![](figures/20211015-225127-update.png) + + - 插入 + + 假设一个txid为99的事务插入一个元组 + + ![](figures/20211015-225511-fig-5-04.png) + + - 删除 + + 假设一个txid为111的事务删除一个元组 + + ![](figures/20211015-225511-fig-5-05.png) + + - 更新 + + 假设99号事务插入的元组被100号事务更新了两次 + + ![](figures/20211015-225511-fig-5-06.png) + + openGauss通过HeapTupleHeaderData 的几个特殊的字段,给元组设置了不同的版本号,元组的每次更新操作都会产生一条新版本的元组,版本之间从旧到新形成了一条版本链(旧的ctid指向新的元组)。 + + +- 事务快照的实现 + + 为了实现元组对事务的可见性判断,openGauss引入了事务快照SnapshotData + + 在openGauss中,有两种方式来实现快照。 + + - (1)活跃事务数组方法 + + 在数据库进程中,维护一个全局的数组,其中的成员为正在执行的事务信息,包括事务的事务号,该数组即活跃事务数组。 + + 在每个事务开始的时候,复制一份该数组内容。 + + 当事务执行过程中扫描到某个元组时,需要通过判断元组xmin和xmax这两个事务对于查询事务的可见性,来决定该元组是否对查询事务可见。 + + ![](figures/20211015-225512-d34f1a911a8804c0b1f8d791a65f175e.png) + + - (2)时间戳方法 + + ![](figures/20211015-225512-72285f7db5051f38a7940e7f235f49df.png) + + 在openGauss内部,使用一个全局自增的长整数作为逻辑的时间戳,模拟数据库内部的时序,该逻辑时间戳被称为提交顺序号(Commit Sequence Number,简称CSN)。 + + 每当一个事务提交的时候,在CSN日志中会记录该事务号 XID对应的逻辑时间戳 CSN 值。 + + ![](figures/20211015-225513-64eaedd1d1501b104652b104bd3152b2.png) + + ``` + #define COMMITSEQNO_INPROGRESS UINT64CONST(0x0) // 表示该事务还未提交或回滚 + #define COMMITSEQNO_ABORTED UINT64CONST(0x1) // 表示该事务已经回滚 + #define COMMITSEQNO_FROZEN UINT64CONST(0x2) // 表示该事务已提交,且对任何快照可见 + #define COMMITSEQNO_FIRST_NORMAL UINT64CONST(0x3) // 事务正常的CSN号起始值 + #define COMMITSEQNO_COMMIT_INPROGRESS (UINT64CONST(1) << 62) // 事务正在提交中 + ``` + + - 事务快照数据结构SnapshotData + + src/include/utils/snapshot.h + + 获取快照时会记录当前活跃的最小的xid,记为snapshot.xmin。当前最新提交的“事务id\(latestCompleteXid\) + 1”,记为snapshot.xmax。当前最新提交的“CSN号 + 1”\(NextCommitSeqNo\),记为snapshot.csn。 + + ``` + typedef struct SnapshotData { + SnapshotSatisfiesFunc satisfies; /* 判断可见性的函数;通常使用MVCC,即HeapTupleSatisfiesMVCC */ + TransactionId xmin; /*当前活跃事务最小值,小于该值的事务说明已结束 */ + TransactionId xmax; /*最新提交事务id(latestCompeleteXid)+1,大于等于改值说明事务还未开始,该事务id不可见 */ + TransactionId* xip; /*记录当前活跃事务链表,在CSN版本中该值无用 */ + TransactionId* subxip; /* 记录缓存子事务活跃链表,在CSN版本中该值无用 */ + uint32 xcnt; /* 记录活跃事务的个数(xip中元组数)在CSN版本中该值无用 */ + ... + + CommitSeqNo snapshotcsn; /* 快照的CSN号,一般为最新提交事务的CSN号+1(NextCommitSeqNo),CSN号严格小于该值的事务可见。 */ + ... + + CommandId curcid; /*事务块中的命令序列号,即同一事务中,前面插入的数据,后面可见。 */ + uint32 active_count; /* ActiveSnapshot stack的refcount */ + uint32 regd_count; /* RegisteredSnapshotList 的refcount*/ + void* user_data; /* 本地多版本快照使用,标记该快照还有线程使用,不能直接释放 */ + SnapshotType snapshot_type; /* openGauss单机无用 */ + } SnapshotData; + ``` + + - satisfies是openGauss提供的对于事务可见性判断的统一操作接口。 + + src/gausskernel/storage/access/heap/heapam\_visibility.c + + - HeapTupleSatisfiesMVCC:判断元组对某一快照版本是否有效 + - HeapTupleSatisfiesUpdate:判断元组是否可更新 + - HeapTupleSatisfiesDirty:判断当前元组是否已脏 + - HeapTupleSatisfiesSelf:判断tuple对自身信息是否有效 + - HeapTupleSatisfiesToast:用于TOAST表(参考文档)的判断 + - HeapTupleSatisfiesVacuum:用在VACUUM,判断某个元组是否对任何正在运行的事务可见,如果是,则该元组不能被VACUUM删除 + - HeapTupleSatisfiesAny:所有元组都可见 + - HeapTupleSatisfiesHistoricMVCC:用于CATALOG 表 + - …… + + - MVCC可见性判断机制 + + + + + + + + + + + + + + + + +

+    | 状态 | xmax对于查询可见 | xmax对于查询不可见 |
+    | --- | --- | --- |
+    | xmin对于查询可见 | 记录不可见(先插入,后删除) | 记录可见(先插入,未删除) |
+    | xmin对于查询不可见 | 不可能发生 | 记录不可见(未插入,未删除) |
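+
+    下面给出一个最小的观察示例(示意性质:假设本地有端口为 26000 的 openGauss 实例,表 mvcc_demo 为本文虚构),借助 xmin/xmax 系统列直观验证上表的版本语义:
+
+    ```
+    gsql -d postgres -p 26000 <<'SQL'
+    DROP TABLE IF EXISTS mvcc_demo;
+    CREATE TABLE mvcc_demo(id int);
+    INSERT INTO mvcc_demo VALUES (1);
+    -- 刚插入:xmin 为插入事务的事务号,xmax 为 0(未被删除/更新)
+    SELECT xmin, xmax, id FROM mvcc_demo;
+    UPDATE mvcc_demo SET id = 2;
+    -- 更新产生新版本元组:查询到的新版本元组,其 xmin 变为更新事务的事务号
+    SELECT xmin, xmax, id FROM mvcc_demo;
+    DROP TABLE mvcc_demo;
+    SQL
+    ```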
+ + - XidVisibleInSnapshot + + src/gausskernel/storage/access/heap/heapam\_visibility.c + + ``` + bool XidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdStatus* hintstatus, Buffer buffer, bool* sync) + { + bool looped = false; + *hintstatus = XID_INPROGRESS; + if (GTM_MODE && TransactionIdFollowsOrEquals(xid, snapshot->xmax)) { + return false; + } + loop: + csn = TransactionIdGetCommitSeqNo(xid, false, true, false); + if (COMMITSEQNO_IS_COMMITTED(csn)) { + *hintstatus = XID_COMMITTED; + if (csn < snapshot->snapshotcsn) + return true; + else + return false; + } else if (COMMITSEQNO_IS_COMMITTING(csn)) { + ... + } else { + if (csn == COMMITSEQNO_ABORTED) + *hintstatus = XID_ABORTED; + return false; + } + } + ``` + + 如果xid事务正在执行: + + ``` + if (looped) { + ereport(DEBUG1, (errmsg("transaction id %lu's csn %ld is changed to ABORT after lockwait.", xid, csn))); + RecheckXidFinish(xid, csn); + CSNLogSetCommitSeqNo(xid, 0, NULL, COMMITSEQNO_ABORTED); + SetLatestFetchState(xid, COMMITSEQNO_ABORTED); + *hintstatus = XID_ABORTED; + return false; + } else { + if (!COMMITSEQNO_IS_SUBTRANS(csn)) { + ... + CommitSeqNo latestCSN = GET_COMMITSEQNO(csn); + if (latestCSN >= snapshot->snapshotcsn) { + ... + return false; + } + } else { + parentXid = (TransactionId)GET_PARENTXID(csn); + } + ... + if (TransactionIdIsValid(parentXid)) + SyncWaitXidEnd(parentXid, buffer); + else + SyncWaitXidEnd(xid, buffer); + looped = true; + parentXid = InvalidTransactionId; + goto loop; + } + ``` + + HeapTupleSatisfiesMVCC + + ``` + static bool HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot, Buffer buffer) + { + // 取元组头 + HeapTupleHeader tuple = htup->t_data; + ... + // 根据hint bit,若xmin没有被标记为已提交:可能被标记为回滚,或者还未标记 + if (!HeapTupleHeaderXminCommitted(tuple)) { + // 如果xmin已经被标记为invalid,说明插入该元组的事务已经回滚,直接返回不可见 + if (HeapTupleHeaderXminInvalid(tuple)) + return false; + // xmin还未标记,并且xmin为当前事务,说明是在同一个事务内的插入命令和扫描命令,则需要去判断CID + // 同一个事务内,后面的查询可以查到当前事务之前命令插入的并且未删除的结果 + if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tuple))) { + if ((tuple->t_infomask & HEAP_COMBOCID) && CheckStreamCombocid(tuple, snapshot->curcid, page)) + return true; /* delete after stream producer thread scan started */ + + // 当前扫描命令之后的某条命令才插入 + if (HeapTupleHeaderGetCmin(tuple, page) >= snapshot->curcid) + return false; /* inserted after scan started */ + // 到这里说明当前扫描命令之前已经插入 + // 根据hint bit,xmax被标记为invalid + if (tuple->t_infomask & HEAP_XMAX_INVALID) + return true; + + ... + + // 当前扫描命令之后的某条命令删除了该元组 + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } + // xmin还没打标记,并且不是当前事务 + else { + // 通过csnlog判断事务是否可见,并且返回该事务的最终提交状态 + visible = XidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, &hintstatus, buffer, NULL); + // 如果该事务提交,则打上提交的hint bit用于加速判断 + if (hintstatus == XID_COMMITTED) + SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED, HeapTupleHeaderGetXmin(page, tuple)); + // 如果事务回滚,则打上回滚标记 + if (hintstatus == XID_ABORTED) { + ... + SetHintBits(tuple, buffer, HEAP_XMIN_INVALID, InvalidTransactionId); + } + // 如果xmin不可见,则该元组不可见 + if (!visible) { + ... + return false; + } + } + } + // 根据hint bit,若xmin已经被标记为已提交,则通过函数接口CommittedXidVisibleInSnapshot判断是否对本次快照可见 + else { + /* xmin is committed, but maybe not according to our snapshot */ + if (!HeapTupleHeaderXminFrozen(tuple) && + !CommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, buffer)) { + if (...) 
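+                // 注:走到这里说明 xmin 按 hint bit 已标记为提交,但其提交 CSN
+                // 不早于本快照的 snapshotcsn,对当前快照而言仍不可见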
{ + return false; /* treat as still in progress */ + } + } + } + // 到此为止认为xmin visible,继续判断xmax的可见性 + + recheck_xmax: + // 根据hint bit,xmax已经被标记为invalid,即已经回滚 + if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */ + return true; + + ... // 还有一些其他状态判断 + + // 根据hint bit,xmax没有被标记为commited + if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED)) { + bool sync = false; + TransactionId xmax = HeapTupleHeaderGetXmax(page, tuple); + + // 如果xmax为当前事务 + if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(page, tuple))) { + // 如果删除该元组的命令后发生于快照扫描时刻 + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } + + visible = XidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, &hintstatus, buffer, &sync); + /* + * If sync wait, xmax may be modified by others. So we need to check xmax again after acquiring the page lock. + */ + if (sync && (xmax != HeapTupleHeaderGetXmax(page, tuple))) { + goto recheck_xmax; + } + // 根据hintstatus在元组头部打标记 hint bit + if (hintstatus == XID_COMMITTED) { + SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED, HeapTupleHeaderGetXmax(page, tuple)); + } + if (hintstatus == XID_ABORTED) { + ... + SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId); + } + if (!visible) { + if (...) { + if (sync && (xmax != HeapTupleHeaderGetXmax(page, tuple))) { + goto recheck_xmax; + } + return true; /* treat as still in progress */ + } + } + } + // 根据hint bit,xmax被标记为commited + else { + /* xmax is committed, but maybe not according to our snapshot */ + if (!CommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, buffer)) { + if (...) { + return true; /* treat as still in progress */ + } + } + } + return false; + } + ``` + + + +- 隔离级别的实现 + + ![](figures/zh-cn_image_0000001197508006.png) + + - (1)脏写(dirty write):两个事务分别写入,两个事务分别提交或回滚,则事务的结果无法确定,即一个事务可以回滚另一个事务的提交。 + - (2)脏读(dirty read):一个事务可以读取另一个事务未提交的修改数据。 + - (3)不可重复读(fuzzy read):一个事务重复读取前面读取过的数据,数据的结果被另外的事务修改。 + - (4)幻读(phantom):一个事务重复执行范围查询,返回一组符合条件的数据,每次查询的结果集因为其他事务的修改发生改变\(条数\)。 + - (5)更新丢失\(lost update\):一个事务在读取元组并更新该元组的过程中,有另一个事务修改了该元组的值,导致最终这次修改丢失。 + - (6)读偏斜\(read skew\):假设数据x,y有隐式的约束x+y=100;事务一读取x=50;事务二写x=25并更新y=75保证约束成立,事务二提交,事务一再读取y=75,导致事务一中读取x+y=125,不满足约束。 + - (7)写偏斜\(write skew\):假设数据x,y有隐式的约束x+y<=100;事务一读取x=50,并写入y=50;事务二读取y=30并写入x=70,并提交;事务一再提交;最终导致x=70,y=50不满足x+y<=100的约束。 + + 隔离级别越高,在一个事务执行过程中,它能“感知”到的并发事务的影响越小。在最高的可串行化隔离级别下,任意一个事务的执行,均“感知”不到有任何其他并发事务执行的影响,并且所有事务执行的效果就和一个个顺序执行的效果完全相同。 + + 在openGauss中,隔离级别的实现基于MVCC和快照机制,因此这种隔离方式被称为快照隔离\(Snapshot Isolation,SI\)。目前,openGauss支持读已提交和可重复读这两种隔离级别。两者实现上的差别在于在一个事务中获取快照的次数。\(在实现上可重复读隔离级别无幻读问题,有A5B写偏斜问题\) + + 如果采用读已提交的隔离级别,那么在一个事务块中每条语句的执行开始阶段,都会去获取一次最新的快照,从而可以看到那些在本事务块开始以后、在前面语句执行过程中提交的并发事务的效果。 + + 如果采用可重复读的隔离级别,那么在一个事务块中,只会在第一条语句的执行开始阶段,获取一次快照,后面执行的所有语句都会采用这个快照,整个事务块中的所有语句均不会看到该快照之后提交的并发事务的效果。 + + ![](figures/20211017-204222-dc83a9cc72803e849caa49dae027369f.png) + + + +## 总结 + +- 元组版本号的实现:使用元组头部信息的字段来标示元组的版本号 +- 快照的实现:活跃事务数组方法和时间戳方法 +- 判断数据有效性、可见性、可更新性的算法的实现: XidVisibleInSnapshot和HeapTupleSatisfiesMVCC +- 不同隔离级别的实现:在一个事务中获取快照的次数 + diff --git "a/content/zh/post/2022/openGauss\345\215\225\346\234\272\351\203\250\347\275\262.md" "b/content/zh/post/2022/openGauss\345\215\225\346\234\272\351\203\250\347\275\262.md" new file mode 100644 index 0000000000000000000000000000000000000000..9c7a6290729a2791f56c8cce53f81468fde8633e --- /dev/null +++ 
"b/content/zh/post/2022/openGauss\345\215\225\346\234\272\351\203\250\347\275\262.md" @@ -0,0 +1,404 @@ ++++ + +title = "openGauss单机部署" + +date = "2021-12-11" + +tags = [ "openGauss单机部署"] + +archives = "2021-11" + +author = "可达" + +summary = "openGauss单机部署" + +img = "/zh/post/2022/title/img1.png" + +times = "12:30" + ++++ + + + +# openGauss单机部署 + +## 一、安装环境 + +1. 操作系统:虚拟机VMware、CentOS7.9 +2. 环境设置: + - (1)虚拟机内存3G、磁盘100G + - (2)系统版本修改 + + 一开始使用了centos8,无法安装,因此降低版本,选用7.9后依然存在一些问题,因此修改/etc/redhat-release文件中系统版本为CentOS Linux release 7.6\(Core\) + + - (3)配置YUM源 + + ①删除系统自带yum源 + + ``` + rm -rf /etc/yum.repos.d/* + ``` + + ②下载阿里云yum源 + + ``` + wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo + ``` + + ③生成仓库缓存 + + ``` + yum makecache + ``` + + - (4)安装依赖包 + + ``` + 1 yum install ‐y libaio‐devel flex bison ncurses‐devel glibc.devel patch lsb_release + 2 yum install ‐y openssl* python3 + ``` + + - (5)关闭SELINUX和Firewall + + ``` + 1 setenforce 0 + 2 systemctl disable firewalld.service + 3 systemctl stop firewalld.service + ``` + + - (6)关闭交换内存 + + ``` + swapoff -a + ``` + + - (7)关闭透明大页 + + ``` + 1 vim /etc/rc.d/rc.local + 2 if test ‐f /sys/kernel/mm/transparent_hugepage/enabled; + 3 then + 4 echo never > /sys/kernel/mm/transparent_hugepage/enabled + 5 fi + 6 if test ‐f /sys/kernel/mm/transparent_hugepage/defrag; + 7 then + 8 echo never > /sys/kernel/mm/transparent_hugepage/defrag + 9 fi + ``` + + - (8)修改主机名 + + ``` + 1 echo "node1" > /etc/hostname + 2 echo “ 192.168.17.129 node1” >>/etc/hosts + ``` + + + +## 二、安装详细步骤 + +1. Opengauss安装 + - (1)下载opengauss安装包及创建用户组和目录 + + ``` + 1 groupadd dbgrp + 2 useradd -g dbgrp -d /home/omm -m -s /bin/bash omm + 3 echo "omm" | passwd -‐stdin omm + 4 mkdir -p /opt/software/openGauss + 5 chmod 755 -R /opt/software + 6 chown -R omm:dbgrp /opt/software/openGauss + cd /opt/software/openGauss/ + 7 wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86/openGauss-2.0.0-CentOS-64bit-all.tar.gz + 8 tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz + 9 tar -zxvf openGauss-2.0.0-CentOS-64bit-om.tar.gz + ``` + + - (2)单机xml配置文件 + + 首先从如下地址复制文件至当前位置 + + ``` + cp script/gspylib/etc/conf/cluster_config_template.xml . + ``` + + 修改配置文件具体如下,配置文件中要注意配置一下几个参数:nodeNAMES、backips + + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + ``` + + - (3)设置lib库 + + +``` +vim .bashrc +添加 +export GPHOME=/opt/huawei/install/om +export PATH=$GPHOME/script/gspylib/pssh/bin:$GPHOME/script:$PATH +export LD_LIBRARY_PATH=$GPHOME/lib:$LD_LIBRARY_PATH +export PYTHONPATH=$GPHOME/lib +export GAUSSHOME=/opt/huawei/install/app +export PATH=$GAUSSHOME/bin:$PATH +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH +export S3_CLIENT_CRT_FILE=$GAUSSHOME/lib/client.crt +export GAUSS_VERSION=2.0.0 +export PGHOST=/opt/huawei/tmp +export GAUSSLOG=/opt/huawei/log/omm +umask 077 +export GAUSS_ENV=2 +export GS_CLUSTER_NAME=singlenode +``` + +1. (4)执行交互式初始化 + - ①预安装,操作如下: + + ``` + 1 cd /opt/software/openGauss/script + 2 root@node1 script]#python3 gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config_template.xml + Parsing the configuration file. + Successfully parsed the configuration file. + Installing the tools on the local node. + Successfully installed the tools on the local node. + Setting pssh path + Successfully set core path. + Are you sure you want to create the user[omm] and create trust for it (yes)? yes + Preparing SSH service. + Successfully prepared SSH service. + Checking OS software. + Successfully check os software. 
+ Checking OS version. + Successfully checked OS version. + Creating cluster's path. + Successfully created cluster's path. + Setting SCTP service. + Successfully set SCTP service. + Set and check OS parameter. + Setting OS parameters. + Successfully set OS parameters. + Warning: Installation environment contains some warning messages. + Please get more details by "/opt/software/openGauss/script/gs_checkos -i A -h node1 --detail". + Set and check OS parameter completed. + Preparing CRON service. + Successfully prepared CRON service. + Setting user environmental variables. + Successfully set user environmental variables. + Setting the dynamic link library. + Successfully set the dynamic link library. + Setting Core file + Successfully set core path. + Setting pssh path + Successfully set pssh path. + Set ARM Optimization. + No need to set ARM Optimization. + Fixing server package owner. + Setting finish flag. + Successfully set finish flag. + Preinstallation succeeded. + ``` + + 当出现“Preinstallation succeeded.”时,预安装成功。 + + ②安装 + + 进入script目录后进行正式安装,命令如下,其中“/opt/software/openGauss/cluster\_config\_template.xml”为前几步中编辑的配置文件。 + + 此过程需要输入密码,且设置的密码要符合复杂度要求如下: + + 最少包含8个字符; + + 不能和用户名和当前密码(ALTER)相同,或和当前密码反序; + + 至少包含大写字母(A-Z),小写字母(a-z),数字,非字母数字字符(限定为\~!@\#$%^&\*\(\)-\_=+|\[\{\}\];:,<.\>/?)四类字符中的三类字符: + + ``` + [omm@node1 openGauss]$ cd script/ + [omm@node1 script]$ gs_install -X /opt/software/openGauss/cluster_config_template.xml + Parsing the configuration file. + Check preinstall on every node. + Successfully checked preinstall on every node. + Creating the backup directory. + Successfully created the backup directory. + begin deploy.. + Installing the cluster. + begin prepare Install Cluster.. + Checking the installation environment on all nodes. + begin install Cluster.. + Installing applications on all nodes. + Successfully installed APP. + begin init Instance.. + encrypt cipher and rand files for database. + Please enter password for database: + Please repeat for database: + begin to create CA cert files + The sslcert will be generated in /opt/huawei/install/app/sslcert/om + Cluster installation is completed. + Configuring. + Deleting instances from all nodes. + Successfully deleted instances from all nodes. + Checking node configuration on all nodes. + Initializing instances on all nodes. + Updating instance configuration on all nodes. + Check consistence of memCheck and coresCheck on database nodes. + Configuring pg_hba on all nodes. + Configuration is completed. + Successfully started cluster. + Successfully installed application. + end deploy.. + ``` + + 测试安装是否成功,首先需要使数据库处于开启状态,然后输入”gsql -d postgres -p 26000”命令使数据库在本地运行,其中-p 为数据库端口dataPortBase,具体数值在前述过程中xml配置文件中确定,这里为26000。 + + ![](figures/这里为26000.png) + + ![](figures/这里为260001.png) + +2. 
openGauss连接设置
+    - (1)安装java,确认jdk版本为1.8
+    - (2)从官网下载jdbc压缩包后,将其解压至路径/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7\_9.x86\_64/jre/lib/ext下
+    - (3)配置数据库服务器中的白名单与监听名单
+
+        - ①以操作系统用户omm登录数据库主节点
+        - ②执行如下命令增加对外提供服务的网卡IP或者主机名(英文逗号分隔),其中NodeName为当前节点名称,如:
+
+            ```
+            gs_guc reload -N NodeName -I all -c "listen_addresses='localhost,192.168.17.129'"
+            ```
+
+        - ③执行如下命令在数据库主节点配置文件中增加一条认证规则。(这里假设客户端IP地址为192.168.17.129,即远程连接的机器的IP地址)
+
+            ```
+            gs_guc reload -N all -I all -h "host all yushan 192.168.17.129/32 sha256"
+            ```
+
+            - -N all表示openGauss中的所有主机。
+            - -I all表示主机中的所有实例。
+            - -h表示指定需要在“pg_hba.conf”增加的语句。
+            - all表示允许客户端连接到任意的数据库。
+            - yushan表示连接数据库的用户。
+            - 192.168.17.129/32表示只允许IP地址为192.168.17.129的主机连接。在使用过程中,请根据用户的网络进行配置修改。32表示子网掩码为1的位数,即255.255.255.255。
+            - sha256表示连接时yushan用户的密码使用sha256算法加密。
+
+        与之效果相同的代替操作:
+
+        在/opt/huawei/install/data/db1路径(创建的节点名叫db1)下编辑pg\_hba.conf文件
+
+        ![](figures/下编辑pg_hba-conf文件.png)
+
+        (4)通过编写java程序即可连接,示例如下
+
+        ```
+        import java.sql.Connection;
+        import java.sql.DriverManager;
+        import java.sql.PreparedStatement;
+        import java.sql.SQLException;
+        import java.sql.Statement;
+        import java.sql.CallableStatement;
+        public class test{
+            public static Connection getConnect(String username, String passwd)
+            {
+                //驱动类。
+                String driver = "org.postgresql.Driver";
+                //数据库连接描述符。
+                String sourceURL = "jdbc:postgresql://127.0.0.1:26000/postgres";
+                Connection conn = null;
+
+                try
+                {
+                    //加载驱动。
+                    Class.forName(driver);
+                }
+                catch( Exception e )
+                {
+                    e.printStackTrace();
+                    return null;
+                }
+
+                try
+                {
+                    //创建连接。
+                    conn = DriverManager.getConnection(sourceURL, username, passwd);
+                    System.out.println("Connection succeed!");
+                }
+                catch(Exception e)
+                {
+                    e.printStackTrace();
+                    return null;
+                }
+
+                return conn;
+            }
+            public static void main(String[] args) {
+                // TODO Auto-generated method stub
+                Connection conn = getConnect("yushan", "1qaz@wsx");
+                //BatchInsertData(conn);
+                try {
+                    conn.close();
+                } catch (SQLException e) {
+                    e.printStackTrace();
+                }
+            }
+        }
+        ```
+
+        编译执行程序后,如图,连接成功。
+
+        ![](figures/编译执行程序后.png)
+
+
+
+## 三、安装过程中碰到的问题与解决办法
+
+1. 问题1
+
+    安装结束后准备运行时,发现 gs_om 无法启动。
+
+    **解决过程:**
+
+    猜测可能是内存不足:虚拟机一开始设置的内存为1G,查阅相关博客发现1G内存对于企业版不足,后将虚拟机内存调整为3G。
+
+2. 问题2
+
+    运行 gs_om 后报错如下
+
+    ![](figures/运行gsom后报错如下.png)
+
+    **解决过程:**
+
+    检查发现pg\_hba.conf文件配置出错,修改如下
+
+    ![](figures/修改如下.png)
+
+3. 问题3
+
+    一开始安装的虚拟机为centos8,进行预安装后发现不支持该版本操作系统。
+
+    **解决过程:**
+
+    切换为centos7.9,但依然报错,因此修改/etc/redhat-release文件中的系统版本为CentOS Linux release 7.6\(Core\)
+
diff --git "a/content/zh/post/2022/openGauss\345\220\221\351\207\217\345\214\226\345\274\225\346\223\216--hash-join.md" "b/content/zh/post/2022/openGauss\345\220\221\351\207\217\345\214\226\345\274\225\346\223\216--hash-join.md"
new file mode 100644
index 0000000000000000000000000000000000000000..59879cda175bba51bc992ceef315819c22396d40
--- /dev/null
+++ "b/content/zh/post/2022/openGauss\345\220\221\351\207\217\345\214\226\345\274\225\346\223\216--hash-join.md"
@@ -0,0 +1,181 @@
++++
+
+title = "openGauss向量化引擎--hash join"
+
+date = "2022-01-07"
+
+tags = [ "openGauss向量化引擎--hash join"]
+
+archives = "2022-01"
+
+author = "yanzongshuaiDBA"
+
+summary = "openGauss向量化引擎--hash join"
+
+img = "/zh/post/2022/title/img1.png"
+
+times = "12:30"
+
++++
+
+# openGauss向量化引擎--hash join
+
+传统的行执行器采用一次一个元组的执行模式,执行过程中CPU的大部分时间并没有用于处理数据,而是用在了遍历执行树等操作上,导致CPU的有效利用率较低。面向OLAP场景,大量的函数调用会带来巨大开销;为解决此问题,openGauss开发了向量化引擎,采用一次一批元组的执行模式,可大幅减少遍历执行节点及调用函数的开销。
+
+本文主要介绍hash join是如何进行向量化的。
+
+## 算子之间数据传递结构
+
+算子之间数据组织及传递结构是VectorBatch:
+
+```
+class VectorBatch : public BaseObject {
+
+public:
+    // number of rows in the batch.
+    int m_rows;
+    // number of columns in the batch.
+    int m_cols;
+    // Shall we check the selection vector.
+    bool m_checkSel;
+    // Selection vector;
+    bool* m_sel;
+    // ScalarVector
+    ScalarVector* m_arr;
+    // SysColumns
+    SysColContainer* m_sysColumns;
+    // Compress buffer
+    StringInfo m_pCompressBuf;
+...
+};
+```
+
+![](figures/主要由3个数组在表示.png)
+
+主要由3个数组来表示:m\_vals为列值数组,m\_flag标记对应行值是否为NULL,m\_sel标记该行是否满足过滤条件。
+
+## VecHashJoin
+
+向量化hash join的算子是VecHashJoin。其执行函数是ExecVecHashJoin,分为2个阶段:HASH\_BUILD和HASH\_PROBE。
+
+```
+VectorBatch* ExecVecHashJoin(VecHashJoinState* node)
+{
+    VectorBatch* result = NULL;
+    int64 rows = 0;
+    for (;;) {
+        switch (node->joinState) {
+            case HASH_BUILD: {
+                if (node->hashTbl == NULL)
+                    node->hashTbl = New(CurrentMemoryContext) HashJoinTbl(node);
+                ((HashJoinTbl*)(node->hashTbl))->Build();  // 构建hash表
+                rows = ((HashJoinTbl*)(node->hashTbl))->getRows();
+            } break;
+            case HASH_PROBE: {
+                result = ((HashJoinTbl*)(node->hashTbl))->Probe();  // 进行hash探测并构建join结果
+                return result;
+            }
+            default:
+                break;
+        }
+    }
+}
+```
+
+## HASH\_BUILD
+
+其中build阶段又分为2个小阶段:1)获取内表的batch,然后通过m\_funBuild申请一段连续内存hashCell \*cell\_arr,其中每个节点是一个hashCell,大小为m\_cellSize,个数与batch中的记录行数相同;随后将其接入m\_cache链表,并将batch中的列值依次存入cell\_arr。2)通过PrepareProbe函数构建hash表,并将cell\_arr中的值放入hash表。
+
+```
+void HashJoinTbl::Build()
+{
+    for (;;) {
+        batch = VectorEngine(inner_node);  // 获取内表batch
+        if (unlikely(BatchIsNull(batch)))
+            break;
+        RuntimeBinding(m_funBuild, m_strategy)(batch);
+    }
+    PushDownFilterIfNeed();
+    PrepareProbe();
+    ...
+} +``` + +第1阶段: + +![](figures/第1阶段.png) + +第2阶段: + +![](figures/第2阶段.png) + +第2阶段,通过m\_keyIdx数组得到哪一列是join key,将cell\_arr中该列值拿出来通过m\_innerHashFuncs函数计算hash值,将其保存到m\_cacheLoc\[\]数组中,作为m\_data数组下标,通过这种方式将内表列值放到hash表中。 + +## HASH\_PROBE + +通过probeHashTable进行探测,并join。也分为2个小阶段:1)外表hash阶段:首先获取外表的batch,通过m\_outerHashFuncs hash函数将外表的join key列hash出的值放到m\_cacheLoc数组中,作为hash表数组的下标:m\_hashTbl-\>m\_data\[m\_cacheLoc\[i\]\]。2)join阶段:定位到的m\_hashTbl-\>m\_data\[m\_cacheLoc\[i\]\]中列值和外表中列值是否相同,若相等则通过m\_keyMatch\[\]数组标记。最后将m\_keyMatch\[\]数组标记为1的列值构建成向量batch,并返回。 + +``` +VectorBatch* HashJoinTbl::probeHashTable(hashSource* probSource) +{ + VectorBatch* res_batch = NULL; + while (true) { + switch (m_probeStatus) { + case PROBE_FETCH: + //获取外表batch + m_outRawBatch = probSource->getBatch(); + if (BatchIsNull(m_outRawBatch)) { + } else { + int row = m_outRawBatch->m_rows; + int mask = m_hashTbl->m_size - 1; +hashBatch(m_outRawBatch, m_outKeyIdx, m_cacheLoc, m_outerHashFuncs); + for (int i = 0; i < row; i++) { + m_cacheLoc[i] = m_outRawBatch->m_arr[icol].m_vals[i] & mask; + m_cellCache[i] = m_hashTbl->m_data[m_cacheLoc[i]]; + m_match[i] = false; /* flag all the row no match */ + m_keyMatch[i] = true; + } + ... + } + break; + case PROBE_DATA: + res_batch = (this->*m_joinFun)(m_outRawBatch); + if (!BatchIsNull(res_batch)) + return res_batch; + break; + case PROBE_FINAL: + return endJoin(); + default: + break; + } + } +} +// +VectorBatch* HashJoinTbl::innerJoinT(VectorBatch* batch)//外部batch +{ + while (m_doProbeData) { + last_build_idx = 0; + RuntimeBinding(m_matchKeyFunction, i)(&batch->m_arr[m_outKeyIdx[i]], row, m_keyIdx[i], i); + for (row_idx = last_build_idx; row_idx < row; row_idx++) { + if (m_keyMatch[row_idx]) { + val = m_cellCache[row_idx]->m_val; + for (i = 0; i < m_innerBatch->m_cols; i++) { + p_vector = &m_innerBatch->m_arr[i]; + + p_vector->m_vals[result_row] = val[i].val; + p_vector->m_flag[result_row] = val[i].flag; + } + for (i = 0; i < m_outerBatch->m_cols; i++) { + p_vector = &m_outerBatch->m_arr[i]; + p_vector->m_vals[result_row] = batch->m_arr[i].m_vals[row_idx]; + p_vector->m_flag[result_row] = batch->m_arr[i].m_flag[row_idx]; + } + result_row++; + } + } + } + return buildResult(m_innerBatch, m_outerBatch, true); +} +``` + +![](figures/outerBatch1.png) + diff --git "a/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-PITR\346\201\242\345\244\215.md" "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-PITR\346\201\242\345\244\215.md" new file mode 100644 index 0000000000000000000000000000000000000000..c1ba9f3b49cfadcd82f4e330ab978138dffd842f --- /dev/null +++ "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-PITR\346\201\242\345\244\215.md" @@ -0,0 +1,116 @@ ++++ + +title = "opengauss数据库-PITR恢复" + +date = "2022-01-05" + +tags = [ "opengauss数据库-PITR恢复"] + +archives = "2022-01" + +author = "周琦放" + +summary = "opengauss数据库-PITR恢复" + +img = "/zh/post/2022/title/img17.png" + +times = "12:30" + ++++ + +# opengauss数据库-PITR恢复 + +当数据库崩溃或希望回退到数据库之前的某一状态时,openGauss的即时恢复功能(Point-In-Time Recovery,简称PITR)可以支持恢复到备份归档数据之后的任意时间点 + +## 前提条件 + +全库备份文件:base.tar.gz; 归档的wal日志文件,归档备份目录/ogarchive + +## 备份数据文件 + +根据环境变量找到当前的数据文件目录,并重命名数据文件目录 + +请注意案例中的 /opt/huawei/install/data/ 为本示例中的数据文件目录,请根据实际情况修改此输入值 + +``` +[omm@ogsta ~]$ echo $DATADIR +/opt/huawei/install/data/dn +[omm@ogsta ~]$ cd /opt/huawei/install/data/ +[omm@ogsta data]$ mv dn/ dn_bak +[omm@ogsta data]$ ll +total 4 +drwx------ 23 omm dbgrp 4096 Jan 4 13:10 dn_bak +[omm@ogsta data]$ mkdir dn +``` + +- 
全量备份解压 + + base.tar.gz压缩文件是通过gs\_basebackup 压缩,因此需要采用两次解压,gunzip和gs\_tar + + ``` + [omm@ogsta ogarchive]$ gunzip base.tar.gz + [omm@ogsta ogarchive]$ gs_tar -D /opt/huawei/install/data/dn -F base.tar + [omm@ogsta ogarchive]$ cd /opt/huawei/install/data/dn + [omm@ogsta dn]$ ls + asp_data cacert.pem mot.conf pg_hba.conf pg_multixact pg_snapshots pg_xlog server.crt term_file + backup_label full_backup_label pg_clog pg_hba.conf.bak pg_notify pg_stat_tmp postgresql.conf server.key undo + backup_label.old global pg_csnlog pg_hba.conf.lock pg_perf pg_tblspc postgresql.conf.bak server.key.cipher + base gs_profile pg_ctl.lock pg_ident.conf pg_replslot pg_twophase postgresql.conf.lock server.key.rand + build_completed.done gswlm_userinfo.cfg pg_errorinfo pg_llog pg_serial PG_VERSION rewind_lable sql_monitor + [omm@ogsta dn]$ + ``` + + +## 清空pg\_xlog + +该目录在数据文件目录中 + +``` +[omm@ogsta pg_xlog]$ pwd +/opt/huawei/install/data/dn/pg_xlog +[omm@ogsta pg_xlog]$ ll +total 32768 +-rw------- 1 omm dbgrp 16777216 Jan 4 13:38 000000010000000000000013 +-rw------- 1 omm dbgrp 16777216 Jan 4 13:38 000000010000000000000014 +drwx------ 2 omm dbgrp 80 Jan 4 13:38 archive_status +[omm@ogsta pg_xlog]$ rm -rf * +[omm@ogsta pg_xlog]$ ll +total 0 +``` + +## 配置recovery.conf文件 + +该配置文件请在数据文件目录中修改 + +``` +[omm@ogsta dn]$ pwd +/opt/huawei/install/data/dn +[omm@ogsta dn]$ cat recovery.conf +restore_command = 'cp /ogarchive/%f %p' +``` + +## 数据库启动 + +``` +gs_ctl -D /opt/huawei/install/data/dn start +``` + +这时可能遇到failed to translate name to xlog in GetOldestXLOGSegNo报错,需要如下的解决方法 + +![](figures/这时可能遇到failed-to.png) + +## 复制归档日志文件 + +以上的报错,是由于日志文件问题,根据recovery.conf文件内容,只是把归档目录中的文件自动复制到pg\_xlog目录中,不如直接手动把归档日志文件复制到pg\_xlog目录中 + +``` +cd /ogarchive/ +cp * /opt/huawei/install/data/dn/pg_xlog/ +``` + +## 重启 + +``` +gs_ctl -D /opt/huawei/install/data/dn start +``` + diff --git "a/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\344\270\273\344\273\216\346\220\255\345\273\272.md" "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\344\270\273\344\273\216\346\220\255\345\273\272.md" new file mode 100644 index 0000000000000000000000000000000000000000..487ebe8a49dedcb03292584b60381c9c4e005d87 --- /dev/null +++ "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\344\270\273\344\273\216\346\220\255\345\273\272.md" @@ -0,0 +1,421 @@ ++++ + +title = "opengauss数据库-主从搭建" + +date = "2021-12-31" + +tags = [ "opengauss数据库-主从搭建"] + +archives = "2021-12" + +author = "周琦放" + +summary = "opengauss数据库-主从搭建" + +img = "/zh/post/2022/title/img22.png" + +times = "12:30" + ++++ + +# opengauss数据库-主从搭建 + +## 环境说明 + +![](figures/zh-cn_image_0000001197720014.png) + +## 软件下载 + +opengauss 下载地址:https://opengauss.org/zh/download.html + +## 环境准备 + +- 关闭 SELINUX + +修改 /etc/selinux/config文件中的“SELINUX”值为“disabled + +``` +[root@ogpri openGauss]# more /etc/selinux/config +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +SELINUX=disabled +# SELINUXTYPE= can take one of three values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. 
+SELINUXTYPE=targeted +``` + +- 关闭防火墙并禁止开机重启 + + ``` + systemctl disable firewalld.service + systemctl stop firewalld.service + ``` + +- 设置root用户远程登陆 + + 将PermitRootLogin改为yes。 + + ``` + vim /etc/ssh/sshd_config + PermitRootLogin yes + ``` + + 修改后生效 + + ``` + systemctl restart sshd.service + ``` + +- 关闭透明大页 + + ``` + echo never > /sys/kernel/mm/transparent_hugepage/enabled + ``` + + +## 数据库安装包依赖 + +root 用户下执行,所有的节点都要安装,建议配置本地yum源 + +``` +yum -y install libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel +``` + +- Python3依赖包 + + ``` + yum install bzip2-devel db4-devel gdbm-devel libpcap-devel openssl openssl-devel sqlite-devel tk-devel xz-devel zlib* gcc* + ``` + +- 安装python3 + + ``` + tar -xvf Python-3.6.8.tar.xz + cd Python-3.6.8/ + ./configure --prefix=/usr/local/python3 --enable-shared CFLAGS=-fPIC + make && make install + ``` + +- 设置python3 软连接 + + ``` + ln -s /usr/local/Python3/bin/python3 /usr/bin/python3 + ln -s /usr/local/python3/bin/pip3 /usr/bin/pip3 + ``` + +- 设置python3环境变量 + + ``` + vi /etc/profile + ``` + +- 安装python3 + + ``` + export PYTHON_HOME=/usr/local/python3.6.8 + export PATH=$PATH:$PYTHON_HOME/bin + :wq + Source /etc/profile + ``` + +- 验证安装结果 + + ``` + [root@ogpri ~]# python3 + Python 3.6.8 (default, Dec 27 2021, 21:52:53) + [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] on linux + Type "help", "copyright", "credits" or "license" for more information. + >>> + ``` + + +## 安装opengaussdb 数据库软件 + +数据库软件安装,如没有特殊说明,均在主节点执行,执行用户为root + +- 创建数据库软件目录 + + ``` + mkdir -p /opt/software/openGauss + chmod 755 -R /opt/software + ``` + + +- 创建XML配置文件 + + 如果是验证安装测试使用,建议修改其中的ip地址和hostname, + + ``` + vi /opt/software/openGauss/cluster_config.xml + ``` + + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ``` + +- 预安装 + + 在安装过程中请输入root,omm两个用户的密码 + + ``` + /opt/software/openGauss/script/gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml + ``` + + 安装日志如下: + + ``` + [root@ogpri script]# /opt/software/openGauss/script/gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml + Parsing the configuration file. + Successfully parsed the configuration file. + Installing the tools on the local node. + Successfully installed the tools on the local node. + Are you sure you want to create trust for root (yes/no)? yes + Please enter password for root. + Password: + + Creating SSH trust for the root permission user. + Checking network information. + All nodes in the network are Normal. + Successfully checked network information. + Creating SSH trust. + Creating the local key file. + Successfully created the local key files. + Appending local ID to authorized_keys. + Successfully appended local ID to authorized_keys. + Updating the known_hosts file. + Successfully updated the known_hosts file. + Appending authorized_key on the remote node. + Successfully appended authorized_key on all remote node. + Checking common authentication file content. + Successfully checked common authentication content. + Distributing SSH trust file to all node. + Successfully distributed SSH trust file to all node. + Verifying SSH trust on all hosts. + Successfully verified SSH trust on all hosts. + Successfully created SSH trust. + Successfully created SSH trust for the root permission user. + Setting pssh path + Successfully set core path. + Distributing package. + Begin to distribute package to tool path. + Successfully distribute package to tool path. 
+ Begin to distribute package to package path. + Successfully distribute package to package path. + Successfully distributed package. + Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes + Preparing SSH service. + Successfully prepared SSH service. + Installing the tools in the cluster. + Successfully installed the tools in the cluster. + Checking hostname mapping. + Successfully checked hostname mapping. + reating SSH trust for [omm] user. + Please enter password for current user[omm]. + Password: + + Checking network information. + All nodes in the network are Normal. + Successfully checked network information. + Creating SSH trust. + Creating the local key file. + Successfully created the local key files. + Appending local ID to authorized_keys. + Successfully appended local ID to authorized_keys. + Updating the known_hosts file. + Successfully updated the known_hosts file. + Appending authorized_key on the remote node. + Successfully appended authorized_key on all remote node. + Checking common authentication file content. + Successfully checked common authentication content. + Distributing SSH trust file to all node. + Successfully distributed SSH trust file to all node. + Verifying SSH trust on all hosts. + Successfully verified SSH trust on all hosts. + Successfully created SSH trust. + Successfully created SSH trust for [omm] user. + Checking OS software. + Successfully check os software. + Checking OS version. + Successfully checked OS version. + Creating cluster's path. + Successfully created cluster's path. + Set and check OS parameter. + Setting OS parameters. + Successfully set OS parameters. + Warning: Installation environment contains some warning messages. + Please get more details by "/opt/software/openGauss/script/gs_checkos -i A -h ogpri,ogsta --detail". + Set and check OS parameter completed. + Preparing CRON service. + Successfully prepared CRON service. + Setting user environmental variables. + Successfully set user environmental variables. + Setting the dynamic link library. + Successfully set the dynamic link library. + Setting Core file + Successfully set core path. + Setting pssh path + Successfully set pssh path. + Setting Cgroup. + Successfully set Cgroup. + Set ARM Optimization. + No need to set ARM Optimization. + Fixing server package owner. + Setting finish flag. + Successfully set finish flag. + Preinstallation succeeded. + ``` + +- 切换用户执行安装 + + ``` + su - ommgs_install -X /opt/software/openGauss/cluster_config.xml + ``` + + 安装日志如下: + + ``` + Parsing the configuration file. + Check preinstall on every node. + Successfully checked preinstall on every node. + Creating the backup directory. + Successfully created the backup directory. + begin deploy.. + Installing the cluster. + begin prepare Install Cluster.. + Checking the installation environment on all nodes. + begin install Cluster.. + Installing applications on all nodes. + Successfully installed APP. + begin init Instance.. + encrypt cipher and rand files for database. + Please enter password for database: + Please repeat for database: + begin to create CA cert files + The sslcert will be generated in /opt/huawei/install/app/share/sslcert/om + Cluster installation is completed. + Configuring. + Deleting instances from all nodes. + Successfully deleted instances from all nodes. + Checking node configuration on all nodes. + Initializing instances on all nodes. + Updating instance configuration on all nodes. + Check consistence of memCheck and coresCheck on database nodes. 
+    Successful check consistence of memCheck and coresCheck on all nodes.
+    Configuring pg_hba on all nodes.
+    Configuration is completed.
+    Successfully started cluster.
+    Successfully installed application.
+    end deploy..
+    ```
+
+- 登陆验证
+
+    ```
+    [omm@ogpri dn]$ gsql -d postgres -p 26000
+    gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr )
+    Non-SSL connection (SSL connection is recommended when requiring high-security)
+    Type "help" for help.
+    openGauss=#
+    ```
+
+- 查看主从状态
+
+    ```
+    gs_ctl -D /opt/huawei/install/data/dn/ query
+    ```
+
+    信息如下:
+
+    ```
+    [2021-12-29 14:41:33.751][21110][][gs_ctl]: gs_ctl query ,datadir is /opt/huawei/install/data/dn
+    HA state:
+        local_role : Primary
+        static_connections : 1
+        db_state : Normal
+        detail_information : Normal
+
+    Senders info:
+        sender_pid : 9716
+        local_role : Primary
+        peer_role : Standby
+        peer_state : Normal
+        state : Streaming
+        sender_sent_location : 0/401A080
+        sender_write_location : 0/401A080
+        sender_flush_location : 0/401A080
+        sender_replay_location : 0/401A080
+        receiver_received_location : 0/401A080
+        receiver_write_location : 0/401A080
+        receiver_flush_location : 0/401A080
+        receiver_replay_location : 0/401A080
+        sync_percent : 100%
+        sync_state : Quorum
+        sync_priority : 1
+        sync_most_available : Off
+        channel : 192.168.56.227:26001-->192.168.56.228:35144
+    Receiver info:
+    No information
+    ```
+
diff --git "a/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\345\217\202\346\225\260\344\274\230\345\214\226.md" "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\345\217\202\346\225\260\344\274\230\345\214\226.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f0acf8db759cebbb49c178d0e29f9441db487f0e
--- /dev/null
+++ "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\345\217\202\346\225\260\344\274\230\345\214\226.md"
@@ -0,0 +1,162 @@
++++
+
+title = "opengauss数据库-参数优化"
+
+date = "2021-12-31"
+
+tags = [ "opengauss数据库-参数优化"]
+
+archives = "2021-12"
+
+author = "周琦放"
+
+summary = "opengauss数据库-参数优化"
+
+img = "/zh/post/2022/title/img18.png"
+
+times = "12:30"
+
++++
+
+# opengauss数据库-参数优化
+
+ 2021/12/31
+
+## 参数调整
+
+```
+cat opt_params.sh
+#!/bin/bash
+source ~/.bashrc
+memory=`free|awk '{print $2}' |sed -n 2p`
+if [[ $memory -lt 4*1024*1024 ]]
+then
+max_process_memory=2GB
+shared_buffers=128MB
+max_connections=500
+work_mem=4MB
+maintenance_work_mem=256MB
+echo "If the database fails to start, lower the parameters max_process_memory and shared_buffers"
+elif [[ $memory -gt 4*1024*1024 ]] && [[ $memory -lt 8*1024*1024 ]]
+then
+max_process_memory=5GB
+shared_buffers=1GB
+max_connections=1000
+work_mem=16MB
+maintenance_work_mem=1GB
+else
+max_process_memory=$((memory*6/10/1024/1024))GB
+shared_buffers=$((memory*3/10/1024/1024))GB
+max_connections=3000
+work_mem=64MB
+maintenance_work_mem=2GB
+fi
+
+##内存相关参数
+gs_guc set -I all -N all -c "max_process_memory=${max_process_memory}"
+gs_guc set -I all -N all -c "shared_buffers=${shared_buffers}"
+gs_guc set -I all -N all -c "work_mem=${work_mem}"
+gs_guc set -I all -N all -c "maintenance_work_mem=${maintenance_work_mem}"
+gs_guc set -I all -N all -c "cstore_buffers=16MB"
+gs_guc set -I all -N all -c "wal_buffers=1GB"
+gs_guc set -I all -N all -c "local_syscache_threshold=32MB"
+gs_guc set -I all -N all -c "standby_shared_buffers_fraction=1"
+
+##连接访问相关参数
+gs_guc set -I all -N all -c "max_connections=${max_connections}"
+gs_guc set -I all -N all -c 
"max_prepared_transactions=${max_connections}" +gs_guc set -I all -N all -c "listen_addresses = '*'" +gs_guc set -I all -N all -c "remote_read_mode=non_authentication" +gs_guc set -I all -N all -c "password_encryption_type=1" +gs_guc set -I all -N all -c "password_reuse_time=0" +gs_guc set -I all -N all -c "password_lock_time=0" +gs_guc set -I all -N all -c "password_effect_time=0" +gs_guc set -I all -N all -c "session_timeout=0" + +##wal相关参数 +gs_guc set -I all -N all -c "wal_level=logical" +gs_guc set -I all -N all -c "full_page_writes=off" +gs_guc set -I all -N all -c "wal_log_hints=off" +gs_guc set -I all -N all -c "xloginsert_locks=48" +gs_guc set -I all -N all -c "advance_xlog_file_num=10" + +##复制相关参数 +gs_guc set -I all -N all -c "synchronous_commit=on" +gs_guc set -I all -N all -c "wal_keep_segments=1024" +gs_guc set -I all -N all -c "max_wal_senders=16" +gs_guc set -I all -N all -c "recovery_max_workers=4" +gs_guc set -I all -N all -c "most_available_sync=on" +gs_guc set -I all -N all -c "max_size_for_xlog_prune=104857600" +gs_guc set -I all -N all -c "catchup2normal_wait_time=0" +gs_guc set -I all -N all -c "enable_slot_log=on" +gs_guc set -I all -N all -c "max_replication_slots=32" +gs_guc set -I all -N all -c "wal_receiver_timeout=60s" +gs_guc set -I all -N all -c "sync_config_strategy=none_node" + +##日志相关参数 +gs_guc set -I all -N all -c "logging_collector=on" +gs_guc set -I all -N all -c "log_duration=on" +gs_guc set -I all -N all -c "log_line_prefix='%m %u %d %r %p %S'" +gs_guc set -I all -N all -c "log_checkpoints=on" +gs_guc set -I all -N all -c "plog_merge_age=0" + +gs_guc set -I all -N all -c "archive_dest='/ogarchive'" + + + +##性能统计相关参数 +gs_guc set -I all -N all -c "vacuum_cost_limit=1000" +gs_guc set -I all -N all -c "autovacuum_max_workers=10" +gs_guc set -I all -N all -c "autovacuum_naptime=20s" +gs_guc set -I all -N all -c "autovacuum_vacuum_cost_delay=10" +gs_guc set -I all -N all -c "autovacuum_vacuum_scale_factor=0.05" +gs_guc set -I all -N all -c "autovacuum_analyze_scale_factor=0.02" +gs_guc set -I all -N all -c "autovacuum_vacuum_threshold=200" +gs_guc set -I all -N all -c "autovacuum_analyze_threshold=200" +gs_guc set -I all -N all -c "autovacuum_io_limits=104857600" +gs_guc set -I all -N all -c "instr_unique_sql_count=20000" +gs_guc set -I all -N all -c "enable_save_datachanged_timestamp=off" +gs_guc set -I all -N all -c "track_sql_count=off" +gs_guc set -I all -N all -c "enable_instr_rt_percentile=off" +gs_guc set -I all -N all -c "enable_instance_metric_persistent=off" +gs_guc set -I all -N all -c "enable_logical_io_statistics=off" +gs_guc set -I all -N all -c "enable_user_metric_persistent=off" +gs_guc set -I all -N all -c "enable_mergejoin=on" +gs_guc set -I all -N all -c "enable_nestloop=on" +gs_guc set -I all -N all -c "enable_pbe_optimization=off" +gs_guc set -I all -N all -c "enable_resource_track=on" +gs_guc set -I all -N all -c "enable_wdr_snapshot=on" +gs_guc set -I all -N all -c "instr_unique_sql_count=5000" + +##客户端白名单 +gs_guc set -I all -N all -h "host all all 0.0.0.0/0 md5" + +##其他参数 +gs_guc set -I all -N all -c "checkpoint_segments=1024" +gs_guc set -I all -N all -c "checkpoint_completion_target=0.8" +gs_guc set -I all -N all -c "pagewriter_sleep=200" + +gs_guc set -I all -N all -c "enable_alarm=off" +gs_guc set -I all -N all -c "enable_codegen=off" +gs_guc set -I all -N all -c "audit_enabled=on" +gs_guc set -I all -N all -c "enable_asp=off" + +gs_guc set -I all -N all -c "lc_messages='en_US.UTF-8'" +gs_guc set -I all -N all -c 
"lc_monetary='en_US.UTF-8'" +gs_guc set -I all -N all -c "lc_numeric='en_US.UTF-8'" +gs_guc set -I all -N all -c "lc_time='en_US.UTF-8'" + +gs_guc set -I all -N all -c "update_lockwait_timeout=1min" +gs_guc set -I all -N all -c "lockwait_timeout=1min" + +gs_guc set -I all -N all -c "max_files_per_process=100000" +gs_guc set -I all -N all -c "behavior_compat_options='display_leading_zero'" +gs_guc set -I all -N all -c "enable_thread_pool=off" +``` + +## 重启生效 + +``` +gs_om -t stop && gs_om -t start +``` + diff --git "a/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\345\234\260\347\220\206\345\235\220\346\240\207gis\345\256\211\350\243\205.md" "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\345\234\260\347\220\206\345\235\220\346\240\207gis\345\256\211\350\243\205.md" new file mode 100644 index 0000000000000000000000000000000000000000..e1fba9d7b865aa9c3c4d930d8071273b4c31065c --- /dev/null +++ "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\345\234\260\347\220\206\345\235\220\346\240\207gis\345\256\211\350\243\205.md" @@ -0,0 +1,88 @@ ++++ + +title = "pengauss数据库-地理坐标gis安装" + +date = "2021-12-31" + +tags = [ "pengauss数据库-地理坐标gis安装"] + +archives = "2021-12" + +author = "周琦放 " + +summary = "pengauss数据库-地理坐标gis安装" + +img = "/zh/post/2022/title/img19.png" + +times = "12:30" + ++++ + +# opengauss数据库-地理坐标gis安装 + + 如无特殊说明,请在所有节点执行,执行用户为root + +## 组件下载 + +下载gis组件:Yukon-1.0-Alpha-openGauss2.1.0-CentOS\_x64.tar.gz,请注意下载的组件版本一定要和数据库版本严格一致 + +下载地址https://gitee.com/opengauss/Yukon/releases/v1.0-alpha + +## 环境配置要求 + +本次安装使用的是root 用户进行安装,而opengauss数据库的系统用户为omm,需要把omm用户的环境变量配置到root下的/etc/profile中, + +如下:加粗部分为omm的环境变量,请根据实际情况修改,omm用户的环境变量一般在/home/omm/.bashrc + +``` +more /etc/profile +...... +export ORACLE_HOME=/root/ora2pg/instantclient_11_2 +export PATH=$PATH:$ORACLE_HOME +export LD_LIBRARY_PATH=$ORACLE_HOME:$LD_LIBRARY_PATH +export TNS_ADMIN=$ORACLE_HOME +export PYTHON_HOME=/usr/local/python3 +export PATH=$PATH:$PYTHON_HOME/bin +export PATH=/root/gauss_om/omm/script:$PATH +export GPHOME=/opt/huawei/install/om +export PATH=$GPHOME/script/gspylib/pssh/bin:$GPHOME/script:$PATH +export LD_LIBRARY_PATH=$GPHOME/lib:$LD_LIBRARY_PATH +export PYTHONPATH=$GPHOME/lib +export GAUSSHOME=/opt/huawei/install/app +export PATH=$GAUSSHOME/bin:$PATH +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH +export S3_CLIENT_CRT_FILE=$GAUSSHOME/lib/client.crt +export GAUSS_VERSION=2.1.0 +export PGHOST=/opt/huawei/tmp +export GAUSSLOG=/var/log/omm/omm +umask 077 +export GAUSS_ENV=2 +export GS_CLUSTER_NAME=Cluster01 +``` + +## 安装Yukon gis组件 + +``` +tar -zxvf Yukon-1.0-Alpha-openGauss2.1.0-CentOS_x64.tar.gz +cd Yukon-1.0-Alpha-openGauss2.1.0/Yukon-1.0/ +sh install_yukon.sh -i +``` + +## 验证测试 + +``` +[omm@ogpri ~]$ gsql -d postgres -p 26000 +gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +openGauss=# create database test; +CREATE DATABASE +openGauss=# \c test; +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "test" as user "omm". 
+test=# create extension postgis; +CREATE EXTENSION +test=# +``` + diff --git "a/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\351\253\230\345\217\257\347\224\250jdbc\344\275\277\347\224\250\346\226\271\346\263\225.md" "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\351\253\230\345\217\257\347\224\250jdbc\344\275\277\347\224\250\346\226\271\346\263\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..dca4506e79a040bb92a5436f00a20cfa464a2f10 --- /dev/null +++ "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\351\253\230\345\217\257\347\224\250jdbc\344\275\277\347\224\250\346\226\271\346\263\225.md" @@ -0,0 +1,201 @@ ++++ + +title = "opengauss数据库-高可用jdbc使用方法" + +date = "2021-12-31" + +tags = [ "opengauss数据库-高可用jdbc使用方法"] + +archives = "2021-12" + +author = "周琦放" + +summary = "opengauss数据库-高可用jdbc使用方法" + +img = "/zh/post/2022/title/img20.png" + +times = "12:30" + ++++ + +# opengauss数据库-高可用jdbc使用方法 + +## 驱动下载 + +下载jdbc驱动 openGauss-2.1.0-JDBC.tar.gz + +下载地址:https://opengauss.org/zh/download.html + +## 表demo 案例 + +``` +create database test; +create schema demo; +CREATE TABLE demo.websites ( + id int NOT NULL, + name char(20) NOT NULL DEFAULT '', + url varchar(255) NOT NULL DEFAULT '', + PRIMARY KEY (id) +); +COMMENT ON COLUMN demo.websites.name IS '站点名称'; +INSERT INTO demo.websites VALUES + +('1', 'openGauss', 'https://opengauss.org/zh/'), +('2', '华为云', 'https://www.huaweicloud.com/'), +('3', 'openEuler', 'https://openeuler.org/zh/'), +('4', '华为support中心', 'https://support.huaweicloud.com/'); +``` + +## vip 使用方法 + +请注意192.168.56.229 为mogha组件中的vip 地址 + +``` +package com.company; +import java.sql.*; + +public class openGaussSelect { + static final String JDBC_DRIVER = "org.postgresql.Driver"; + static final String DB_URL = "jdbc:postgresql://192.168.56.229:26000/test"; + // 数据库的用户名与密码,需要根据自己的设置 + static final String USER = "test"; + static final String PASS = "zhou0815FANG"; + public static void main(String[] args) { + Connection conn = null; + Statement stmt = null; + try{ + // 注册 JDBC 驱动 + Class.forName(JDBC_DRIVER); + // 打开链接 + System.out.println("连接数据库..."); + conn = DriverManager.getConnection(DB_URL,USER,PASS); + + // 执行查询 + System.out.println(" 实例化Statement对象..."); + stmt = conn.createStatement(); + String sql; + sql = "SELECT id, name, url FROM demo.websites"; + ResultSet rs = stmt.executeQuery(sql); + // 展开结果集数据库 + while(rs.next()){ + // 通过字段检索 + int id = rs.getInt("id"); + String name = rs.getString("name"); + String url = rs.getString("url"); + // 输出数据 + System.out.print("ID: " + id); + System.out.print(", 站点名称: " + name); + System.out.print(", 站点 URL: " + url); + System.out.print("\n"); + } + // 完成后关闭 + rs.close(); + stmt.close(); + conn.close(); + }catch(SQLException se){ + + // 处理 JDBC 错误 + se.printStackTrace(); + }catch(Exception e){ + // 处理 Class.forName 错误 + e.printStackTrace(); + }finally{ + // 关闭资源 + try{ + if(stmt!=null) stmt.close(); + }catch(SQLException se2){ + }// 什么都不做 + try{ + if(conn!=null) conn.close(); + }catch(SQLException se){ + se.printStackTrace(); + } + } + System.out.println("Goodbye!"); + } +} +``` + +## 测试验证 + +![](figures/测试验证.png) + +## JDBC轮询 + +使用jdbc中targetServerType=master属性 + +``` +package com.company; + +import java.sql.*; +public class openGaussSelect { + static final String JDBC_DRIVER = "org.postgresql.Driver"; + static final String DB_URL = "jdbc:postgresql://192.168.56.227:26000,192.168.56.228:26000/test?targetServerType=master"; + // 数据库的用户名与密码,需要根据自己的设置 + 
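+    // 注:DB_URL 中同时列出主、备两个节点并指定 targetServerType=master,
+    // JDBC 驱动会按顺序尝试列表中的节点,只与当前可写的主库建立连接;
+    // 发生主备切换后,应用重连即可自动连到新主库,无需修改连接串。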
static final String USER = "test";
+    static final String PASS = "zhou0815FANG";
+    public static void main(String[] args) {
+        Connection conn = null;
+        Statement stmt = null;
+        try{
+            // 注册 JDBC 驱动
+            Class.forName(JDBC_DRIVER);
+
+            // 打开链接
+            System.out.println("连接数据库...");
+            conn = DriverManager.getConnection(DB_URL,USER,PASS);
+
+            // 执行查询
+            System.out.println(" 实例化Statement对象...");
+            stmt = conn.createStatement();
+            String sql;
+            sql = "SELECT id, name, url FROM demo.websites";
+            ResultSet rs = stmt.executeQuery(sql);
+
+            // 展开结果集数据库
+            while(rs.next()){
+                // 通过字段检索
+                int id = rs.getInt("id");
+                String name = rs.getString("name");
+                String url = rs.getString("url");
+
+                // 输出数据
+                System.out.print("ID: " + id);
+                System.out.print(", 站点名称: " + name);
+                System.out.print(", 站点 URL: " + url);
+                System.out.print("\n");
+            }
+            // 完成后关闭
+            rs.close();
+            stmt.close();
+            conn.close();
+        }catch(SQLException se){
+            // 处理 JDBC 错误
+            se.printStackTrace();
+        }catch(Exception e){
+            // 处理 Class.forName 错误
+            e.printStackTrace();
+        }finally{
+            // 关闭资源
+            try{
+                if(stmt!=null) stmt.close();
+            }catch(SQLException se2){
+            }// 什么都不做
+            try{
+                if(conn!=null) conn.close();
+            }catch(SQLException se){
+                se.printStackTrace();
+            }
+        }
+        System.out.println("Goodbye!");
+    }
+}
+```
+
+## 测试验证
+
+![](figures/测试验证1.png)
+
+![](figures/2测试验证.png)
+
diff --git "a/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\351\253\230\345\217\257\347\224\250\351\203\250\347\275\262mogha.md" "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\351\253\230\345\217\257\347\224\250\351\203\250\347\275\262mogha.md"
new file mode 100644
index 0000000000000000000000000000000000000000..4013994f685722734f1a0a709317c849291d6722
--- /dev/null
+++ "b/content/zh/post/2022/opengauss\346\225\260\346\215\256\345\272\223-\351\253\230\345\217\257\347\224\250\351\203\250\347\275\262mogha.md"
@@ -0,0 +1,234 @@
++++
+
+title = "opengauss数据库-高可用部署mogha"
+
+date = "2021-12-31"
+
+tags = [ "opengauss数据库-高可用部署mogha"]
+
+archives = "2021-12"
+
+author = "周琦放"
+
+summary = "opengauss数据库-高可用部署mogha"
+
+img = "/zh/post/2022/title/img21.png"
+
+times = "12:30"
+
++++
+
+# opengauss数据库-高可用部署mogha
+
+## 高可用
+
+- 组件下载
+
+    如无特殊说明,请在所有节点执行,执行用户为root
+
+    下载高可用组件 mogha-2.3.0-CentOS-x86\_64.tar.gz
+
+    下载地址:https://docs.mogdb.io/zh/mogha/v2.3/release-notes/
+
+
+## 环境配置要求
+
+由于 MogHA 需要自动挂载虚拟IP,内部要通过 ifconfig 指令操作网卡;而 MogHA 以数据库安装用户启动,执行网卡操作就需要 sudo 权限。安装期间脚本会检查 /etc/sudoers 中是否已配置运行用户的权限:如果存在则跳过配置,如果没有,则尝试自动将 omm 用户添加到 /etc/sudoers 中,并赋予 ifconfig 的权限。
+
+所以建议在部署 MogHA 服务之前,先检查一下 /etc/sudoers 中是否成功配置了运行用户的 sudo 权限,配置参考如下:
+
+```
+# 追加下列 1 行到文件末尾
+omm ALL=(ALL) NOPASSWD: /usr/sbin/ifconfig
+```
+
+## 数据库配置要求
+
+数据库要求至少为一主一备,数据库模式为同步。postgresql.conf中参数要求如下:
+
+```
+synchronous_commit = on
+```
+
+## 安装mogha
+
+建议存放在运行用户的家目录下,如 omm 用户对应 /home/omm 目录
+
+```
+su - root
+tar -zxvf mogha-2.3.0-CentOS-x86_64.tar.gz
+cd mogha/
+./install.sh omm /opt/huawei/install/data/dn
+```
+
+## 配置文件
+
+安装完成后,会生成一个node.conf文件,修改此配置文件
+
+请注意需要修改的内容
+
+```
+# docs: https://docs.mogdb.io/zh/mogha/v2.3/overview
+[config]
+
+# 数据库端口
+db_port=26000
+
+# 数据库的操作系统用户,通常为omm
+db_user=omm
+# 数据库的数据目录
+db_datadir=/opt/huawei/install/data/dn
+# 本地主库元数据存储路径
+# primary_info=/root/mogha/primary_info
+# 本地备库元数据存储路径
+# standby_info=/root/mogha/standby_info
+# 是否使用 lite 模式,可选值:True / False
+lite_mode=True
+# HA节点之间通信端口,如果有防火墙,需要配置互通
+agent_port=8081
+# [2.3.0新增]
+# HA节点间HTTP API 请求超时时间(秒)
+# http_req_timeout=3
+# 心跳间隔时间
+# heartbeat_interval=3
+# 主库丢失的探测时间
+# 
primary_lost_timeout=10 +# 主库的孤单时间 +# primary_lonely_timeout=10 +# 双主确认超时时间 +# double_primary_timeout=10 +# 本地元数据文件类型,支持 json/bin +# meta_file_type=json +# 是否为数据库实例进程限制cpu +# taskset=False +# 设置输出的日志格式 +# logger_format=%(asctime)s %(levelname)s [%(filename)s:%(lineno)d]: %(message)s +# [2.3.0新增]设置日志存储目录 +# log_dir=/root/mogha +# [2.3.0新增] 日志文件最大字节数(接近该值时,将发生日志滚动) +# 支持的单位:KB, MB, GB (忽略大小写) +# log_max_size=512MB +# [2.3.0新增] 日志保留的文件个数 +# log_backup_count=10 +# 设置除了主备相关的机器,允许可以访问到web接口的IP列表, 多个IP时逗号分隔 +# allow_ips= +# [2.1新增] 主实例进程未启动时,是否需要 HA 进行拉起或切换 +# 搭配 primary_down_handle_method 使用 +# handle_down_primary=True +# [2.1新增] 备库进程未启动时,是否需要 HA 进行拉起 +# handle_down_standby=True +# [2.1新增] 主库实例进程未启动时,如何处理 +# 支持两种处理方式: +# - restart: 尝试重启,尝试次数在 restart_strategy 参数中设定 +# - failover: 直接切换 +# primary_down_handle_method=restart +# [2.1新增] 重启实例最大尝试条件: times/minutes +# 例如: 10/3 最多尝试10次或者3分钟,任何一个条件先满足就不再尝试。 +# restart_strategy=10/3 +# [2.1.1新增] UCE(uncorrected error)故障感知功能,默认开启 +# uce_error_detection=True +# [2.1.1新增] UCE检查时,读取最后多少行日志数据进行判断 +# uce_detect_max_lines=200 +# [2.2.1新增] +# debug_mode=False +# (选填) 元数据库的连接参数(openGauss类数据库) +# [meta] +# ha_name= # HA集群的名称,全局唯一,禁止两套HA集群共用一个名字 +# host= # 机器IP +# port= # 端口 +# db= # 数据库名 +# user= # 用户名 +# password= # 密码 +# connect_timeout=3 # 连接超时,单位秒 +# host1-9,每个代表一个机器(最多支持1主8备) +# (lite模式需仅配置 host1 和 host2 即可,) +# - ip: 业务IP +# - heartbeat_ips: 心跳网络ip,允许配置多个心跳网络,以逗号隔开 +[host1] +ip=192.168.56.227 +heartbeat_ips= +[host2] +ip=192.168.56.228 +heartbeat_ips= +# [host3] +# ip= +# heartbeat_ips= +# [host4] +# ip= +# heartbeat_ips= +# [host5] +# ip= +# heartbeat_ips= +# [host6] +# ip= +# heartbeat_ips= +# [host7] +# ip= +# heartbeat_ips= +# [host8] +# ip= +# heartbeat_ips= +# [host9] +# ip= +# heartbeat_ips= + +# zone1~3 用于定义机房,不同机房配置独立虚拟IP, +# 切换不会切过去,作为异地保留项目 +# - vip: 机房虚拟IP (没有不填) +# - hosts: 本机房内机器列表, 填写机器在配置文件中对应的配置模块名 host1~9,示例:host1,host2 + +# - ping_list: 用于检查网络是否通畅的仲裁节点,例如网关,支持填写多个IP (逗号分隔) + +# - cascades: 机房内的级联机器列表 (配置方式同 hosts, 没有不填) +# - arping: (选填) 机房的 arping 地址 + +[zone1] +## 该VIP为虚拟ip地址,请根据实际情况填写 +vip=192.168.56.229 + +hosts=host1,host2 +ping_list=192.168.56.1 +cascades= +arping= +# [zone2] +# vip= +# hosts= +# ping_list= +# cascades= +# arping= +# [zone3] +# vip= +# hosts= +# ping_list= +# cascades= +# arping= +``` + +## 启动 + +``` +systemctl start mogha.service +``` + +## 查看日志信息 + +- 主库心跳日志信息 + + ``` + 2021-12-29 13:20:49,211 INFO [__init__.py:59]: ping result: {'192.168.56.1': True, '192.168.56.228': True} + 2021-12-29 13:20:49,323 INFO [__init__.py:84]: local instance is alive Primary, state: Normal + 2021-12-29 13:20:54,593 INFO [__init__.py:59]: ping result: {'192.168.56.1': True, '192.168.56.228': True} + 2021-12-29 13:20:54,719 INFO [__init__.py:84]: local instance is alive Primary, state: Normal + ``` + +- 备库心跳日志信息 + + ``` + 2021-12-29 13:32:07,774 INFO [__init__.py:59]: ping result: {'192.168.56.1': True, '192.168.56.227': True} + 2021-12-29 13:32:07,890 INFO [__init__.py:84]: local instance is alive Standby, state: Normal + 2021-12-29 13:32:13,109 INFO [__init__.py:59]: ping result: {'192.168.56.1': True, '192.168.56.227': True} + 2021-12-29 13:32:13,219 INFO [__init__.py:84]: local instance is alive Standby, state: Normal + ``` + + diff --git a/content/zh/post/2022/title/img1.png b/content/zh/post/2022/title/img1.png new file mode 100644 index 0000000000000000000000000000000000000000..2af578504062e5fa7a7aaf7e1c2014531e51e9c2 Binary files /dev/null and b/content/zh/post/2022/title/img1.png differ diff --git 
a/content/zh/post/2022/title/img10.png b/content/zh/post/2022/title/img10.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/zh/post/2022/title/img10.png differ diff --git a/content/zh/post/2022/title/img11.png b/content/zh/post/2022/title/img11.png new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cb03c6ee1e735b29bce766c1e10d334f0c Binary files /dev/null and b/content/zh/post/2022/title/img11.png differ diff --git a/content/zh/post/2022/title/img12.png b/content/zh/post/2022/title/img12.png new file mode 100644 index 0000000000000000000000000000000000000000..0ec8535146c6a1d5e0b78ee6c1a6b3a8ede1cdf3 Binary files /dev/null and b/content/zh/post/2022/title/img12.png differ diff --git a/content/zh/post/2022/title/img13.png b/content/zh/post/2022/title/img13.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/2022/title/img13.png differ diff --git a/content/zh/post/2022/title/img14.png b/content/zh/post/2022/title/img14.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/zh/post/2022/title/img14.png differ diff --git a/content/zh/post/2022/title/img15.jpg b/content/zh/post/2022/title/img15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cb03c6ee1e735b29bce766c1e10d334f0c Binary files /dev/null and b/content/zh/post/2022/title/img15.jpg differ diff --git a/content/zh/post/2022/title/img16.png b/content/zh/post/2022/title/img16.png new file mode 100644 index 0000000000000000000000000000000000000000..2af578504062e5fa7a7aaf7e1c2014531e51e9c2 Binary files /dev/null and b/content/zh/post/2022/title/img16.png differ diff --git a/content/zh/post/2022/title/img17.png b/content/zh/post/2022/title/img17.png new file mode 100644 index 0000000000000000000000000000000000000000..b903c7f8d5a3ba8b66b2d6be883a4bac7230915e Binary files /dev/null and b/content/zh/post/2022/title/img17.png differ diff --git a/content/zh/post/2022/title/img18.png b/content/zh/post/2022/title/img18.png new file mode 100644 index 0000000000000000000000000000000000000000..1697caef6995dd16977bb9aa96af762e19fb7102 Binary files /dev/null and b/content/zh/post/2022/title/img18.png differ diff --git a/content/zh/post/2022/title/img19.png b/content/zh/post/2022/title/img19.png new file mode 100644 index 0000000000000000000000000000000000000000..5537c95b900978a3020269be7ec52ce914224844 Binary files /dev/null and b/content/zh/post/2022/title/img19.png differ diff --git a/content/zh/post/2022/title/img2.png b/content/zh/post/2022/title/img2.png new file mode 100644 index 0000000000000000000000000000000000000000..5537c95b900978a3020269be7ec52ce914224844 Binary files /dev/null and b/content/zh/post/2022/title/img2.png differ diff --git a/content/zh/post/2022/title/img20.png b/content/zh/post/2022/title/img20.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/zh/post/2022/title/img20.png differ diff --git a/content/zh/post/2022/title/img21.png b/content/zh/post/2022/title/img21.png new file mode 100644 index 0000000000000000000000000000000000000000..b71bb7d740d0f375bbea6116ffde9175c0dbcacf Binary files /dev/null and b/content/zh/post/2022/title/img21.png differ diff --git 
a/content/zh/post/2022/title/img22.png b/content/zh/post/2022/title/img22.png new file mode 100644 index 0000000000000000000000000000000000000000..31e776c19ddc9b62b4b88171d015b1b94ff2b022 Binary files /dev/null and b/content/zh/post/2022/title/img22.png differ diff --git a/content/zh/post/2022/title/img3.png b/content/zh/post/2022/title/img3.png new file mode 100644 index 0000000000000000000000000000000000000000..b903c7f8d5a3ba8b66b2d6be883a4bac7230915e Binary files /dev/null and b/content/zh/post/2022/title/img3.png differ diff --git a/content/zh/post/2022/title/img4.png b/content/zh/post/2022/title/img4.png new file mode 100644 index 0000000000000000000000000000000000000000..6b7b474933a31c6a20d0d1708e8909163293b4ad Binary files /dev/null and b/content/zh/post/2022/title/img4.png differ diff --git a/content/zh/post/2022/title/img5.png b/content/zh/post/2022/title/img5.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/zh/post/2022/title/img5.png differ diff --git a/content/zh/post/2022/title/img6.png b/content/zh/post/2022/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..b71bb7d740d0f375bbea6116ffde9175c0dbcacf Binary files /dev/null and b/content/zh/post/2022/title/img6.png differ diff --git a/content/zh/post/2022/title/img7.png b/content/zh/post/2022/title/img7.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/zh/post/2022/title/img7.png differ diff --git a/content/zh/post/2022/title/img8.png b/content/zh/post/2022/title/img8.png new file mode 100644 index 0000000000000000000000000000000000000000..31e776c19ddc9b62b4b88171d015b1b94ff2b022 Binary files /dev/null and b/content/zh/post/2022/title/img8.png differ diff --git a/content/zh/post/2022/title/img9.png b/content/zh/post/2022/title/img9.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/zh/post/2022/title/img9.png differ diff --git "a/content/zh/post/2022/zabbix-\347\233\221\346\216\247-MogDB-openGauss-\344\271\213-\351\207\207\351\233\206prometheus\346\225\260\346\215\256.md" "b/content/zh/post/2022/zabbix-\347\233\221\346\216\247-MogDB-openGauss-\344\271\213-\351\207\207\351\233\206prometheus\346\225\260\346\215\256.md" new file mode 100644 index 0000000000000000000000000000000000000000..edecaa106677cea3bc43caff0bd2c1702f875234 --- /dev/null +++ "b/content/zh/post/2022/zabbix-\347\233\221\346\216\247-MogDB-openGauss-\344\271\213-\351\207\207\351\233\206prometheus\346\225\260\346\215\256.md" @@ -0,0 +1,140 @@ ++++ + +title = "zabbix 监控 MogDB/openGauss 之 采集prometheus数据" + +date = "2021-12-16" + +tags = [ "zabbix 监控 MogDB/openGauss 之 采集prometheus数据"] + +archives = "2021-12" + +author = "高云龙" + +summary = "zabbix 监控 MogDB/openGauss 之 采集prometheus数据" + +img = "/zh/post/2022/title/img3.png" + +times = "12:30" + ++++ + +# zabbix 监控 MogDB/openGauss 之 采集prometheus数据 + +## 前言 + +市场上比较的监控方式有两种:zabbix和prometheus架构,对于MogDB/openGauss数据库来说,已经通过[grafana + prometheus + opengauss\_exporter](https://www.modb.pro/db/173483)的方式完成了监控部署,如何通过zabbix完成对MogDB/openGauss数据库完成监控呢,通过zabbix官网我们知道从zabbix 4.2版本开始支持了Prometheus 数据源,那本篇文章先实现通过zabbix采集prometheus数据,zabbix底层的数据存储采用MogDB数据库。 + +## 软件信息 + +- OS: CentOS 7.9 on x86 +- database:MogDB 2.0.1 +- prometheus:2.31.1 +- opengauss\_exporter: 0.0.9 + 
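+With the exporter in place, it helps to confirm that its /metrics endpoint is actually serving data before pointing zabbix at it. A minimal sketch (Java 11+; the exporter address 172.16.3.90:9187 is an assumption for this environment, not a guaranteed default of opengauss\_exporter, so substitute your own host and port):
+
+```
+// MetricsProbe.java: fetch the first lines of the exporter's Prometheus
+// exposition output. The host and port below are assumptions for this
+// environment; adjust them to your own exporter address.
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+
+public class MetricsProbe {
+    public static void main(String[] args) throws Exception {
+        HttpClient client = HttpClient.newHttpClient();
+        HttpRequest request = HttpRequest.newBuilder()
+                .uri(URI.create("http://172.16.3.90:9187/metrics"))
+                .build();
+        HttpResponse<String> response =
+                client.send(request, HttpResponse.BodyHandlers.ofString());
+        System.out.println("HTTP " + response.statusCode());
+        // Show a few metric lines so the metric names can be checked by eye.
+        response.body().lines().limit(10).forEach(System.out::println);
+    }
+}
+```
+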
+This environment already has the MogDB database, prometheus, and opengauss\_exporter installed; this post focuses on installing zabbix and wiring it up to prometheus.
+
+--Install the dependency packages
+
+```
+yum -y install gcc gcc-c++ curl curl-devel net-snmp net-snmp-devel readline.x86_64 readline-devel.x86_64 zlib.x86_64 zlib-devel.x86_64 libevent.x86_64 libevent-devel.x86_64 postgresql-devel.x86_64 golang.x86_64 libmcrypt-devel mhash-devel libxslt-devel libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel curl curl-devel e2fsprogs e2fsprogs-devel krb5 krb5-devel libidn libidn-devel openssl openssl-devel sqlite-devel.x86_64 sqlite.x86_64 oniguruma-devel oniguruma
+```
+
+## Installing and deploying zabbix
+
+[Follow the quick-deployment guide on the zabbix official site](https://www.zabbix.com/cn/download?zabbix=5.0&os_distribution=centos&os_version=7&db=postgresql&ws=nginx)
+
+- Preparation
+
+    --Install the zabbix repository
+
+    ```
+    # rpm -Uvh https://repo.zabbix.com/zabbix/5.0/rhel/7/x86_64/zabbix-release-5.0-1.el7.noarch.rpm
+    # yum clean all
+
+    --Install the zabbix server and agent
+    # yum install zabbix-server-pgsql zabbix-agent
+
+    --Configure the Zabbix frontend
+    # yum install centos-release-scl
+
+    --Edit zabbix.repo and enable the frontend repository
+    vim /etc/yum.repos.d/zabbix.repo
+    [zabbix-frontend]
+    ...
+    enabled=1
+    ...
+
+    --Install the Zabbix frontend packages (PostgreSQL flavor) and nginx
+    # yum install zabbix-web-pgsql-scl zabbix-nginx-conf-scl
+
+    --Configure PHP for the Zabbix frontend
+    vim /etc/opt/rh/rh-nginx116/nginx/conf.d/zabbix.conf
+
+    listen 80;
+    server_name 172.16.3.90;
+
+    ***
+    vim /etc/opt/rh/rh-php72/php-fpm.d/zabbix.conf
+
+    listen.acl_users = apache,nginx
+    php_value[date.timezone] = Europe/Riga
+    ```
+
+
+- MogDB database configuration
+
+    ```
+    --Create the database
+    postgres=# create database zabbix DBCOMPATIBILITY='PG';
+
+    --Create the user
+    postgres=# \c zabbix
+    zabbix=# create user zabbix encrypted password 'zabbix@123';
+
+    --Modify pg_hba.conf
+    host all zabbix 172.16.3.90/32 md5
+
+    --Import the initial schema
+    $ zcat /usr/share/doc/zabbix-server-pgsql*/create.sql.gz | gsql -h 172.16.3.90 -U zabbix zabbix
+
+    ```
+
+
+- Start the Zabbix server and agent processes
+
+    ```
+    --Start the Zabbix server and agent processes and enable them at boot:
+
+    # systemctl restart zabbix-server zabbix-agent rh-nginx116-nginx rh-php72-php-fpm
+    # systemctl enable zabbix-server zabbix-agent rh-nginx116-nginx rh-php72-php-fpm
+    ```
+
+- Open the Zabbix frontend
+
+    Point a browser directly at the freshly installed frontend: 172.16.3.90. If the pages below appear, zabbix started successfully. After walking through the frontend setup wizard, log in with the initial account Admin and password zabbix.
+
+    ![](figures/20211203-7294cdd5-5b8a-41dd-9558-468c56d0e49d.png)
+
+    ![](figures/20211203-8632d683-5aa7-4e1f-907c-3952796968f4.png)
+
+
+## Pointing zabbix at prometheus
+
+- Create the monitoring item
+
+    In the zabbix UI: Configuration --\> Hosts --\> Items --\> Create Item
+
+    ![](figures/20211216-cd0ca2d6-dd3c-41d5-9643-775edc3e9035.png)
+
+- Fill in the item details
+
+    ![](figures/20211216-05611555-f74d-47d5-8057-a86a6fd5e38f.png)
+
+    ![](figures/20211216-2e9cd439-b92e-4fcd-8180-ef7096c80a16.png)
+
+- Check the item
+
+    ![](figures/20211216-b9c6b9ce-6a77-4ce0-a064-291015801db2.png)
+
+
diff --git "a/content/zh/post/2022/\345\215\216\344\270\272openGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" "b/content/zh/post/2022/\345\215\216\344\270\272openGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md"
new file mode 100644
index 0000000000000000000000000000000000000000..ef94d00e41b1721b8055688718145dec1f89b9e2
--- /dev/null
+++ "b/content/zh/post/2022/\345\215\216\344\270\272openGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md"
@@ -0,0 +1,851 @@
++++
+
+title = "华为openGauss数据库安装与使用" + +date = "2021-12-29" + +tags = [ "华为openGauss数据库安装与使用"] + +archives = "2021-12" + +author = "Vector " + +summary = "华为openGauss数据库安装与使用" + +img = "/zh/post/2022/title/img6.png" + +times = "12:30" + ++++ + +# 华为openGauss数据库安装与使用 + +主要参考博客:[opengauss单机部署-墨天轮](https://www.modb.pro/doc/4705) + +[企业版安装 | openGauss](https://opengauss.org/zh/docs/2.0.0/docs/installation/%E4%BC%81%E4%B8%9A%E7%89%88%E5%AE%89%E8%A3%85.html) + +## 1. 虚拟机安装 + +先做安装准备,点击链接[下载](https://download3.vmware.com/software/wkst/file/VMware-workstation-full-16.1.2-17966106.exe)VMware Workstation Pro16,此处为Windows 10使用。 + +- 1.1 VMware安装 + + 打开下载好的exe文件,即开始安装: + + ![](figures/615c183c2ab3f51d914bfbb6.png) + + 安装位置默认在C盘,点击更改可以修改安装位置,我安装到了E:\\VMware\\下,安装路径尽量不要有中文,记得勾选PATH按钮,这样不用自己再添加环境变量,可勾选增强型键盘驱动程序,此功能可更好地处理国际键盘和带有额外按键的键盘: + + ![](figures/615c15c42ab3f51d91484e93.png) + + 一直点击下一步: + + ![](figures/615c11832ab3f51d914222f4.png) + + ![](figures/615c11832ab3f51d914222dd.png) + + ![](figures/615c11832ab3f51d914222e9.png) + + ![](figures/615c11832ab3f51d91422301.png) + + 点击输入许可证,密钥可以自己购买,或者百度搜索以下,多尝试几个,下面是我当时安装使用的密钥,不知道现在失效没有: + + ![](figures/615c183c2ab3f51d914bfbaf.png) + + 安装后可能要求重启系统,重启后进入软件。依次点击导航栏中的 帮助 -\> 关于 VMware Workstation ,查看许可证信息的状态,如下图所示即为激活成功。 + + ![](figures/615c15c42ab3f51d91484e9e.png) + +- 1.2 虚拟机部署centos + + 可以在官方网站下载centos7,只有centos7.6支持安装opengauss,如果找不到7.6版本的centos,也可安装稍高版本的centos,安装完之后需要在系统文件中做相关修改,我下载的是centos7.9,文件太大了,需要下一段时间,记得更改下载保存的位置,我放在了E:\\Linux\\下。我第一次安装时不知道必须安装centos7,安装成了centos8,而重新安装时部分截图忘记保存,所以下面部分截图出现的centos8,大家视为centos7就好。 + + ![](figures/615c15c42ab3f51d91484ead.png) + + 下载完成,打开VMware选择新建虚拟机: + + ![](figures/615c191d2ab3f51d914d3f1b.png) + + ![](figures/615c191d2ab3f51d914d3f25.png) + + 浏览文件,选择centos7的下载目录,选择镜像文件: + + ![](figures/615c191d2ab3f51d914d3f32.png) + + 设置虚拟机的名称和账户名,以及密码: + + ![](figures/615c191d2ab3f51d914d3f43.png) + + 选择虚拟机的安装位置: + + ![](figures/615c19272ab3f51d914d4e90.png) + + 设置磁盘的容量,默认为20GB,我修改为了40GB,点击下一步即可: + + ![](figures/615c19272ab3f51d914d4e97.png) + + 自定义硬件可以根据自己的需求,修改centos的设置: + + ![](figures/615c19272ab3f51d914d4e9d.png) + + 内存大小默认为1GB,我设置为了2GB: + + ![](figures/615c19272ab3f51d914d4ea8.png) + + 网络适配器选择NAT模式,设置完成之后点击确定: + + ![](figures/615c19272ab3f51d914d4eaf.png) + + ![](figures/615c19302ab3f51d914d5de4.png) + + 等待安装: + + ![](figures/615c19302ab3f51d914d5dd8.png) + + ![](figures/615c19302ab3f51d914d5df7.png) + + ![](figures/615c19302ab3f51d914d5e02.png) + + ![](figures/615c19302ab3f51d914d5e11.png) + + ![](figures/615c193f2ab3f51d914d72c2.png) + + 中间会出现这个页面让你设置,如果你没赶快进行操作,就跳过去了,设置不设置都没有关系,安装完成之后也可以设置: + + ![](figures/615c193f2ab3f51d914d72ba.png) + + 如下是,点击各个按钮进行时间、显示、输入法的设置: + + ![](figures/615c193f2ab3f51d914d72fc.png) + + ![](figures/615c19492ab3f51d914d811b.png) + + ![](figures/615c19492ab3f51d914d8137.png) + + ![](figures/615c19492ab3f51d914d8153.png) + + 设置完成之后继续安装,安装完毕,输入设置的密码之后,回车: + + ![](figures/615c193f2ab3f51d914d72e9.png) + + 安装成功! 
+ + ![](figures/615c19492ab3f51d914d8161.png) + +- 1.3 centos配置 + - 1.3.1 设置系统版本 + + 因为opengauss要求的centos版本是7.6,因此我们需要修改/etc/redhat-release文件: + + ![](figures/615c15c42ab3f51d91484ed6.png) + + ``` + #进入管理员模式 + su + #打开文件,进行编辑 + vi /etc/redhat-release + ``` + + 修改成如下内容CentOS Linux release 7.6 \(Core\): + + ![](figures/615c15c42ab3f51d91484ec6.png) + + - 1.3.2 网络设置 + + 使用ifconfig或者ip addr可以查看自己的ip地址 + + ![](figures/615c16922ab3f51d914979b2.png) + + 我的网卡的名字为ens-33,接下来,给网卡增加DNS:echo 'DNS1=114.114.114.114'\>\>/etc.sysconfig/network-scripts/ifcfg-ens33 + + 重启网卡:systemctl restart network,测试是否可以访问:ping www.baidu.com + + ![](figures/615c16922ab3f51d914979bf.png) + + 如上图所示,则可以访问。 + + - 1.3.3 修改主机名 + + ``` + echo "vector" > /etc/hostname + echo "192.168.48.128 vector" >>/etc/hostd + ``` + + 最后系统重启后记得查看主机名是否修改成功: + + ``` + cat /etc/hostname + ``` + + - 1.3.4 配置YUM源 + + 删除系统自带的yum源 + + ``` + rm -rf /etc/yum.repos.d/* + ``` + + 下载阿里云yum源 + + ``` + wget -O /etc/yum.repos.d/CentOS-Base http://mirrors.aliyun.com/repo/Centos7.repo + ``` + + 生成仓库缓存 + + ``` + yum makecache + ``` + + 安装python3.6,一定要装3.6版本 + + ``` + sudo yum install epel-release + sudo yum install python36 + ``` + + - 1.3.5 关闭防火墙 + + ``` + sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config + ``` + + - 1.3.6 设置字符集 + + ``` + cat >>/etc/profile< + +- 2.1 安装前准备 + + 我下载的安装包是[企业版2.0.0版本](https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86/openGauss-2.0.0-CentOS-64bit-all.tar.gz),刚开始装的是极简版,但是极简版缺少安装包,缺少工具,最后回归企业版。安装过程,参考了[官方文档](https://opengauss.org/zh/docs/2.0.0/docs/installation/%E4%BC%81%E4%B8%9A%E7%89%88%E5%AE%89%E8%A3%85.html)。 + + 将下载好的安装包解压,我放在了目录/opt/software/openGauss/: + + ``` + #先创建文件夹 + mkdir -p /opt/software/openGauss + #设置访问权限 + chmod 755 -R /opt/software + ``` + + - 不建议把安装包的存放目录规划到openGauss用户的根目录或其子目录下,可能导致权限问题。 + - openGauss用户须具有/opt/software/openGauss目录的读写权限。 + + 在安装包所在的目录下,解压安装包openGauss-2.0.0-CentOS-64bit-all.tar.gz。安装包解压后,会有om安装包和server安装包。继续解压om安装包,会在/opt/software/openGauss路径下自动生成script子目录,并且在script目录下生成gs\_preinstall等各种om工具脚本。 + + 建议跟我目录放的一样,不然容易出问题,解压命令如下: + + ``` + cd /opt/software/openGauss + tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz + tar -zxvf openGauss-2.0.0-CentOS-64bit-om.tar.gz + ``` + + ![](figures/615c16932ab3f51d914979dd.png) + + 在该目录下获取XML文件script/gspylib/etc/conf/cluster\_config\_template.xml,重命名为cluster\_config.xml放在/opt/software/openGauss/下,并将以下模板修改为自己的信息放入xml文件,第37行的15400表示设置了数据库的端口号,以下模板只需要更改两点:ip地址和主机名: + + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ``` + + 根据我的ip地址192.168.48.128和我的主机名vector更改之后文件内容如下: + + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ``` + + 执行以下命令准备安装环境: + + ``` + cd /opt/software/openGauss/script + ./gs_preinstall -U omm -G dbgrp -L -X /opt/software/openGauss/cluster_config.xml + ``` + + ![](figures/615c14052ab3f51d9145c394.png) + + 如上表示预安装成功! 
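+
+    If gs\_preinstall instead fails while parsing the configuration file, first rule out malformed XML in the edited cluster\_config.xml. A small sketch using only the JDK's built-in parser (the file path follows this post's layout and is an assumption; adjust as needed):
+
+    ```
+    // CheckClusterConfig.java: parse the file and report whether it is
+    // well-formed XML. The default path is an assumption from this post.
+    import javax.xml.parsers.DocumentBuilderFactory;
+    import org.w3c.dom.Document;
+
+    public class CheckClusterConfig {
+        public static void main(String[] args) throws Exception {
+            String path = args.length > 0 ? args[0]
+                    : "/opt/software/openGauss/cluster_config.xml";
+            // parse() throws an exception if the XML is not well-formed
+            Document doc = DocumentBuilderFactory.newInstance()
+                    .newDocumentBuilder().parse(path);
+            System.out.println("Well-formed, root element: "
+                    + doc.getDocumentElement().getNodeName());
+        }
+    }
+    ```
+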
+ +- 2.2 执行安装 + - 2.2.1 检查 + + 检查安装包和openGauss配置文件在规划路径下是否已存在,如果没有,重新执行预安装,确保预安装成功,再执行以下步骤。 + + - 2.2.2 切换用户 + + 登录到openGauss的主机,并切换到omm用户: + + ``` + su omm + ``` + + omm指的是前置脚本gs\_preinstall中-U参数指定的用户。 + + 安装脚本gs\_install必须以前置脚本中指定的omm执行,否则,脚本执行会报错。 + + - 2.2.3 安装 + + 使用gs\_install安装openGauss。 + + ``` + gs_install -X /opt/software/openGauss/cluster_config.xml + ``` + + /opt/software/openGauss/cluster\_config.xml为openGauss配置文件的路径。在执行过程中,用户需根据提示输入数据库的密码,密码具有一定的复杂度,为保证用户正常使用该数据库,请记住输入的数据库密码。 + + 设置的密码要符合复杂度要求: + + - 最少包含8个字符。 + - 不能和用户名、当前密码(ALTER)、或当前密码反序相同。 + - 至少包含大写字母(A-Z),小写字母(a-z),数字,非字母数字字符(限定为\~!@\#$%^&\*\(\)-\_=+|\[\{\}\];:,<.\>/?)四类字符中的三类字符。 + + ![](figures/615c14052ab3f51d9145c3a9.png) + + 执行如下命令检查数据库状态是否正常: + + ``` + gs_om -t status + ``` + + ![](figures/615c16932ab3f51d914979e7.png) + + cluster\_state 显示“Normal”表示数据库可正常使用。 + + ![](figures/615c14a32ab3f51d9146b96f.png) + + 如首次安装数据库不成功,则卸载后重新安装,卸载方式如下: + + ``` + gs_uninstall ‐‐delete‐data + ``` + + - 2.2.4 初始化数据库 + + 使用SQL语句创建数据库database时,指定数据库的字符集为GBK。 + + ``` + #后面跟的是端口号,我的是15400 + gsql -d postgres -p 15400 + ``` + + ``` + CREATE DATABASE mydb WITH ENCODING 'GBK' template = template0; + ``` + + ![](figures/615c14a32ab3f51d9146b994.png) + + 显示如下信息: + + ``` + CREATE DATABASE + ``` + + 创建schema: + + ``` + CREATE SCHEMA tpcds; + ``` + + 创建表: + + ``` + CREATE TABLE tpcds.warehouse_t1 + ( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) + ); + ``` + + ![](figures/615ffa572ab3f51d91af9b67.jpg) + + 查看表信息: + + ![](figures/615ffb2b2ab3f51d91b0c00c.jpg) + + ``` + insert into tpcds.warehouse_t1(w_warehouse_sk,w_warehouse_id) values(12,'000001'); + insert into tpcds.warehouse_t1(w_warehouse_sk,w_warehouse_id) values(25,'000002'); + select w_warehouse_sk, w_warehouse_id from tpcds.warehouse_t1; + ``` + + 向数据库中添加数据之后查看: + + ![](figures/615ffbbb2ab3f51d91b187c6.jpg) + + 如果不知道自己的端口号,可根据以下方式查看: + + 查看自己的cluster\_config.xml文件,查看自己将端口号设置为了多少. + + 使用如下命令查看: + + ``` + gs_om -t status --detail + cd /opt/huawei/install/data/dn + ``` + + ![](figures/615c14a32ab3f51d9146b960.png) + + +- 2.3 JDBC连接数据库 + - 2.3.1 准备java环境 + + 查看centos的java环境,centos自带java1.8,需要安装配套的javac,注意要是1.8.0版。 + + ``` + yum install java-1.8.0-openjdk-devel.x86_64 + ``` + + 下载驱动包2.0.0版本[postgresql.jar](https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86/openGauss-2.0.0-JDBC.tar.gz),放在路径/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7\_9.x86\_64/jre/lib/ext下: + + ``` + cp postgresql.jar /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/lib/ext + ``` + + ![](figures/615c16f62ab3f51d914a1b92.png) + + 测试是否具备运行java代码的环境: + + ``` + java -version + javac -version + ``` + + ![](figures/615c16f62ab3f51d914a1ba8.png) + + 已具备运行环境! 
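+
+    A quick way to confirm that the copied driver is visible to the JVM before writing the full program is shown below (a minimal sketch; org.postgresql.Driver is the driver class name used throughout this post):
+
+    ```
+    // DriverCheck.java: succeeds only if postgresql.jar is on the classpath.
+    public class DriverCheck {
+        public static void main(String[] args) {
+            try {
+                Class.forName("org.postgresql.Driver");
+                System.out.println("JDBC driver loaded successfully");
+            } catch (ClassNotFoundException e) {
+                System.out.println("Driver not found; check where postgresql.jar was copied: " + e.getMessage());
+            }
+        }
+    }
+    ```
+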
+ + - 2.3.2 准备好连接的java代码 + + 记得替换成你设置的用户名、密码、端口号,如果你是按照我前面的操作,用户名应该是omm, + + ``` + import java.sql.Connection; + import java.sql.DriverManager; + import java.sql.PreparedStatement; + import java.sql.SQLException; + import java.sql.Statement; + import java.sql.CallableStatement; + + public class test{//keep + public static Connection getConnect(String username, String passwd) + { + //驱动类。 + String driver = "org.postgresql.Driver"; + //数据库连接描述符。将15400替换为自己的端口号 + String sourceURL = "jdbc:postgresql://127.0.0.1:15400/postgres"; + Connection conn = null; + + try + { + //加载驱动。 + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + //创建连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; + + //try to connect + public static void main(String[] args) + { + // TODO Auto-generated method stub + Connection conn = getConnect("user", "password");//replace by my user and password + //BatchInsertData(conn); + try + { + conn.close(); + } + catch (SQLException e) + { + e.printStackTrace(); + } + } + } + ``` + + - 2.3.3 配置服务端远程连接 + + 以操作系统用户omm登录数据库。 + + 配置listen\_addresses,即远程客户端连接使用的数据库主节点ip或者主机名。 + + 使用如下命令查看数据库主节点目前的listen\_addresses配置。 + + ``` + gs_guc check -I all -c "listen_addresses" + ``` + + 使用如下命令把要查询出的ip追加到listen\_addresses后面,多个配置项之间用英文逗号分隔。例如,追加ip地址10.11.12.13。 + + ``` + gs_guc set -I all -c "listen_addresses='localhost,10.11.12.13'" + ``` + + 执行如下命令重启openGauss + + ``` + gs_om -t stop && gs_om -t start + ``` + + ![](figures/615c15482ab3f51d9147a2ba.png) + + - 2.3.4 连接 + + 首先需要启动数据库 + + ``` + su omm + gs_om -t start + ``` + + 运行java代码 + + ``` + javac test.java + java test + ``` + + ![](figures/615c15482ab3f51d9147a2b3.png) + + - 2.3.5 操纵数据 + + 使用如下java代码访问并对表中数据进行查询(记得替换用户、密码和端口): + + ``` + import java.sql.Connection; + import java.sql.DriverManager; + import java.sql.PreparedStatement; + import java.sql.SQLException; + import java.sql.Statement; + import java.sql.CallableStatement; + import java.sql.ResultSet; + import java.sql.SQLException; + + public class gausstest{//keep + public static Connection getConnect(String username, String passwd) + { + //驱动类。 + String driver = "org.postgresql.Driver"; + //数据库连接描述符。 + String sourceURL = "jdbc:postgresql://127.0.0.1:15400/postgres"; + Connection conn = null; + + try + { + //加载驱动。 + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + //创建连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; + + //try to connect + public static void main(String[] args) throws SQLException + { + // TODO Auto-generated method stub + Connection conn = getConnect("user", "password");//replace by my user and password + //BatchInsertData(conn); + Statement st = conn.createStatement(); + String sql = "select w_warehouse_sk,w_warehouse_id from tpcds.warehouse_t1"; + ResultSet rs = st.executeQuery(sql); + while(rs.next()) { + int w_warehouse_sk = rs.getInt("w_warehouse_sk"); + String w_warehouse_id = rs.getString("w_warehouse_id"); + System.out.println("w_warehouse_sk = " + w_warehouse_sk + "; w_warehouse_id = " + w_warehouse_id); + } + try + { + conn.close(); + st.close(); + rs.close(); + } + catch (SQLException e) + { + e.printStackTrace(); + } + } + } + ``` 
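+
+    The same query can also be written more defensively with try-with-resources, which closes the result set, statement, and connection automatically even on error paths. A sketch using the same placeholder user, password, and port as above:
+
+    ```
+    import java.sql.Connection;
+    import java.sql.DriverManager;
+    import java.sql.ResultSet;
+    import java.sql.SQLException;
+    import java.sql.Statement;
+
+    public class GaussTestTwr {
+        public static void main(String[] args) {
+            // Resources declared in the try header are closed automatically,
+            // in reverse order, even when an exception is thrown.
+            try (Connection conn = DriverManager.getConnection(
+                         "jdbc:postgresql://127.0.0.1:15400/postgres", "user", "password");
+                 Statement st = conn.createStatement();
+                 ResultSet rs = st.executeQuery(
+                         "select w_warehouse_sk, w_warehouse_id from tpcds.warehouse_t1")) {
+                while (rs.next()) {
+                    System.out.println(rs.getInt("w_warehouse_sk")
+                            + "\t" + rs.getString("w_warehouse_id"));
+                }
+            } catch (SQLException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+    ```
+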
+ + ![](figures/615ffdad2ab3f51d91b42898.jpg) + + + +## 3. 遇到的问题 + +我感觉我把所有能遇到的问题都遇到了,最后成功是重装一遍,什么问题没遇到。 + +- 3.1 使用gs\_ctl提示找不到命令 + +如下图所示: + +![](figures/615c13152ab3f51d91446977.png) + +参看博客[Linux下解决命令未找到的问题 - ML。 - 博客园 \(cnblogs.com\)](https://www.cnblogs.com/mnote/p/8832806.html),对于本问题主要使用的命令是: + +``` +#进入管理员模式 +su +which gs_ctl +``` + +![](figures/615c16f62ab3f51d914a1b6d.png) + +接下来需要做的是把查找出的路径直接链接到/usr/bin下。操作如下: + +``` +ln -s xxx/xxx /usr/bin +``` + +以上xxx代表你查出来的路径。 + +![](figures/615c533b2ab3f51d91a72523.jpg) + +- 3.2 gs\_om命令找不到 + + 不得不说极简版安装包下没有gs\_om文件,我搜遍了也没有,在企业版中,我因为懒得重装把我同学下载的企业版中的gs\_之类的文件全拷过来了,但是后来遇到了其他问题,我又重装了,不知道我这个操作最终会带来什么影响。 + +- 3.3 sudo和su都用不了 + + sudo chmod -R 777 / 修改根目录权限问题修复,参考了[ 关于不小心777导致没法sudo权限后的修改解决办法\_空木格子的博客-CSDN博客](https://blog.csdn.net/qq_39543212/article/details/84107240) + + 我应该是因为sudo用不了提示sudo: must be setuid root,然后我进入根目录下修改了某个文件为777,直接导致su也用不了。这下好了,要用su让我先用sudo修改相关文件,要用sudo让我先用su修改文件! + + 解决这个问题需要先进入安全模式,进入方法为:在开机的过程中按shift或ESC键,好像在系统中按F1还是F2也可以。 + + 此时,已经进入到具有root权限的字符界面,输入以下命令解决了。 + + ``` + ls -l /usr/bin/sudo + chown root:root /usr/bin/sudo + chmod 4755 /usr/bin/sudo + ``` + +- 3.4 预安装失败 + + ![](figures/615c53892ab3f51d91a7b1e6.png) + + 本问题先参考了链接[openGaussDB 初体验(上) - 云+社区 - 腾讯云 \(tencent.com\)](https://cloud.tencent.com/developer/article/1675265)以下内容,但是没有解决。 + + ![](figures/615c14052ab3f51d9145c37e.png) + + 我解决这个问题的过程是这样的:找到虚拟网络编辑器,电脑连了自己的热点(我听我同学说她的用校园网就不行),然后还原默认设置: + + ![](figures/615c16f62ab3f51d914a1b7d.png) + + ![](figures/615c14a32ab3f51d9146b955.png) + + 然后配置了静态的ip地址,参考了[ CentOS 7 连接不到网络解决方法\(设置静态ip\)_gaokcl的博客-CSDN博客_centos7无法连接网络](https://blog.csdn.net/gaokcl/article/details/82834925?utm_medium=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.no_search_link&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.no_search_link)。但是神奇的是,这样就可以了。不过后来还是重装了。 + +- 3.5 重装openGauss时端口被占用 + + 报错:\[GAUSS-50601\] : The port \[15400\] is occupied or the ip address is incorrectl,有两种方法: + + 修改xml文件中的端口号 + + 杀掉占用端口的进程 + +- 3.6 右上角网络连接图标消失 + + 参考了[centos7右上角网络连接图标消失_shuest的博客-CSDN博客_centos7右上角没有网络图标](https://blog.csdn.net/zs391077005/article/details/106885104?utm_medium=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-1.no_search_link&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-1.no_search_link) + + ``` + chkconfig network off + chkconfig network on + service NetworkManager stop + service NetworkManager start + ``` + + 但是有可能遇到后两条命令用不了,然后又去查怎么办,最后也没解决,我重装了。累了累了。 + +- 3.7 循环显示登录界面无法进入 + + 看图吧,我最后又进安全模式解决的,最后修改/etc/selinux/config配置,将SELINUX选项由SELINUX=enforcing改成SELINUX=disabled,重启系统后发现就可以正常登陆系统了: + + ![](figures/615c14052ab3f51d9145c371.png) + + +- 3.8 Connection refused + + 首先需要启动数据库,不启动数据库会出现如下错误: + + 未设置服务端远程连接也会出现以上问题,见2.3.3 + + ![](figures/615c15482ab3f51d9147a2aa.png) + +- 3.9 加载驱动出现问题 + + 以下是开发流程: + + ![](figures/采用JDBC开发应用程序的流程.png) + + 驱动需要按照2.3.1所说,放在指定文件夹下,不然在加载驱动的时候会出现问题。 + + +- 3.10 unreported exception SQLException + + 在本地编译java服务的时候,编译报错:未报告的异常错误; 必须对其进行捕获或声明以便抛出。 + + ![](figures/615fff622ab3f51d91b644eb.jpg) + + 添加代码throw SQLException即可: + + ![](figures/615ffeef2ab3f51d91b5bb72.jpg) + + diff --git "a/content/zh/post/2022/\345\233\275\344\272\247\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223openGauss\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" 
"b/content/zh/post/2022/\345\233\275\344\272\247\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223openGauss\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..0dfb1bd4d3d1f11a57815166552857383961676a --- /dev/null +++ "b/content/zh/post/2022/\345\233\275\344\272\247\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223openGauss\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" @@ -0,0 +1,547 @@ ++++ + +title = "国产开源数据库openGauss的安装运行" + +date = "2021-12-27" + +tags = [ "国产开源数据库openGauss的安装运行"] + +archives = "2021-12" + +author = "adadaadadade" + +summary = "国产开源数据库openGauss的安装运行" + +img = "/zh/post/2022/title/img5.png" + +times = "12:30" + ++++ + +# 国产开源数据库openGauss的安装运行 + +## 步骤一:OpenGauss的安装 + +- 环境 + + OS:openEuler 20.03 64bit with ARM + + 架构:arm64 + + 部署:单机 + + +- 安装过程 + - 1、环境配置 + + 安装依赖包: + + ``` + yum install libaio-devel flex bison ncurses-devel glibc-devel patch readline-devel + ``` + + - 2、创建xml配置文件 + + 创建cluster\_config.xml配置文件并进行配置 + + ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ``` + + 注意节点hostname应与/etc/hostname中保持一致 + + - 3、初始化安装环境 + + 1.以root用户登录待安装openGauss的任意主机,并按规划创建存放安装包的目录。 + + ``` + mkdir -p /opt/software/openGauss + chmod 755 -R /opt/software + ``` + + 2.下载安装包并将配置文件“cluster\_config.xml”都上传至上一步所创建的目录中。 + + ``` + wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/arm/openGauss-2.0.1-openEuler-64bit-all.tar.gz + ``` + + 3.解压安装包。 + + ``` + tar -zxvf openGauss-2.0.1-openEuler-64bit-all.tar.gz + tar jxvf openGauss-2.0.1-openEuler-64bit.tar.bz2 + tar -zxvf openGauss-2.0.1-openEuler-64bit-om.tar.gz + tar -zxvf upgrade_sql.tar.gz + ``` + + 4.进入到工具脚本存放目录下。 + + ``` + cd /opt/software/openGauss/script + ``` + + 5.如果是openEuler的操作系统为确保适配python版本,执行如下命令打开gspylib/common/CheckPythonVersion.py文件,将if not pythonVersion = = \(3, 6\):修改为if not pythonVersion \> = \(3, 6\):,键入“ESC”键进入指令模式,执行\*\*:wq\*\*保存并退出修改。(我在实际操作中进入后发现无需修改) + + ``` + vi gspylib/common/CheckPythonVersion.py + ``` + + 6.如果是openEuler的操作系统,执行如下命令打开performance.sh文件,用\#注释sysctl -w vm.min\_free\_kbytes=112640 &\> /dev/null,键入“ESC”键进入指令模式,执行\*\*:wq\*\*保存并退出修改。 + + ``` + vi /etc/profile.d/performance.sh + ``` + + 7.为确保openssl版本正确,执行预安装前请加载安装包中lib库。执行命令如下,其中\_\{packagePath\}\_为用户安装包放置的路径,本示例中为/opt/software/openGauss。 + + ``` + export LD_LIBRARY_PATH=/opt/software/openGauss/script/gspylib/clib:$LD_LIBRARY_PATH + ``` + + 8.为确保成功安装,检查 hostname 与 /etc/hostname 是否一致。预安装过程中,会对hostname进行检查。 + + 9.使用gs\_preinstall准备好安装环境。若为共用环境需加入–sep-env-file=ENVFILE参数分离环境变量,避免与其他用户相互影响,ENVFILE为用户自行指定的环境变量分离文件的路径。 + + 采用交互模式执行前置,并在执行过程中自动创建操作系统root用户互信和omm用户互信: + + ./gs\_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster\_config.xml + + 在执行中会要求输入omm用户的密码。 + + 运行结果应类似: + + ``` + plat1:/opt/software/openGauss/script # ./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml + Parsing the configuration file. + Successfully parsed the configuration file. + Installing the tools on the local node. + Successfully installed the tools on the local node. + Are you sure you want to create trust for root (yes/no)? yes + Please enter password for root. + Password: + Creating SSH trust for the root permission user. + Checking network information. + All nodes in the network are Normal. + Successfully checked network information. + Creating SSH trust. + Creating the local key file. + Successfully created the local key files. 
+ Appending local ID to authorized_keys. + Successfully appended local ID to authorized_keys. + Updating the known_hosts file. + Successfully updated the known_hosts file. + Appending authorized_key on the remote node. + Successfully appended authorized_key on all remote node. + Checking common authentication file content. + Successfully checked common authentication content. + Distributing SSH trust file to all node. + Successfully distributed SSH trust file to all node. + Verifying SSH trust on all hosts. + Successfully verified SSH trust on all hosts. + Successfully created SSH trust. + Successfully created SSH trust for the root permission user. + Setting pssh path + Successfully set core path. + Distributing package. + Begin to distribute package to tool path. + Successfully distribute package to tool path. + Begin to distribute package to package path. + Successfully distribute package to package path. + Successfully distributed package. + Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes + Please enter password for cluster user. + Password: + Please enter password for cluster user again. + Password: + Successfully created [omm] user on all nodes. + Preparing SSH service. + Successfully prepared SSH service. + Installing the tools in the cluster. + Successfully installed the tools in the cluster. + Checking hostname mapping. + Successfully checked hostname mapping. + Creating SSH trust for [omm] user. + Checking network information. + All nodes in the network are Normal. + Successfully checked network information. + Creating SSH trust. + Creating the local key file. + Successfully created the local key files. + Appending local ID to authorized_keys. + Successfully appended local ID to authorized_keys. + Updating the known_hosts file. + Successfully updated the known_hosts file. + Appending authorized_key on the remote node. + Successfully appended authorized_key on all remote node. + Checking common authentication file content. + Successfully checked common authentication content. + Distributing SSH trust file to all node. + Successfully distributed SSH trust file to all node. + Verifying SSH trust on all hosts. + Successfully verified SSH trust on all hosts. + Successfully created SSH trust. + Successfully created SSH trust for [omm] user. + Checking OS software. + Successfully check os software. + Checking OS version. + Successfully checked OS version. + Creating cluster's path. + Successfully created cluster's path. + Setting SCTP service. + Successfully set SCTP service. + Set and check OS parameter. + Setting OS parameters. + Successfully set OS parameters. + Preparing CRON service. + Successfully prepared CRON service. + Setting user environmental variables. + Successfully set user environmental variables. + Setting the dynamic link library. + Successfully set the dynamic link library. + Setting Core file + Successfully set core path. + Setting pssh path + Successfully set pssh path. + Set ARM Optimization. + No need to set ARM Optimization. + Fixing server package owner. + Setting finish flag. + Successfully set finish flag. + Preinstallation succeeded. 
+ ``` + + - 4、执行安装 + + 内存小于安装要求的32G应该做一些配置修改: + + ``` + # vim /etc/sysctl.conf + kernel.shmall = 1125899906842624 + kernel.shmmax = 1351079888211149 + + # vim /opt/huawei/install/data/db1/postgresql.conf + cstore_buffers=16MB + bulk_write_ring_size=128MB + shared_buffers=128MB + max_process_memory=2GB + max_connections=10 + ``` + + 切换到omm用户下执行安装: + + ``` + su - omm + gs_install -X /opt/software/openGauss/cluster_config.xml + ``` + + + +## 步骤二 数据库的简单链接与使用 + +- 1、创建新用户,新数据库并赋予权限 + + 使用gsql 用omm 管理账号登陆,创建新用户jack,创建新数据库testjack,赋予权限,执行 + + ``` + CREATE USER jack PASSWORD 'Gaussdba@Mpp'; + CREATE DATABASE testjack OWNER jack; + GRANT SELECT ON pg_catalog.pg_roles to jack; + GRANT SELECT ON pg_catalog.pg_user_status to jack; + GRANT ALL PRIVILEGES on TABLESPACE pg_default,pg_global TO jack; + ``` + + 然后退出,使用jack用户登录gsql + + ``` + gsql -U jack -d testjack -p "Gaussdba@Mpp" + ``` + + 创建 SCHEMA + + ``` + CREATE SCHEMA jack AUTHORIZATION jack; + ``` + + 退出gsql,赋予jack权限,这里client\_address是客户端的地址 + + ``` + gs_guc set -N all -I all -h "host all jack client_address/32 sha256 + ``` + + 或者也可以修改pg\_hba.conf,添加 + + ``` + host all jack client_address/32 sha256 + ``` + +- 2、允许客户端访问数据库 + + 执行,这里的client\_address是要客户端的地址, listen\_addresses是参数名。 + + ``` + gs_guc set -I all -c "listen_addresses='client_address'" + ``` + + 或在使用omm账号在gsql中 + + ``` + ALTER SYSTEM SET listen_addresses TO "client_address"; + ``` + + 之后重启数据库 + +- 3、关闭防火墙,打开端口 +- 4、使用Data Studio 访问数据库 + + 可在opengauss官网下载DATA STUDIO应用 + + 填入对应参数,注意这里应去掉启用SSL的选项,因为SSL还需要配置证书或密钥。 + + 连接后的界面 + +- 5、使用JDBC访问数据库 + + 我这里使用windows系统作为客户端连接数据库。 + + 在opengauss网站下载对应的JDBC包,并解压。 + + 创建Gauss.java文件 + + ``` + import java.sql.Connection; + import java.sql.DriverManager; + import java.sql.PreparedStatement; + import java.sql.ResultSet; + import java.sql.ResultSetMetaData; + import java.sql.SQLException; + import java.sql.Statement; + import java.sql.CallableStatement; + import java.sql.Types; + import java.util.Collections; + import java.util.Properties; + + public class Gauss { + + public static void main(String[] args) { + Connection connection; + ResultSet resultSet; + String url = "jdbc:postgresql://address:port/testjack"; //address 地址 port 端口 testjack 数据库名 + String user = "××××"; // 数据库用户名 + String password = "××××"; // 对应密码 + String sql; + if(args.length > 0) + { + sql = args[0]; + } + else + { + System.out.println("输入一条sql语句"); + return; + } + + if ((connection = getConnect(user, password, url)) != null) { + System.out.println(connection.toString()); + } + + if ((resultSet = execSql(connection, sql)) != null) + { + + + } + } + + // 以下代码将获取数据库连接操作封装为一个接口,可通过给定用户名和密码来连接数据库。 + public static Connection getConnect(String username, String passwd, String url) { + // 驱动类。 + String driver = "org.postgresql.Driver"; + // 数据库连接描述符。 + String sourceURL = url; + Connection conn = null; + + try { + // 加载驱动。 + Class.forName(driver); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + try { + // 创建连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + return conn; + }; + + // 以下代码将使用Properties对象作为参数建立连接 + public static Connection getConnectUseProp(String username, String passwd, String url) { + // 驱动类。 + String driver = "org.postgresql.Driver"; + // 数据库连接描述符。 + String sourceURL = url + "?"; + Connection conn = null; + Properties info = new Properties(); + + try { + // 加载驱动。 + Class.forName(driver); + } catch (Exception e) { + 
e.printStackTrace();  // a ClassNotFoundException here usually means postgresql.jar is missing from the classpath
+                return null;
+            }
+
+            try {
+                info.setProperty("user", username);
+                info.setProperty("password", passwd);
+                // Create the connection.
+                conn = DriverManager.getConnection(sourceURL, info);
+                System.out.println("Connection succeed!");
+            } catch (Exception e) {
+                e.printStackTrace();
+                return null;
+            }
+
+            return conn;
+        };
+
+        public static ResultSet execSql(Connection conn, String sql) {
+            Statement stmt = null;
+            ResultSet rs = null;
+            // Fully qualified because java.sql.SQLWarning is not in the import list above.
+            java.sql.SQLWarning sqlw = null;
+            try {
+                stmt = conn.createStatement();
+                // Execute a plain SQL statement.
+                stmt.execute(sql);
+                if((sqlw = stmt.getWarnings()) != null)
+                    System.out.println(sqlw.toString());
+                if((rs = stmt.getResultSet()) != null)
+                    printResultSet(rs);
+
+                // Note: closing the statement also invalidates the returned
+                // result set; rs has already been fully printed above.
+                stmt.close();
+            } catch (SQLException e) {
+                if (stmt != null) {
+                    try {
+                        stmt.close();
+                    } catch (SQLException e1) {
+                        e1.printStackTrace();
+                    }
+                }
+                e.printStackTrace();
+            }
+            return rs;
+        }
+
+
+        private static void printResultSet(ResultSet rs)
+        {
+            String line = "";
+            try {
+                ResultSetMetaData rsmd = rs.getMetaData();
+                // Print the column labels, and build a dashed separator line.
+                for(int i = 1; i <= rsmd.getColumnCount(); i ++)
+                {
+                    String label = rsmd.getColumnLabel(i).toString();
+                    System.out.print(label + "\t");
+                    line += String.join("", Collections.nCopies(label.length(), "-")) + "\t";
+                }
+                System.out.println("\n" + line);
+
+                while(rs.next())
+                {
+                    for(int i = 1; i <= rsmd.getColumnCount(); i ++)
+                    {
+                        System.out.print(rs.getObject(i).toString() + "\t");
+                    }
+                    System.out.println("");
+
+                }
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+
+    }
+    ```
+
+    Compile it:
+
+    ```
+    javac .\Gauss.java -encoding "utf-8"
+    ```
+
+    Run it. Here postgresql.jar sits in the same directory, and a table nt is created as a test:
+
+    ```
+    java -cp ".;postgresql.jar" Gauss "CREATE TABLE nt(id INTEGER, name VARCHAR(20))"
+    java -cp ".;postgresql.jar" Gauss "INSERT into nt(id, name) VALUES (1,'n1'),(2,'n2'),(3,'n3');"
+    java -cp ".;postgresql.jar" Gauss "SELECT * FROM nt;"
+    ```
+
+    The last command prints the following output, showing that the connection and the operations all succeeded:
+
+    ```
+    九月 13, 2021 11:58:25 上午 org.postgresql.core.v3.ConnectionFactoryImpl openConnectionImpl
+    信息: [75000bb7-1475-4579-94cb-f53a01bec9eb] Try to connect. IP: *.*.*.*:****
+    九月 13, 2021 11:58:26 上午 org.postgresql.core.v3.ConnectionFactoryImpl openConnectionImpl
+    信息: [*.*.*.*:****/*.*.*.*:****] Connection is established. ID: 75000bb7-1475-4579-94cb-f53a01bec9eb
+    九月 13, 2021 11:58:26 上午 org.postgresql.core.v3.ConnectionFactoryImpl openConnectionImpl
+    信息: Connect complete. ID: 75000bb7-1475-4579-94cb-f53a01bec9eb
+    Connection succeed!
+ id name + -- ---- + 1 n1 + 2 n2 + 3 n3 + ``` + + diff --git "a/content/zh/post/2022/\345\237\272\344\272\216openGauss\345\255\246\344\271\240Docker.md" "b/content/zh/post/2022/\345\237\272\344\272\216openGauss\345\255\246\344\271\240Docker.md" new file mode 100644 index 0000000000000000000000000000000000000000..a95c4c20775eb13c02216c8a41f79983428a2705 --- /dev/null +++ "b/content/zh/post/2022/\345\237\272\344\272\216openGauss\345\255\246\344\271\240Docker.md" @@ -0,0 +1,700 @@ ++++ + +title = "基于openGauss学习Docker" + +date = "2022-01-07" + +tags = [ "基于openGauss学习Docker"] + +archives = "2022-01" + +author = "张玉龙 " + +summary = "基于openGauss学习Docker" + +img = "/zh/post/2022/title/img7.png" + +times = "12:30" + ++++ + +# 基于openGauss学习Docker + +学习了一些开源产品,在潜移默化的发现,是时候该学习下 Docker 了,很多产品都制作了 Docker 镜像,想测试这些产品的时候,使用 Docker 安装就会显得特别方便。 + +## 简单介绍 + +openGauss 是一款高性能、高安全、高可靠的企业级开源关系型数据库。 + +opengauss 在开源后,云和恩墨第一时间制造了docker版本。 + +![](figures/20211204-10f40098-2578-4da8-83c9-dd493f7d3111.png) + +Docker 是基于 Go 语言开发的,开源项目 + +- 官网: [https://www.docker.com/](https://www.docker.com/) +- 文档: [https://docs.docker.com/](https://docs.docker.com/) +- 仓库: [https://hub.docker.com/](https://hub.docker.com/) + +![](figures/20211204-17ff081b-5a00-4c19-974a-69a531902983.png) + +## 安装 Docker + +需要注意 Docker 支持的平台:[https://docs.docker.com/engine/install/](https://docs.docker.com/engine/install/) + +![](figures/20211204-00e0901d-e71f-46d3-95ed-9e14cb28b1ac.png) + +![](figures/20211204-32f1f188-106d-4627-8b7c-c939ddcb1c59.png) + +我的测试环境是 CentOS 7.6 + +``` +# 1. Uninstall old versions 卸载旧版本 +yum remove docker \ + docker-client \ + docker-client-latest \ + docker-common \ + docker-latest \ + docker-latest-logrotate \ + docker-logrotate \ + docker-engine + +# 2. 需要的安装包,默认是国外的地址,很慢,这里使用阿里云的 +yum install -y yum-utils + +# 3. 设置镜像的仓库 +yum-config-manager \ + --add-repo \ + https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo + +# 4. 更新软件包的索引 +yum makecache fast + +# 5. 
安装 dokcer docker-ce是指社区版 +yum install -y docker-ce docker-ce-cli containerd.io +``` + +安装的时候遇到一个小插曲,安装了python3没有修改yum-config-manager文件 + +``` +[root@mogdb ~]# yum-config-manager \ +> --add-repo \ +> https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo + File "/usr/bin/yum-config-manager", line 135 + except yum.Errors.RepoError, e: + ^ +SyntaxError: invalid syntax + +[root@mogdb ~]# which yum-config-manager +/usr/bin/yum-config-manager +[root@mogdb ~]# vi /usr/bin/yum-config-manager # 换成 python2 +#!/usr/bin/python2 -tt + +[root@mogdb ~]# yum-config-manager \ + --add-repo \ + https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo + +Loaded plugins: fastestmirror +adding repo from: https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo +grabbing file https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo +repo saved to /etc/yum.repos.d/docker-ce.repo +``` + +启动并测试 Docker + +``` +# 启动dokcer +[root@mogdb ~]# systemctl start docker +[root@mogdb ~]# docker version +Client: Docker Engine - Community + Version: 20.10.11 + API version: 1.41 + Go version: go1.16.9 + Git commit: dea9396 + Built: Thu Nov 18 00:38:53 2021 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.11 + API version: 1.41 (minimum version 1.12) + Go version: go1.16.9 + Git commit: 847da18 + Built: Thu Nov 18 00:37:17 2021 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.4.12 + GitCommit: 7b11cfaabd73bb80907dd23182b9347b4245eb5d + runc: + Version: 1.0.2 + GitCommit: v1.0.2-0-g52b36a2 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 + +# 测试 hello-world +[root@mogdb ~]# docker run hello-world + +# 查看下载的 hello-world 镜像 +[root@mogdb ~]# docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +hello-world latest feb5d9fea6a5 2 months ago 13.3kB +``` + +## 卸载 Docker + +``` +# 1. 卸载 +yum remove docker-ce docker-ce-cli containerd.io +# 2. 删除 +rm -rf /var/lib/docker +rm -rf /var/lib/containerd +``` + +## 配置阿里云镜像加速 + +``` +mkdir -p /etc/docker +tee /etc/docker/daemon.json <<-'EOF' +{ + "registry-mirrors": ["https://xe6vk78x.mirror.aliyuncs.com"] +} +EOF +systemctl daemon-reload +systemctl restart docker +``` + +## Docker为什么比 VM 快 + +![](figures/20211204-c6e730da-357c-4f03-92d7-95d47f015284.png) + +Docker 有着比虚拟机更少的抽象层 + +Docker 利用的是宿主机的内核,vm需要虚拟机操作系统 + +## Docker的常用命令 + +![](figures/20211204-a73fa928-786e-406e-8289-c87c4275ab5f.png) + +## 基于openGauss 学习这些命令 + +- 基础命令 + + ``` + docker version # 显示 docker 的版本信息 + docker info # 显示 docker 的系统信息,包括镜像和容器的数量 + docker command --help # 帮助 + ``` + +- 镜像命令 + - 搜索镜像,搜索 opengauss 的镜像 + + ``` + [root@mogdb ~]# docker search opengauss + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + enmotech/opengauss openGauss latest images created by Enmotech 12 + ... ... 
+ # 可选项 + --filter=STARS=3000 # 搜索出来的镜像就是STARS(收藏数)大于3000的 + ``` + + - 下载镜像, 下载 opengauss 的镜像 + + ``` + # Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] + [root@mogdb ~]# docker pull enmotech/opengauss + Using default tag: latest # 如果不写tag, 默认就是latest + latest: Pulling from enmotech/opengauss + 284055322776: Pull complete # 分成下载,docker images的核心,联合文件系统 + a7ca82b898d7: Pull complete + 2f93c23d8eb5: Pull complete + 3842013b7685: Pull complete + 6bc7e92855e3: Pull complete + 39c9c4e5b487: Pull complete + 1f9d76df94b5: Pull complete + 44db1c59ef84: Pull complete + 63ab02376fd3: Pull complete + cf751b0b3be9: Pull complete + 9dc428e2c8b4: Pull complete + Digest: sha256:d5a3e38fa2553a44e7fa1cd5cad0b4f0845a679858764067d7b0052a228578a0 # 签名 + Status: Downloaded newer image for enmotech/opengauss:latest + docker.io/enmotech/opengauss:latest # 真实地址 + + # 指定版本下载 + [root@mogdb ~]# docker pull enmotech/opengauss:2.0.1 + ``` + + - 查看镜像 + + ``` + [root@mogdb ~]# docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + enmotech/opengauss latest b4dd24d09223 2 months ago 383MB + enmotech/opengauss 2.0.1 c3860afd8014 3 months ago 404MB + + # 可选项 + -a, --all Show all images (default hides intermediate images) + -q, --quiet Only show image IDs + ``` + + - 删除镜像 + + ``` + [root@mogdb ~]# docker rmi -f c3860afd8014 + Untagged: enmotech/opengauss:2.0.1 + Untagged: enmotech/opengauss@sha256:d156596b2900f7eda102aadfd951daad97412b610b96d3dd97d2cdd9d5b70024 + Deleted: sha256:c3860afd80148a6cfbb50269ef47f87257f2ed3fbf143f52b861303b98834833 + Deleted: sha256:193b45dffb62df01fa6c74bef9cf21774fdd550c5995f02bef28f30070db6859 + Deleted: sha256:32ba36efbf27ac2f485d1915fea35ec2a17c9d1b19d373d1edf49fd0f4b6a8de + Deleted: sha256:ca0fd6097e9cf0aae5a1d5047f9b6bda30305a13396313b5cd021530be69bc9d + Deleted: sha256:5d7f04d4882448ed954afc8d797069f3aede8ccc65a650086b9436f75fa11700 + Deleted: sha256:40a09fca023bf0d0353606c2684ba47d73979ffc6cae2dd4a4953d5796c8cb0d + Deleted: sha256:8828e1e7978fba035a5305d8684b94ed322842ed095eb46bffcdef17ad2e091a + Deleted: sha256:e7c2553c8389d79197d6c3ba7c731292cd772588d252683cf706cb660c6e46f0 + Deleted: sha256:d2292dd078208e84e70124123ffc4ebac5c304816a753db61da04f1e7d8a3663 + Deleted: sha256:8d78df12722212e140ae7ba4441c7f9a36365074779b6ca880f097c6e237f9e3 + Deleted: sha256:4f785c07c19d588e80313fc0ee644a19ac6e17a550a9e694c22babc355152367 + Deleted: sha256:21639b09744fc39b4e1fe31c79cdf54470afe4d7239a517c4060bd181f8e3039 + + # 通过ID删除全部容器 + [root@mogdb ~]# docker rmi -f $(docker images -aq) + ``` + + + +- 容器命令 + - 新建容器并启动 + + ``` + # Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + # 参数说明 + --name # 容器名字,区分容器 + --privileged=true # 使用该参数,容器内的 root 拥有真正的 root 权限。 + -d # 后台方式运行 + -e # 设置环境变量 + -it # 使用交互方式运行,进入容器查看内容 + -p(小) # 容器和宿主机的端口映射 + -p ip:主机端口:容器端口 + -p 主机端口:容器端口 (常用) + -p 容器端口 + -P(大) # 随机指定端口 + + # 新建 opengauss 的容器并后台启动,配置密码 Enmo@123,端口映射 + # docker run --name opengauss --privileged=true -d -e GS_PASSWORD=Enmo@123 -p 5432:5432 enmotech/opengauss:latest + ad8892ff8b45fc3329ed76afd634de136ec7b67fb2ba02221a0ee8886ee932b8 + ``` + + - 列出所有运行的容器 + + ``` + # docker ps + -a # 列出当前正在运行的容器 + 历史运行过的容器 + -n=? 
# 显示最近创建的容器 + -q # 只显示容器的编号 + + [root@mogdb ~]# docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ad8892ff8b45 enmotech/opengauss:latest "entrypoint.sh gauss…" 5 minutes ago Up 5 minutes 0.0.0.0:5432->5432/tcp, :::5432->5432/tcp opengauss + ``` + + - 启停容器 + + ``` + docker start 容器id + docker restart 容器id + docker stop 容器id + docker kill 容器id + ``` + + - 进入当前正在运行的容器 + + ``` + docker exec -it 容器id /bin/bash # 进入容器后开启一个新的终端,可以在里面操作(常用) + docker attach 容器id # 进入容器正在执行的终端,不会启动新的进程 + + # 进入 opengauss 的容器内,在数据库中创建普通用户,测试外部连接 + [root@mogdb ~]# docker exec -it ad8892ff8b45 /bin/bash + root@ad8892ff8b45:/# su - omm + omm@ad8892ff8b45:~$ gsql + gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + omm=# CREATE USER tpcc_usr WITH PASSWORD "tpcc@1234"; + NOTICE: The encrypted password contains MD5 ciphertext, which is not secure. + CREATE ROLE + omm=# alter user tpcc_usr sysadmin; + ALTER ROLE + omm=# GRANT ALL ON schema public TO tpcc_usr; + GRANT + ``` + + ![](figures/20211204-fc1c14b8-f666-4600-b21e-b73aec582740.png) + + - 退出容器 + + ``` + exit # 退出并停止容器,后台运行的容器不会停止 + Ctrl + P + Q # 容器不停止的退出 + ``` + + - 删除容器 + + ``` + docker rm 容器ID # 删除指定的容器,不能删除正在运行的容器,如果强制删除 rm -f + docker rm -f $(docker ps -aq) # 删除所有容器 + docker ps -aq|xargs docker rm # 删除所有容器 + ``` + + +- 其他命令 + - 查看日志 + + ``` + [root@mogdb ~]# docker logs -f -t --tail 10 ad8892ff8b45 + 2021-12-04T12:24:31.809995352Z 2021-12-04 12:24:31.809 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [BACKEND] LOG: the configure file /usr/local/opengauss/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user! + 2021-12-04T12:24:31.810007421Z 2021-12-04 12:24:31.809 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [BACKEND] LOG: Failed to parse cgroup config file. + 2021-12-04T12:24:31.831906329Z 2021-12-04 12:24:31.831 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] WARNING: Failed to obtain environment value $GAUSSLOG! + 2021-12-04T12:24:31.831931488Z 2021-12-04 12:24:31.831 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] DETAIL: N/A + 2021-12-04T12:24:31.831934584Z 2021-12-04 12:24:31.831 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] CAUSE: Incorrect environment value. + 2021-12-04T12:24:31.831936999Z 2021-12-04 12:24:31.831 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] ACTION: Please refer to backend log for more details. + 2021-12-04T12:24:31.833046968Z 2021-12-04 12:24:31.832 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] WARNING: Failed to obtain environment value $GAUSSLOG! + 2021-12-04T12:24:31.833057677Z 2021-12-04 12:24:31.832 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] DETAIL: N/A + 2021-12-04T12:24:31.833060758Z 2021-12-04 12:24:31.832 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] CAUSE: Incorrect environment value. + 2021-12-04T12:24:31.833063164Z 2021-12-04 12:24:31.832 [unknown] [unknown] localhost 140460925998016 0[0:0#0] 0 [EXECUTOR] ACTION: Please refer to backend log for more details. + ``` + + - 查看容器内的进程信息 + + ``` + [root@mogdb ~]# docker top ad8892ff8b45 + UID PID PPID C STIME TTY TIME CMD + 70 26782 26762 2 20:24 ? 
00:00:21 gaussdb + ``` + + - 查询所有容器的资源使用信息 + + ``` + [root@mogdb ~]# docker stats + CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS + ad8892ff8b45 opengauss 2.65% 374.9MiB / 7.62GiB 4.80% 10.2kB / 58.2kB 6.77MB / 38.1MB 34 + ``` + + - 查看容器的元数据 + + ``` + [root@mogdb ~]# docker inspect ad8892ff8b45 + ``` + + - 从容器内复制文件到宿主机上 + + ``` + docker cp 容器id:PWD/file /home + ``` + + + +## Docker镜像加载原理 + +[https://blog.csdn.net/pjsdsg/article/details/90445128](https://blog.csdn.net/pjsdsg/article/details/90445128) + +- 容器的快照 commit + + ``` + docker commit # 保存容器成为一个新的副本 + docker commit -m="提交的描述信息" -a="作者” 容器id 目标镜像名:[TAG] + ``` + + 测试 commit,前面为了测试外部连接数据库,在数据库中创建了一个普通用户 tpcc\_usr ,下面保存这个容器成为一个新的副本 + + ``` + [root@mogdb ~]# docker commit -a="create database user tpcc" ad8892ff8b45 opengauss:1.0 + sha256:1e760f8f2f3ddf707cb661bdd8549728bdb0ecd83d1166c6f9f350880464c654 + [root@mogdb ~]# docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + opengauss 1.0 1e760f8f2f3d 15 seconds ago 1.21GB #<<<<<<< + enmotech/opengauss latest b4dd24d09223 2 months ago 383MB + ``` + + 使用新创建的副本来新建一个容器,查看用户是否存在 + + ``` + # 容器名opengauss2,端口映射为5433,使用镜像ID是1e760f8f2f3d + [root@mogdb ~]# docker run --name opengauss2 --privileged=true -d -e GS_PASSWORD=Enmo@123 -p 5433:5432 1e760f8f2f3d + 0a1c49aaa9114f19e33fef20753be092f923ffe558aa1d4251c55d3948dff486 + [root@mogdb ~]# docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 0a1c49aaa911 1e760f8f2f3d "entrypoint.sh gauss…" 8 seconds ago Up 8 seconds 0.0.0.0:5433->5432/tcp, :::5433->5432/tcp opengauss2 #<<<<<<< + ad8892ff8b45 enmotech/opengauss:latest "entrypoint.sh gauss…" 46 minutes ago Up 37 minutes 0.0.0.0:5432->5432/tcp, :::5432->5432/tcp opengauss + + [root@mogdb ~]# docker exec -it 0a1c49aaa911 /bin/bash + root@0a1c49aaa911:/# su - omm + omm@0a1c49aaa911:~$ gsql + gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + omm=# \du + List of roles + Role name | Attributes | Member of + -----------+------------------------------------------------------------------------------------------------------------------+----------- + gaussdb | Sysadmin | {} + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} + tpcc_usr | Sysadmin | {} + + omm=# + ``` + + tpcc\_usr 用户存在,使用外部工具测试连接 + + ![](figures/20211204-ba7b78a2-3978-45b4-b868-61334e4087f2.png) + +- 容器数据卷,持久化数据 + + ![](figures/20211204-f08b84a5-2be7-4bc4-826a-397c9ad77d79.png) + + 查看 openGauss 的数据文件路径 + + ``` + omm@0a1c49aaa911:~$ gsql + gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. 
+ + omm=# show data_directory; + data_directory + ------------------------- + /var/lib/opengauss/data + (1 row) + ``` + + 指定路径挂载 + + ``` + # 指定路径挂载,ro只读,rw读写,设置 ro ,只能在宿主机上执行写操作,容器内部是只读的 + docker run -it -v 主机目录:容器内目录:ro/rw + + # 测试 + # mkdir /root/data + docker run --name opengauss03 --privileged=true -d \ + -e GS_PASSWORD=Enmo@123 -p 5434:5432 \ + -v /root/data:/var/lib/opengauss/data \ + 1e760f8f2f3d + + ee6e88d47dd90c7efbae1d85e33cedee3d649e33518a64f3ae99d6b8575bffb8 + + [root@mogdb ~]# docker inspect ee6e88d47dd9 + + "Mounts": [ + { + "Type": "bind", + "Source": "/root/data", + "Destination": "/var/lib/opengauss/data", + "Mode": "", + "RW": true, + "Propagation": "rprivate" + } + ], + + [root@mogdb ~]# cd /root/data/ + [root@mogdb data]# ll + total 4900 + drwx------ 3 70 70 21 Dec 4 11:51 asp_data + drwx------ 7 70 70 71 Dec 4 11:51 base + -rw------- 1 70 70 72 Dec 4 21:16 gaussdb.state + drwx------ 2 70 70 4096 Dec 4 21:17 global + -rw------- 1 70 70 354 Dec 4 11:50 gs_gazelle.conf + drwx------ 3 70 70 21 Dec 4 11:51 gs_profile + -rw------- 1 70 70 4915200 Dec 4 11:51 gswlm_userinfo.cfg + -rw------- 1 70 70 20238 Dec 4 11:51 mot.conf + drwx------ 3 70 70 50 Dec 4 11:51 pg_audit + drwx------ 2 70 70 26 Dec 4 11:50 pg_clog + drwx------ 2 70 70 26 Dec 4 11:50 pg_csnlog + -rw------- 1 70 70 0 Dec 4 11:51 pg_ctl.lock + drwx------ 2 70 70 6 Dec 4 11:50 pg_errorinfo + -rw------- 1 70 70 4553 Dec 4 11:51 pg_hba.conf + -rw------- 1 70 70 1636 Dec 4 11:50 pg_ident.conf + drwx------ 4 70 70 39 Dec 4 11:50 pg_llog + drwx------ 2 70 70 6 Dec 4 11:50 pg_location + drwx------ 2 70 70 126 Dec 4 21:16 pg_log + drwx------ 4 70 70 36 Dec 4 11:50 pg_multixact + drwx------ 2 70 70 26 Dec 4 21:16 pg_notify + drwx------ 3 70 70 21 Dec 4 11:51 pg_perf + drwx------ 2 70 70 6 Dec 4 11:50 pg_replslot + drwx------ 2 70 70 6 Dec 4 11:50 pg_serial + drwx------ 2 70 70 6 Dec 4 11:50 pg_snapshots + drwx------ 2 70 70 25 Dec 4 22:10 pg_stat_tmp + drwx------ 2 70 70 6 Dec 4 11:50 pg_tblspc + drwx------ 2 70 70 6 Dec 4 11:50 pg_twophase + -rw------- 1 70 70 4 Dec 4 11:50 PG_VERSION + drwx------ 3 70 70 4096 Dec 4 11:51 pg_xlog + -rw------- 1 70 70 31669 Dec 4 11:51 postgresql.conf + -rw------- 1 70 70 1024 Dec 4 11:50 postgresql.conf.lock + -rw------- 1 70 70 33 Dec 4 21:16 postmaster.opts + -rw------- 1 70 70 68 Dec 4 21:16 postmaster.pid + drwx------ 3 70 70 21 Dec 4 11:51 sql_monitor + drwx------ 5 70 70 67 Dec 4 21:16 undo + ``` + + 具名和匿名挂载 + + ``` + # 匿名挂载 -v 容器内的路径 + docker run --name opengauss04 --privileged=true -d \ + -e GS_PASSWORD=Enmo@123 -p 5435:5432 \ + -v /var/lib/opengauss/data \ + 1e760f8f2f3d + + "Mounts": [ + { + "Type": "volume", + "Name": "e1f39b76c16ef76392b3a3a8312edc0f8c3e033c8c59d6ab60a6429c20236f62", + "Source": "/var/lib/docker/volumes/e1f39b76c16ef76392b3a3a8312edc0f8c3e033c8c59d6ab60a6429c20236f62/_data", + "Destination": "/var/lib/opengauss/data", + "Driver": "local", + "Mode": "", + "RW": true, + "Propagation": "" + } + ], + + # 查看所有 volume 的情况 + [root@mogdb ~]# docker volume ls + DRIVER VOLUME NAME + local 6cede63c42f882b1044b13c0aa20dd788eda6764940b9b8054db9e15087569a3 + local 20df1e593053e108028cd2ada3084042b2f0d96827f236ea809f1b6663d90ef4 + local a1601a649c6828db873110887ade959f86fdf18ccfd6e25c972a4edde661fd35 + local a20478a2a42c64f4ac332f7067acdd5dd72e67ab7b3d8a85e609aaa4cc35d4bf + local df1f97eda08c32d45f11a0faff8522e564ed2442274e6e0609fed30c3947b06b + local e1f39b76c16ef76392b3a3a8312edc0f8c3e033c8c59d6ab60a6429c20236f62 + + # 具名挂载 + docker run --name opengauss05 
+
+## Building an openGauss Image from a Dockerfile (the source tree already ships a build script you can use directly)
+
+- Download the installation package: [https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html)
+
+ ![](figures/20211204-eb905549-76da-4976-aaa6-dfef16877d00.png)
+
+- Download the source package: [https://gitee.com/opengauss/openGauss-server?_from=gitee_search](https://gitee.com/opengauss/openGauss-server?_from=gitee_search)
+
+ ![](figures/20211204-7e5f33ac-8420-463d-9639-f67586ad76ed.png)
+
+- Upload the installation package and the source package to the Docker server
+
+ ```
+ [root@mogdb ~]# ll openGauss*
+ -rw-r--r-- 1 root root 100623501 Dec 4 22:35 openGauss-2.1.0-CentOS-64bit-all.tar.gz
+ -rw-r--r-- 1 root root 193144438 Dec 4 22:40 openGauss-server-master.zip
+ ```
+
+ ```
+ # Unpack the source package and the installation package
+ [root@mogdb ~]# unzip openGauss-server-master.zip
+ [root@mogdb ~]# tar -zxvf openGauss-2.1.0-CentOS-64bit-all.tar.gz
+
+ # Prepare the directories and files
+ [root@mogdb ~]# cd /root/openGauss-server-master/docker/dockerfiles
+ [root@mogdb dockerfiles]# mkdir 2.1.0
+ [root@mogdb dockerfiles]# cp 1.1.0/* 2.1.0/
+ [root@mogdb dockerfiles]# cp /root/openGauss-2.1.0-CentOS-64bit.tar.bz2 2.1.0/
+
+ # Point the dockerfile_amd file at the 2.1.0 package
+ [root@mogdb dockerfiles]# sed -i "s/openGauss-1.1.0-CentOS-64bit.tar.bz2/openGauss-2.1.0-CentOS-64bit.tar.bz2/g" 2.1.0/dockerfile_amd
+
+ # Build the opengauss image
+ [root@mogdb dockerfiles]# sh buildDockerImage.sh -v 2.1.0 -i
+
+ Successfully built e336672f2857
+ Successfully tagged opengauss:2.1.0
+
+
+ openGauss Docker Image 2.1.0 is ready to be extended:
+
+ --> opengauss:2.1.0
+
+ Build completed in 42 seconds.
+ ```
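+
+ The build output notes that the image "is ready to be extended". As a hedged illustration, a derived image can bake small customizations on top of the one just built; everything below the FROM line is a hypothetical example:
+
+ ```
+ # Dockerfile: hypothetical extension of the image built above
+ FROM opengauss:2.1.0
+
+ # Example customization: set the container timezone
+ ENV TZ=Asia/Shanghai
+ ```
+
+ Build it with something like `docker build -t opengauss:2.1.0-custom .`; the result runs with the same flags as the base image.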
+
+ Now create a container from the freshly built image and verify it:
+
+ ```
+ # Create an opengauss container
+ [root@mogdb ~]# docker images
+ REPOSITORY TAG IMAGE ID CREATED SIZE
+ opengauss 2.1.0 e336672f2857 46 seconds ago 616MB
+ opengauss 1.0 1e760f8f2f3d 3 hours ago 1.21GB
+ enmotech/opengauss latest b4dd24d09223 2 months ago 383MB
+ centos 7.6.1810 f1cb7c7d58b7 2 years ago 202MB
+
+ [root@mogdb ~]# docker run --name opengauss10 --privileged=true -d -e GS_PASSWORD=Enmo@123 -p 5866:5432 -v /var/lib/opengauss opengauss:2.1.0
+ 30124a1b285a6fe92b4ea55bc340603148e5ba52db481aacf23354e242cfaa9c
+ [root@mogdb ~]# docker ps
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ 30124a1b285a opengauss:2.1.0 "entrypoint.sh gauss…" 11 seconds ago Up 10 seconds 0.0.0.0:5866->5432/tcp, :::5866->5432/tcp opengauss10
+ ```
+
+ Log in to the opengauss container and create a user:
+
+ ```
+ [root@mogdb ~]# docker exec -it 30124a1b285a /bin/bash
+ [root@72094285e528 /]# su - omm
+ [omm@72094285e528 ~]$ gsql
+ gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr )
+ Non-SSL connection (SSL connection is recommended when requiring high-security)
+ Type "help" for help.
+
+ omm=# CREATE USER tpcc_usr WITH PASSWORD "tpcc@1234";
+ NOTICE: The encrypted password contains MD5 ciphertext, which is not secure.
+ CREATE ROLE
+ omm=# alter user tpcc_usr sysadmin;
+ ALTER ROLE
+ omm=# GRANT ALL ON schema public TO tpcc_usr;
+ GRANT
+ ```
+
+ ![](figures/20211204-ae369c99-359e-419f-a4c2-9dba1f855cd5.png)
+
+
+That is all for now; there is plenty more to cover, and a follow-up post will come when time allows.
+
diff --git "a/content/zh/post/2022/\345\276\252\345\272\217\346\270\220\350\277\233-openGauss-GUC-\345\217\202\346\225\260\347\232\204\345\256\232\344\271\211-\345\274\225\345\257\274\345\222\214\345\210\227\350\241\250.md" "b/content/zh/post/2022/\345\276\252\345\272\217\346\270\220\350\277\233-openGauss-GUC-\345\217\202\346\225\260\347\232\204\345\256\232\344\271\211-\345\274\225\345\257\274\345\222\214\345\210\227\350\241\250.md"
new file mode 100644
index 0000000000000000000000000000000000000000..52d3459dbb5052059bc9149572a184361f764c0c
--- /dev/null
+++ "b/content/zh/post/2022/\345\276\252\345\272\217\346\270\220\350\277\233-openGauss-GUC-\345\217\202\346\225\260\347\232\204\345\256\232\344\271\211-\345\274\225\345\257\274\345\222\214\345\210\227\350\241\250.md"
@@ -0,0 +1,727 @@
++++
+
+title = "Step by Step openGauss: GUC Parameter Definition, Bootstrapping, and Listing"
+
+date = "2021-12-24"
+
+tags = [ "Step by Step openGauss: GUC Parameter Definition, Bootstrapping, and Listing"]
+
+archives = "2021-12"
+
+author = "eygle"
+
+summary = "Step by Step openGauss: GUC Parameter Definition, Bootstrapping, and Listing"
+
+img = "/zh/post/2022/title/img16.png"
+
+times = "12:30"
+
++++
+
+# Step by Step openGauss: GUC Parameter Definition, Bootstrapping, and Listing
+
+When adding a GUC parameter, be aware of which category the new parameter belongs to.
+
+For example, if you want ordinary users to be able to change it at any time, set its level to PGC_USERSET. If you want superusers to be able to change it online, set it to PGC_SUSET. If you want it to take effect after editing the configuration file and sending a signal, set it to PGC_SIGHUP.
+
+In openGauss, the GUC-related code lives in:
+
+[src/common/backend/utils/misc/guc.cpp](https://gitee.com/opengauss/openGauss-server/blob/master/src/common/backend/utils/misc/guc.cpp)
+
+## Parameter levels
+
+```
+/*
+ * Displayable names for context types (enum GucContext)
+ *
+ * Note: these strings are deliberately not localized.
+ */
+const char* const GucContext_Names[] = {
+    /* PGC_INTERNAL */ "internal",
+    /* PGC_POSTMASTER */ "postmaster",
+    /* PGC_SIGHUP */ "sighup",
+    /* PGC_BACKEND */ "backend",
+    /* PGC_SUSET */ "superuser",
+    /* PGC_USERSET */ "user"};
+```
+
+These parameter categories mean the following:
+
+- PGC_INTERNAL: set only internally; users cannot set it.
+- PGC_POSTMASTER: can only be configured when the postmaster starts, by reading the configuration file or processing command-line arguments.
+- PGC_SIGHUP: can only be configured at postmaster startup, or by changing the configuration file and sending the SIGHUP signal to notify the postmaster or a postgres process.
+- PGC_BACKEND: can only be set from the configuration file at postmaster startup, or by the client when the connection is requested. Backends that are already running ignore changes to such parameters.
+- PGC_SUSET: can only be set at postmaster startup, or by a superuser through SQL (the SET command).
+- PGC_USERSET: can be configured by any user at any time.
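+
+To make this concrete, below is a hedged sketch of what a single boolean entry in guc.cpp's static tables (ConfigureNamesBool and friends) looks like. The parameter name and its backing variable are hypothetical, and the exact field layout in openGauss may differ from this simplified, PostgreSQL-style form:
+
+```
+/* Hypothetical example entry, simplified; see ConfigureNamesBool in guc.cpp. */
+static bool my_feature_enabled = false;   /* variable backing the GUC */
+
+{
+    {
+        "my_feature_enabled",             /* name shown in SHOW / pg_settings */
+        PGC_USERSET,                      /* context: any user may SET it */
+        QUERY_TUNING_METHOD,              /* display group in pg_settings */
+        gettext_noop("Enables the hypothetical my_feature optimization."),
+        NULL                              /* no long description */
+    },
+    &my_feature_enabled,                  /* where the parsed value is stored */
+    false,                                /* boot_val: hard-wired default */
+    NULL, NULL, NULL                      /* check/assign/show hooks, unused here */
+},
+```
+
+Swapping PGC_USERSET for PGC_SIGHUP in such an entry is exactly what moves a parameter from "any user can SET it" to "edit the configuration file and send SIGHUP", per the list above.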
+
+## Parameter source definitions
+
+In [master/src/include/utils/guc.h](https://gitee.com/opengauss/openGauss-server/blob/master/src/include/utils/guc.h), the following data structure defines where a GUC parameter's value can come from:
+
+```
+typedef enum {
+    PGC_S_DEFAULT, /* hard-wired default ("boot_val") */
+    PGC_S_DYNAMIC_DEFAULT, /* default computed during initialization */
+    PGC_S_ENV_VAR, /* postmaster environment variable */
+    PGC_S_FILE, /* postgresql.conf */
+    PGC_S_ARGV, /* postmaster command line */
+    PGC_S_DATABASE, /* per-database setting */
+    PGC_S_USER, /* per-user setting */
+    PGC_S_DATABASE_USER, /* per-user-and-database setting */
+    PGC_S_CLIENT, /* from client connection request */
+    PGC_S_OVERRIDE, /* special case to forcibly set default */
+    PGC_S_INTERACTIVE, /* dividing line for error reporting */
+    PGC_S_TEST, /* test per-database or per-user setting */
+    PGC_S_SESSION /* SET command */
+} GucSource;
+```
+
+## Parameter bootstrapping
+
+At database startup, parameter bootstrapping happens in three steps.
+
+- **Initialize the GUC parameters**
+
+ The postmaster first calls the InitializeGUCOptions function to set every parameter to its default value:
+
+ 1) It first calls build_guc_variables to count the parameters and allocate a matching global array of config_generic pointers, guc_variables, which stores the address of every parameter struct, and then sorts that array. Because the parameters are stored in the global static arrays ConfigureNamesBool, ConfigureNamesInt, ConfigureNamesReal, ConfigureNamesString, and ConfigureNamesEnum, build_guc_variables only needs to walk those arrays, count the entries, and set the vartype field of each parameter's config_generic to the corresponding type. Once all parameters have been visited, it allocates the config_generic pointer array guc_vars based on the total count, walks the static arrays again, and saves each parameter struct's address into guc_vars (the array is sized at 1.25 times the current parameter count, mainly to leave room for future additions). The global guc_variables is then pointed at guc_vars, and finally guc_variables is quicksorted by parameter name.
+
+ 2) Next, every parameter is set to its default value. For each parameter in guc_variables, InitializeGUCOptions sets status in its config_generic to 0; sets reset_source, tentative_source, and source to PGC_S_DEFAULT; and sets stack and sourcefile to NULL. Then, depending on vartype, it calls the parameter's assign_hook if one is defined; the assign_hook establishes boot_val, which is finally assigned to reset_val and to the variable the parameter points to. Through this sequence of steps, every parameter ends up at its default value.
+
+ 3) The getenv system call reads the environment variables PGPORT, PGDATESTYLE, and PGCLIENTENCODING; for each one that is non-empty, SetConfigOption is called to set the corresponding parameter.
+
+ 4) Finally, the system's maximum safe stack depth is probed; if the value is greater than 100 KB and no more than 2 MB, it is used to set the max_stack_depth parameter.
+
+- **Configure GUC parameters from the command line**
+
+ If the user specified GUC values as command-line arguments when starting the postmaster, the postmaster must parse those values out of the command line and apply them to the corresponding parameters. This is done mainly with the getopt and SetConfigOption functions.
+
+ For each option and value returned by getopt, a switch statement dispatches on the option and calls SetConfigOption to set the corresponding parameter.
+
+ SetConfigOption's first argument is the parameter name; the second is the parameter value, held in the optarg string returned by getopt; the third is the parameter context; and the last is the parameter source. Because the postmaster is processing command-line arguments at this point, the context and source are set to PGC_POSTMASTER and PGC_S_ARGV respectively.
+
+ SetConfigOption is implemented by calling set_config_option(const char *name, const char *value, GucContext context, GucSource source, bool isLocal, bool changeVal), with the last two arguments fixed at false and true. That function first searches the parameter array pointed to by guc_variables for a parameter named name, and reports an error if none is found. Otherwise it compares the GucContext stored in the found parameter struct with the context argument to decide whether the parameter may be set in the current context, reporting an error if not; it then compares the struct's GucSource with the source argument to check that the current operation's priority is greater than or equal to the previous one. If it is, the value is converted according to the parameter's value type and the corresponding fields of the parameter struct are updated.
+
+- **Read the configuration file**
+
+ After the command-line arguments have been applied, the configuration file is read and the parameters are configured again. Note that a parameter set in the configuration file can never override one already set on the command line, because its priority is lower than the command line's.
+
+ This step is driven by SelectConfigFiles(const char *userDoption, const char *progname). The first argument is the user's data directory as set on the command line; if it was not set, it is located through the PGDATA environment variable. The second argument is the program name, used mainly for error handling.
+
+ The function first locates the configuration file under the data directory, then invokes the lexer to parse the file. For every parameter and value parsed, SetConfigOption is called to apply the change.
+
+After the three steps above, the parameters are validated. For example: the data directory's owner user ID must equal the effective user ID of the current process; the data directory must deny all access to group and other users; the number of buffers must be at least twice the number of allowed connections and at least 16; and so on. If everything is valid, the process changes into the data directory and continues with the remaining startup work.
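+
+Both the context and the source of a parameter are visible from SQL, which makes the priority rules above easy to check. A small sketch (exact message text and the behavior of the pg_settings source column may vary slightly across openGauss versions):
+
+```
+-- work_mem has context "user" (PGC_USERSET), so a plain SET succeeds,
+-- and afterwards pg_settings reports source = "session" (PGC_S_SESSION).
+omm=# SET work_mem = '128MB';
+omm=# SELECT name, setting, context, source FROM pg_settings WHERE name = 'work_mem';
+
+-- max_connections has context "postmaster", so SET is rejected; it can
+-- only be changed through the configuration file plus a server restart.
+omm=# SET max_connections = 1000;
+ERROR:  parameter "max_connections" cannot be changed without restarting the server
+```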
+
+## How to view the level of every parameter
+
+```
+omm=# \pset pager
+Pager usage is off.
+omm=# select context,name,short_desc from pg_settings order by context,category,name;
+ context | name | short_desc
+------------+----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------
+ backend | local_preload_libraries | Lists shared libraries to preload into each backend.
+ backend | remotetype | Sets the type of Postgres-XC remote connection
+ backend | ignore_system_indexes | Disables reading from system indexes.
+ backend | post_auth_delay | Waits N seconds on connection startup after authentication.
+ backend | log_connections | Logs each successful connection.
+ backend | log_disconnections | Logs end of a session, including duration.
+ internal | lc_collate | Shows the collation order locale.
+ internal | lc_ctype | Shows the character classification and case conversion locale.
+ internal | server_encoding | Sets the server (database) character set encoding.
+ internal | instr_unique_sql_track_type | unique sql track type
+ internal | block_size | Shows the size of a disk block.
+ internal | integer_datetimes | Datetimes are integer based.
+ internal | percentile | Sets the percentile of sql responstime that DBA want to know.
+ internal | enable_adio_function | Enable adio function.
+ internal | max_function_args | Shows the maximum number of function arguments.
+ internal | max_identifier_length | Shows the maximum identifier length.
+ internal | max_index_keys | Shows the maximum number of index keys.
+ internal | segment_size | Shows the number of pages per disk file.
+ internal | server_version | Shows the server version.
+ internal | server_version_num | Shows the server version as an integer.
+ internal | wal_block_size | Shows the block size in the write ahead log.
+ internal | wal_segment_size | Shows the number of pages per write ahead log segment.
+ internal | update_process_title | Updates the process title to show the active SQL command.
+ internal | current_logic_cluster | Shows current logic cluster.
+ internal | sql_compatibility | Choose which SQL format to adapt.
+ postmaster | audit_data_format | Sets the data format for audit files.
+ postmaster | audit_directory | Sets the destination directory for audit files.
+ postmaster | available_zone | Sets the available zone of current instance.
+ postmaster | elastic_search_ip_addr | Controls elastic search IP address in the system.
+ postmaster | use_elastic_search | Enables elastic search in the system.
+ postmaster | autovacuum_freeze_max_age | Age at which to autovacuum a table.
+ postmaster | autovacuum_max_workers | Sets the maximum number of simultaneously running autovacuum worker processes.
+ postmaster | comm_tcp_mode | Whether use tcp commucation mode for stream
+ postmaster | enable_global_plancache | enable to use global plan cache.
+ postmaster | enable_thread_pool | enable to use thread pool. + postmaster | thread_pool_attr | Spare Cpu that can not be used in thread pool. + postmaster | cn_send_buffer_size | Sets the send buffer size used in CN, unit in KB. + postmaster | asp_sample_num | Sets the active session profile max sample nums in buff + postmaster | comm_control_port | Sets the stream control port the server listens on. + postmaster | comm_max_receiver | Maximum number of internal receiver threads. + postmaster | comm_memory_pool | Sets the memory pool size for communication(in kB). + postmaster | comm_memory_pool_percent | Sets the percent of comm_memory_pool for dynamic workload. + postmaster | comm_quota_size | Sets the stream quota size in kB. + postmaster | comm_sctp_port | Sets the STCP port the server listens on. + postmaster | comm_usable_memory | Sets the total usable memory for communication(in kB). + postmaster | listen_addresses | Sets the host name or IP address(es) to listen to. + postmaster | local_bind_address | Sets the host name or IP address(es) to connect to for sctp. + postmaster | max_connections | Sets the maximum number of concurrent connections for clients. + postmaster | max_inner_tool_connections | Sets the maximum number of concurrent connections for inner tools. + postmaster | port | Sets the TCP port the server listens on. + postmaster | unix_socket_group | Sets the owning group of the Unix-domain socket. + postmaster | unix_socket_permissions | Sets the access permissions of the Unix-domain socket. + postmaster | enableSeparationOfDuty | Enables the user's separation of privileges. + postmaster | sysadmin_reserved_connections | Sets the number of connection slots reserved for system admin. + postmaster | unix_socket_directory | Sets the directory where the Unix-domain socket will be created. + postmaster | ssl | Enables SSL connections. + postmaster | ssl_ca_file | Location of the SSL certificate authority file. + postmaster | ssl_cert_file | Location of the SSL server certificate file. + postmaster | ssl_ciphers | Sets the list of allowed SSL ciphers. + postmaster | ssl_crl_file | Location of the SSL certificate revocation list file. + postmaster | ssl_key_file | Location of the SSL server private key file. + postmaster | pgxc_node_name | The Coordinator or Datanode name. + postmaster | enable_stateless_pooler_reuse | Pooler stateless reuse mode. + postmaster | allow_system_table_mods | Allows modifications of the structure of system tables. + postmaster | comm_sender_buffer_size | The libcomm sender's buffer size in every interaction between DN and CN, or DN and DN, unit(KB) + postmaster | lastval_supported | Enable functionality of lastval() function. + postmaster | support_extended_features | Enables unofficial supported extended features. + postmaster | data_sync_retry | Whether to continue running after a failure to sync data files. + postmaster | config_file | Sets the server's main configuration file. + postmaster | data_directory | Sets the server's data directory. + postmaster | enable_default_cfunc_libpath | Enable check for c function lib path. + postmaster | external_pid_file | Writes the postmaster PID to the specified file. + postmaster | hba_file | Sets the server's "hba" configuration file. + postmaster | ident_file | Sets the server's "ident" configuration file. + postmaster | mot_config_file | Sets mot main configuration file. + postmaster | job_queue_processes | Number of concurrent jobs, optional: [1...1000], default: 10. 
+ postmaster | max_locks_per_transaction | Sets the maximum number of locks per transaction. + postmaster | max_pred_locks_per_transaction | Sets the maximum number of predicate locks per transaction. + postmaster | enable_delta_store | Enable delta for column store. + postmaster | string_hash_compatible | Enables the hash compatibility of char() and varchar() datatype + postmaster | enable_orc_cache | Enable orc metadata cache. + postmaster | enable_mix_replication | All the replication log sent by the wal streaming. + postmaster | data_replicate_buffer_size | Sets the buffer size of data replication. + postmaster | max_replication_slots | Sets the maximum number of simultaneously defined replication slots. + postmaster | max_wal_senders | Sets the maximum number of simultaneously running WAL sender processes. + postmaster | catchup2normal_wait_time | The maximal allowed duration for waiting from catchup to normal state. + postmaster | hot_standby | Allows connections and queries during recovery. + postmaster | wal_receiver_buffer_size | Sets the buffer size to receive data from master. + postmaster | asp_log_directory | Sets the destination directory for asp log files. + postmaster | event_source | Sets the application name used to identify PostgreSQL messages in the event log. + postmaster | logging_collector | Starts a subprocess to capture stderr output and/or csvlogs into log files. + postmaster | perf_directory | Sets the destination directory for perf json files. + postmaster | query_log_directory | Sets the destination directory for slow query log files. + postmaster | numa_distribute_mode | Sets the NUMA node distribution mode. + postmaster | max_files_per_process | Sets the maximum number of simultaneously open files for each server process. + postmaster | shared_preload_libraries | Lists shared libraries to preload into server. + postmaster | cstore_buffers | Sets the number of CStore buffers used by the server. + postmaster | enable_memory_limit | Using memory protect feature. + postmaster | local_syscache_threshold | Sets the maximum threshold for cleaning cache. + postmaster | max_compile_functions | max compile results in postmaster + postmaster | max_prepared_transactions | Sets the maximum number of simultaneously prepared transactions. + postmaster | max_process_memory | Sets the maximum number of memory used by the process. + postmaster | memorypool_enable | Using memory pool. + postmaster | memorypool_size | Sets the number of memory pool used by the server. + postmaster | shared_buffers | Sets the number of shared memory buffers used by the server. + postmaster | track_activity_query_size | Sets the size reserved for pg_stat_activity.query, in bytes. + postmaster | udf_memory_limit | Sets the maximum number of memory used by UDF Master and UDF Workers. + postmaster | UDFWorkerMemHardLimit | Sets the hard memory limit to be used for fenced UDF. + postmaster | walsender_max_send_size | Size of walsender max send size. + postmaster | recovery_max_workers | The max number of recovery threads allowed to run in parallel. + postmaster | recovery_parallelism | The actual number of recovery threads running in parallel. + postmaster | recovery_parse_workers | The number of recovery threads to do xlog parse. + postmaster | recovery_redo_workers | The number belonging to one parse worker to do xlog redo. + postmaster | bbox_blanklist_items | List of names of bbox blanklist items. + postmaster | enable_ffic_log | Enables First Failure Info Capture. 
+ postmaster | max_concurrent_autonomous_transactions | Maximum number of concurrent autonomous transactions processes. + postmaster | alarm_component | Sets the component for alarm function. + postmaster | enable_alarm | Enables alarm or not. + postmaster | enable_nonsysadmin_execute_direct | Enables non-sysadmin users execute direct on CN/DN. + postmaster | max_cached_tuplebufs | how many memory reorderbuffer can use. + postmaster | max_changes_in_memory | how many memory a transaction can use in reorderbuffer. + postmaster | max_resource_package | The maximum number of the resource package(RP) for DN in the compute pool. + postmaster | remote_read_mode | decide way of remote read + postmaster | transparent_encrypted_string | The encrypted string to test the transparent encryption key. + postmaster | transparent_encrypt_kms_region | The region to get transparent encryption key. + postmaster | transparent_encrypt_kms_url | The URL to get transparent encryption key. + postmaster | enable_page_lsn_check | Enable check page lsn when redo + postmaster | force_promote | Enable master update min recovery point. + postmaster | bgwriter_thread_num | Sets the number of background writer threads with incremental checkpoint on. + postmaster | enable_double_write | Enable master double write. + postmaster | enable_incremental_checkpoint | Enable master incremental checkpoint. + postmaster | pagewriter_thread_num | Sets the number of page writer threads. + postmaster | advance_xlog_file_num | Sets the number of xlog files to be initialized in advance. + postmaster | replication_type | Sets the dn's HA mode. + postmaster | sync_config_strategy | Synchronization strategy for configuration files between host and standby. + postmaster | wal_buffers | Sets the number of disk-page buffers in shared memory for WAL. + postmaster | wal_file_init_num | Sets the number of xlog segment files that WAL writer auxiliary thread creates at one time. + postmaster | wal_level | Sets the level of information written to the WAL. + postmaster | wal_log_hints | Writes full pages to WAL when first modified after a checkpoint, even for a non-critical modifications. + postmaster | wal_writer_cpu | Sets the binding CPU number for the WAL writer thread. + postmaster | xlog_idle_flushes_before_sleep | Number of idle xlog flushes before xlog flusher goes to sleep. + postmaster | xloginsert_locks | Sets the number of locks used for concurrent xlog insertions. + sighup | audit_copy_exec | audit copy execution. + sighup | audit_database_process | audit database start, stop, recover and switchover. + sighup | audit_dml_state | audit DML operation. + sighup | audit_dml_state_select | audit DML select operation. + sighup | audit_enabled | Starts a subprocess to capture audit output into audit files. + sighup | audit_file_remain_threshold | audit file remain threshold. + sighup | audit_file_remain_time | the days of the audit files can be remained + sighup | audit_function_exec | audit function execution. + sighup | audit_grant_revoke | audit grant and revoke privilege. + sighup | audit_login_logout | audit user login logout. + sighup | audit_resource_policy | the policy is used to determine how to cleanup the audit files; True means to cleanup the audit files based on space limitation and False means to cleanup the audit files when the remained time is arriving. + sighup | audit_rotation_interval | Automatic audit file rotation will occur after N minutes. 
+ sighup | audit_rotation_size | Automatic audit file rotation will occur after N kilobytes. + sighup | audit_set_parameter | audit set operation. + sighup | audit_space_limit | audit data space limit in MB unit + sighup | audit_system_object | audit DDL operation on system object. + sighup | audit_user_locked | audit lock and unlock user. + sighup | audit_user_violation | audit user violation. + sighup | autoanalyze_timeout | Sets the timeout for auto-analyze action. + sighup | autovacuum | Starts the autovacuum subprocess. + sighup | autovacuum_analyze_scale_factor | Number of tuple inserts, updates, or deletes prior to analyze as a fraction of reltuples. + sighup | autovacuum_analyze_threshold | Minimum number of tuple inserts, updates, or deletes prior to analyze. + sighup | autovacuum_mode | Sets the behavior of autovacuum + sighup | autovacuum_naptime | Time to sleep between autovacuum runs. + sighup | autovacuum_vacuum_cost_delay | Vacuum cost delay in milliseconds, for autovacuum. + sighup | autovacuum_vacuum_cost_limit | Vacuum cost amount available before napping, for autovacuum. + sighup | autovacuum_vacuum_scale_factor | Number of tuple updates or deletes prior to vacuum as a fraction of reltuples. + sighup | autovacuum_vacuum_threshold | Minimum number of tuple updates or deletes prior to vacuum. + sighup | enable_router | enable to use router. + sighup | track_stmt_retention_time | The longest retention time of full SQL and slow query in statement_ history + sighup | support_batch_bind | Sets to use batch bind-execute for PBE. + sighup | max_cn_temp_file_size | Sets the maximum tempfile size used in CN, unit in MB. + sighup | asp_flush_rate | every Nth sample to disk, MOD(sample_id, N) = 0 will flush to dist + sighup | asp_retention_days | set max retention days for pg_asp + sighup | asp_sample_interval | Sets the active session profile max sample nums in buff + sighup | enable_asp | Enable active session profile + sighup | enable_instr_cpu_timer | Enables instruments cpu timer functionality. + sighup | enable_instr_rt_percentile | Calculate percentile info of sql responstime. + sighup | enable_instr_track_wait | Collects information about wait status. + sighup | enable_slow_query_log | Write slow query log. + sighup | enable_stmt_track | Enable full/slow sql feature + sighup | enable_wdr_snapshot | Enable wdr snapshot + sighup | instr_rt_percentile_interval | Sets the interval for calculating percentile in pgstat thread, in seconds + sighup | instr_unique_sql_count | Sets the number of entries collected in gs_instr_unique_sql. + sighup | track_stmt_session_slot | Sets the number of entries collected for full sql/slow sql in each session. + sighup | wdr_snapshot_interval | Sets the interval for wdr snapshot in snapshot thread, in min + sighup | wdr_snapshot_query_timeout | Sets the timeout for wdr snapshot query, in seconds + sighup | wdr_snapshot_retention_days | Sets the max time span for wdr snapshot, in seconds + sighup | authentication_timeout | Sets the maximum allowed time to complete client authentication. + sighup | auth_iteration_count | The iteration count used in RFC5802 authenication. + sighup | failed_login_attempts | max number of login attempts. + sighup | krb_srvname | Sets the name of the Kerberos service. + sighup | krb_caseins_users | Sets whether Kerberos and GSSAPI user names should be treated as case-insensitive. + sighup | krb_server_keyfile | Sets the location of the Kerberos server key file. 
+ sighup | password_encryption_type | The encryption method of password. + sighup | password_lock_time | password lock time + sighup | modify_initial_password | modify the initial password of the initial user. + sighup | password_effect_time | password effective time. + sighup | password_max_length | max length of password. + sighup | password_min_digital | min number of digital character in password. + sighup | password_min_length | min length of password. + sighup | password_min_uppercase | min number of upper character in password. + sighup | password_notify_time | password deadline notice time. + sighup | password_policy | The password complexity-policy of the database system. + sighup | password_reuse_max | max times password can reuse. + sighup | password_reuse_time | max days password can reuse. + sighup | password_min_lowercase | min number of lower character in password. + sighup | password_min_special | min number of special character in password. + sighup | require_ssl | Requires SSL connections. + sighup | ssl_cert_notify_time | Alarm days before ssl cert expires. + sighup | pre_auth_delay | Waits N seconds on connection startup before authentication. + sighup | trace_recovery_messages | Enables logging of recovery-related debugging information. + sighup | wait_dummy_time | Wait for dummy starts or bcm file list received when catchup. + sighup | enable_debug_vacuum | This parameter is just used for logging some vacuum info. + sighup | restart_after_crash | Reinitializes server after backend crashes. + sighup | defer_csn_cleanup_time | Sets the interval time to push cut off csn num. + sighup | enable_prevent_job_task_startup | enable control whether the job task thread can be started. + sighup | enable_security_policy | enable security policy features. + sighup | most_available_sync | Enables master to continue when sync standbys failure. + sighup | synchronous_standby_names | List of names of potential synchronous standbys. + sighup | vacuum_defer_cleanup_age | Number of transactions by which VACUUM and HOT cleanup should be deferred, if any. + sighup | recovery_time_target | The target redo time in seconds for recovery + sighup | replconninfo2 | Sets the replconninfo2 of the HA to listen and authenticate. + sighup | replconninfo3 | Sets the replconninfo3 of the HA to listen and authenticate. + sighup | replconninfo1 | Sets the replconninfo1 of the HA to listen and authenticate. + sighup | replconninfo4 | Sets the replconninfo4 of the HA to listen and authenticate. + sighup | replconninfo5 | Sets the replconninfo5 of the HA to listen and authenticate. + sighup | replconninfo6 | Sets the replconninfo6 of the HA to listen and authenticate. + sighup | replconninfo7 | Sets the replconninfo7 of the HA to listen and authenticate. + sighup | replconninfo8 | Sets the replconninfo8 of the HA to listen and authenticate. + sighup | time_to_target_rpo | The time to the target recovery point in seconds + sighup | wal_keep_segments | Sets the number of WAL files held for standby servers. + sighup | wal_sender_timeout | Sets the maximum time to wait for WAL replication. + sighup | enable_incremental_catchup | Enable incremental searching bcm files when catchup. + sighup | enable_stream_replication | Allows stream replication to standby or secondary. + sighup | hot_standby_feedback | Allows feedback from a hot standby to the primary that will avoid query conflicts. 
+ sighup | max_standby_archive_delay | Sets the maximum delay before canceling queries when a hot standby server is processing archived WAL data. + sighup | recovery_min_apply_delay | Sets the minimum delay for applying changes during recovery. + sighup | wal_receiver_connect_retries | Sets the maximum retries to connect master. + sighup | max_standby_streaming_delay | Sets the maximum delay before canceling queries when a hot standby server is processing streamed WAL data. + sighup | primary_slotname | Set the primary slot name. + sighup | wal_receiver_connect_timeout | Sets the maximum wait time to connect master. + sighup | wal_receiver_status_interval | Sets the maximum interval between WAL receiver status reports to the primary. + sighup | wal_receiver_timeout | Sets the maximum wait time to receive data from master. + sighup | debug_print_parse | Logs each query's parse tree. + sighup | debug_print_plan | Logs each query's execution plan. + sighup | debug_print_rewritten | Logs each query's rewritten parse tree. + sighup | log_autovacuum_min_duration | Sets the minimum execution time above which autovacuum actions will be logged. + sighup | log_checkpoints | Logs each checkpoint. + sighup | log_hostname | Logs the host name in the connection logs. + sighup | log_line_prefix | Controls information prefixed to each log line. + sighup | log_pagewriter | Logs pagewriter thread. + sighup | log_timezone | Sets the time zone to use in log messages. + sighup | asp_flush_mode | Sets the active session profile flush mode:file/table/all. + sighup | asp_log_filename | Sets the file name pattern for asp data files. + sighup | bbox_dump_path | Sets the path of core dump created by bbox_handler. + sighup | log_destination | Sets the destination for server log output. + sighup | log_filename | Sets the file name pattern for log files. + sighup | log_rotation_age | Automatic log file rotation will occur after N minutes. + sighup | log_rotation_size | Automatic log file rotation will occur after N kilobytes. + sighup | log_directory | Sets the destination directory for log files. + sighup | log_file_mode | Sets the file permissions for log files. + sighup | log_truncate_on_rotation | Truncates existing log files of same name during log rotation. + sighup | syslog_ident | Sets the program name used to identify PostgreSQL messages in syslog. + sighup | query_log_file | Sets the file name pattern for slow query log files. + sighup | syslog_facility | Sets the syslog "facility" to be used when syslog enabled. + sighup | cache_connection | pooler cache connection + sighup | bgwriter_delay | Background writer sleep time between rounds. + sighup | bgwriter_flush_after | Number of pages after which previously performed writes are flushed to disk. + sighup | bgwriter_lru_maxpages | Background writer maximum number of LRU pages to flush per round. + sighup | bgwriter_lru_multiplier | Multiple of the average buffer usage to free per round. + sighup | candidate_buf_percent_target | Sets the candidate buffers percent. + sighup | dirty_page_percent_max | Sets the dirty buffers percent. + sighup | enable_memory_context_control | check the max space size of memory context. + sighup | session_history_memory | Sets the maximum number of session history memory used by the process. + sighup | standby_shared_buffers_fraction | The max fraction of shared_buffers usage to standby. + sighup | autovacuum_io_limits | Sets io_limit for autovacum. 
+ sighup | session_statistics_memory | Sets the maximum number of session statistics memory used by the process. + sighup | cpu_collect_timer | Sets the maximum cpu collect time. + sighup | enable_bbox_dump | Enables bbox_handler to create core dump. + sighup | enable_instance_metric_persistent | enable instance resource info persistent function. + sighup | enable_logical_io_statistics | enable logical io statistics function. + sighup | enable_resource_record | enable insert the session info into the user table. + sighup | enable_resource_track | enable resources tracking and recording functionality in the system. + sighup | enable_user_metric_persistent | enable user resource info persistent function. + sighup | instance_metric_retention_time | the instance resource info retention time. + sighup | io_control_unit | Sets the io control unit for reading or writing row tuple. + sighup | topsql_retention_time | the retention time of TopSql + sighup | unique_sql_retention_time | the retention time of unique sql text + sighup | user_metric_retention_time | the user resource info retention time. + sighup | use_workload_manager | Enables workload manager in the system. + sighup | fault_mon_timeout | how many miniutes to monitor lwlock. 0 will disable that + sighup | stats_temp_directory | Writes temporary statistics files to the specified directory. + sighup | alarm_report_interval | Sets the interval time between two alarm report. + sighup | connection_alarm_rate | Reports alarm if connection rate overload. + sighup | enable_access_server_directory | enable sysadmin to create directory + sighup | enable_copy_server_files | enable sysadmin to copy from/to file + sighup | enable_online_ddl_waitlock | Enable ddl wait advisory lock in online expansion. + sighup | operation_mode | Sets the operation mode. + sighup | upgrade_mode | Indicate the upgrade mode: inplace upgrade mode, grey upgrade mode or not in upgrade. + sighup | enable_cbm_tracking | Turn on cbm tracking function. + sighup | enable_xlog_prune | Enable xlog prune when not all standys connected and xlog size is largger than max_xlog_size + sighup | max_io_capacity | The I/O upper limit of batch flush dirty page every second. + sighup | max_redo_log_size | max redo log size. + sighup | max_size_for_xlog_prune | This param set by user is used for xlog to be recycled when not all are connected and the param enable_xlog_prune is on. + sighup | archive_command | Sets the shell command that will be called to archive a WAL file. + sighup | archive_dest | Sets the path that will be used to archive a WAL file. + sighup | archive_mode | Allows archiving of WAL files using archive_command. + sighup | archive_timeout | Forces a switch to the next xlog file if a new file has not been started within N seconds. + sighup | checkpoint_completion_target | Time spent flushing dirty buffers during checkpoint, as fraction of checkpoint interval. + sighup | checkpoint_flush_after | Number of pages after which previously performed writes are flushed to disk. + sighup | checkpoint_segments | Sets the maximum distance in log segments between automatic WAL checkpoints. + sighup | checkpoint_timeout | Sets the maximum time between automatic WAL checkpoints. + sighup | checkpoint_wait_timeout | Sets the maximum wait timeout for checkpointer to start. + sighup | checkpoint_warning | Enables warnings if checkpoint segments are filled more frequently than this. + sighup | datanode_heartbeat_interval | Sets the heartbeat interval of the standby nodes. 
+ sighup | incremental_checkpoint_timeout | Sets the maximum time between automatic WAL checkpoints. + sighup | pagewriter_sleep | PageWriter sleep time. + sighup | fsync | Forces synchronization of updates to disk. + sighup | full_page_writes | Writes full pages to WAL when first modified after a checkpoint. + sighup | wal_sync_method | Selects the method used for forcing WAL updates to disk. + sighup | wal_writer_delay | WAL writer sleep time between WAL flushes. + superuser | lc_messages | Sets the language in which messages are displayed. + superuser | dynamic_library_path | Sets the path for dynamically loadable modules. + superuser | session_replication_role | Sets the session's behavior for triggers and rewrite rules. + superuser | pljava_vmoptions | Options sent to the JVM when it is created + superuser | enable_adio_debug | Enable log debug adio function. + superuser | ignore_checksum_failure | Continues processing after a checksum failure. + superuser | zero_damaged_pages | Continues processing past damaged page headers. + superuser | exit_on_error | Terminates session on any error. + superuser | deadlock_timeout | Sets the time to wait on a lock before checking for deadlock. + superuser | lockwait_timeout | Sets the max time to wait on a lock acquire. + superuser | update_lockwait_timeout | Sets the max time to wait on a lock acquire when concurrently update same tuple. + superuser | enable_extrapolation_stats | Enable extrapolation stats for date datatype. + superuser | enable_fast_numeric | Enable numeric optimize. + superuser | enable_global_stats | Enable global stats for analyze. + superuser | enable_kill_query | Enables cancelling a query that locks some relations owned by a user when the user is dropped. + superuser | enable_change_hjcost | Enable change hash join cost + superuser | enable_csqual_pushdown | Enables colstore qual push down. + superuser | log_duration | Logs the duration of each completed SQL statement. + superuser | log_error_verbosity | Sets the verbosity of logged messages. + superuser | log_lock_waits | Logs long lock waits. + superuser | log_statement | Sets the type of statements logged. + superuser | log_temp_files | Logs the use of temporary files larger than this number of kilobytes. + superuser | raise_errors_if_no_files | raise errors if no files to be imported. + superuser | backtrace_min_messages | Sets the message levels for print backtrace that are logged. + superuser | log_min_duration_statement | Sets the minimum execution time above which statements will be logged. + superuser | log_min_error_statement | Causes all statements generating error at or above this level to be logged. + superuser | log_min_messages | Sets the message levels that are logged. + superuser | temp_file_limit | Limits the total size of all temporary files used by each session. + superuser | fast_extend_file_size | Set fast extend file size used by async dirct IO interface for row store. + superuser | max_stack_depth | Sets the maximum stack depth, in kilobytes. + superuser | autoanalyze | Enable auto-analyze when querying tables with no statistic. + superuser | enable_analyze_check | Enable check if table is analyzed when querying. + superuser | log_executor_stats | Writes executor performance statistics to the server log. + superuser | log_parser_stats | Writes parser performance statistics to the server log. + superuser | log_planner_stats | Writes planner performance statistics to the server log. 
+ superuser | log_statement_stats | Writes cumulative performance statistics to the server log. + superuser | track_activities | Collects information about executing commands. + superuser | track_counts | Collects statistics on database activity. + superuser | track_functions | Collects function-level statistics on database activity. + superuser | track_io_timing | Collects timing statistics for database I/O activity. + superuser | track_sql_count | Collects query info on database activity. + superuser | track_thread_wait_status_interval | Sets the interval for collecting thread status in pgstat thread, in minute + superuser | enable_fast_allocate | enable fallocate to improve file extend performance, make sure filesystem support it, ep:XFS + superuser | lo_compat_privileges | Enables backward compatibility mode for privilege checks on large objects. + superuser | max_keep_log_seg | Sets the threshold for implementing logical replication flow control. + superuser | enable_light_proxy | Turns on light proxy on coordinator. + superuser | enable_pbe_optimization | Turns on pbe optimization: force to reuse generic plan. + superuser | enforce_two_phase_commit | Enforces the use of two-phase commit on transactions thatmade use of temporary objects. + superuser | xc_maintenance_mode | Turns on XC maintenance mode. + user | router | set send node router for sql before unrouter. + user | client_encoding | Sets the client's character set encoding. + user | DateStyle | Sets the display format for date and time values. + user | default_text_search_config | Sets default text search configuration. + user | extra_float_digits | Sets the number of digits displayed for floating-point values. + user | IntervalStyle | Sets the display format for interval values. + user | lc_monetary | Sets the locale for formatting monetary amounts. + user | lc_numeric | Sets the locale for formatting numbers. + user | lc_time | Sets the locale for formatting date and time values. + user | TimeZone | Sets the time zone for displaying and interpreting time stamps. + user | timezone_abbreviations | Selects a file of time zone abbreviations. + user | gin_fuzzy_search_limit | Sets the maximum allowed result for exact search by GIN. + user | tcp_keepalives_count | Maximum number of TCP keepalive retransmits. + user | tcp_keepalives_idle | Time between issuing TCP keepalives. + user | tcp_keepalives_interval | Time between TCP keepalive retransmits. + user | analysis_options | enable/disable sql dfx option. + user | bytea_output | Sets the output format for bytea. + user | check_function_bodies | Checks function bodies during CREATE FUNCTION. + user | client_min_messages | Sets the message levels that are sent to the client. + user | current_schema | Sets the schema search order for names that are not schema-qualified. + user | default_tablespace | Sets the default tablespace to create tables and indexes in. + user | default_transaction_deferrable | Sets the default deferrable status of new transactions. + user | default_transaction_isolation | Sets the transaction isolation level of each new transaction. + user | default_transaction_read_only | Sets the default read-only status of new transactions. + user | enforce_a_behavior | GUC parameter of enforcing adapting to A db. + user | gin_pending_list_limit | Sets the maximum size of the pending list for GIN index. + user | max_query_retry_times | Sets the maximum sql retry times. + user | max_user_defined_exception | GUC parameter of max_user_defined_exception. 
+ user | nls_timestamp_format | defines the default timestamp format to use with the TO_TIMESTAMP functions. + user | omit_encoding_error | Omits encoding convert error. + user | search_path | Sets the schema search order for names that are not schema-qualified. + user | session_timeout | Set the maximum allowed duration of any unused session. + user | statement_timeout | Sets the maximum allowed duration of any statement. + user | transaction_deferrable | Whether to defer a read-only serializable transaction until it can be executed with no possible serialization failures. + user | transaction_isolation | Sets the current transaction's isolation level. + user | temp_tablespaces | Sets the tablespace(s) to use for temporary tables and sort files. + user | transaction_read_only | Sets the current transaction's read-only status. + user | vacuum_freeze_min_age | Minimum age at which VACUUM should freeze a table row. + user | vacuum_freeze_table_age | Age at which VACUUM should scan whole table to freeze tuples. + user | vacuum_gtt_defer_check_age | The defer check age of GTT, used to check expired data after vacuum. + user | xmlbinary | Sets how binary values are to be encoded in XML. + user | xmloption | Sets whether XML data in implicit parsing and serialization operations is to be considered as documents or content fragments. + user | ssl_renegotiation_limit | SSL renegotiation is no longer supported, no matter what value is set. + user | application_type | application distribute type(perfect sharding or not) in gtm free mode. + user | allow_concurrent_tuple_update | Allows concurrent tuple update. + user | track_stmt_details_size | the maximum bytes of statement details to be gathered. + user | track_stmt_stat_level | specify which level statement's statistics to be gathered. + user | comm_debug_mode | Whether use libcomm debug mode for print debug information + user | comm_no_delay | Whether set NO_DELAY option for libcomm socket + user | comm_stat_mode | Whether use libcomm stat mode for print stat data + user | comm_timer_mode | Whether use libcomm timer debug mode for print timer data + user | debug_assertions | Turns on various assertion checks. + user | enable_beta_features | Enable features that ever supported in former version . + user | enable_show_any_tuples | This parameter is just valid when it's a read-only transction, just for analyse.The default_transaction_read_only and transaction_read_only should be true.You'd better keep enable_indexscan and enable_bitmapscan be false to keep seqscan occurs.When enable_show_any_tuples is true, all versions of the tuples are visible, including dirty versions. + user | ha_module_debug | debug ha module. + user | trace_notify | Generates debugging output for LISTEN and NOTIFY. + user | trace_sort | Emits information about resource usage in sorting. + user | minimum_pool_size | Initial pool size. + user | pooler_maximum_idle_time | Maximum idle time of the pooler links. + user | partition_lock_upgrade_timeout | Sets the timeout for partition lock upgrade, in seconds + user | codegen_strategy | Choose whether it is allowed to call C-function in codegen. + user | comm_ackchk_time | Send ack check package to stream sender periodically. + user | query_dop | User-defined degree of parallelism. + user | resource_track_log | Sets resource track log level + user | rewrite_rule | Sets the rewrite rule. + user | sql_beta_feature | Sets the beta feature for SQL engine. + user | geqo | Enables genetic query optimization. 
+ user | geqo_effort | GEQO: effort is used to set the default for other GEQO parameters. + user | geqo_generations | GEQO: number of iterations of the algorithm. + user | geqo_pool_size | GEQO: number of individuals in the population. + user | geqo_seed | GEQO: seed for random path selection. + user | geqo_selection_bias | GEQO: selective pressure within the population. + user | geqo_threshold | Sets the threshold of FROM items beyond which GEQO is used. + user | constraint_exclusion | Enables the planner to use constraints to optimize queries. + user | cost_param | Bitmap controls the use of alternative cost model. + user | cursor_tuple_fraction | Sets the planner's estimate of the fraction of a cursor's rows that will be retrieved. + user | default_statistics_target | Sets the default statistics target. + user | enable_upgrade_merge_lock_mode | If true, use Exclusive Lock mode for deltamerge. + user | from_collapse_limit | Sets the FROM-list size beyond which subqueries are not collapsed. + user | hashagg_table_size | Sets the number of slot in the hash table. + user | join_collapse_limit | Sets the FROM-list size beyond which JOIN constructs are not flattened. + user | max_recursive_times | max recursive times when execute query with recursive-clause. + user | plan_cache_mode | Controls the planner's selection of custom or generic plan. + user | schedule_splits_threshold | The Max count of splits which can be scheduled in memory. + user | td_compatible_truncation | Enable string automatically truncated during insertion. + user | allocate_mem_cost | Sets the planner's estimate of the cost of allocate memory. + user | codegen_cost_threshold | Decided to use LLVM optimization or not. + user | cost_weight_index | Sets the planner's discount when evaluating index cost. + user | cpu_index_tuple_cost | Sets the planner's estimate of the cost of processing each index entry during an index scan. + user | cpu_operator_cost | Sets the planner's estimate of the cost of processing each operator or function call. + user | cpu_tuple_cost | Sets the planner's estimate of the cost of processing each tuple (row). + user | default_limit_rows | Sets the planner's default estimation when limit rows is unknown.Negative value means using percentage of the left tree rows, whereas positive value sets the estimation directly. + user | dngather_min_rows | minimum rows worth do dn gather, 0 meas always, -1 means disable + user | seq_page_cost | Sets the planner's estimate of the cost of a sequentially fetched disk page. + user | acceleration_with_compute_pool | If true, agg/scan may run in compute pool. + user | default_storage_nodegroup | Default storage group for create table. + user | effective_cache_size | Sets the planner's assumption about the size of the disk cache. + user | random_page_cost | Sets the planner's estimate of the cost of a nonsequentially fetched disk page. + user | enable_absolute_tablespace | Enable tablespace using absolute location. + user | enable_beta_opfusion | Enables beta opfusion features. + user | enable_bitmapscan | Enables the planner's use of bitmap-scan plans. + user | enable_bloom_filter | Enable bloom filter check + user | enable_broadcast | Enables the planner's use of broadcast stream plans. + user | enable_codegen | Enable llvm for executor. + user | enable_codegen_print | Enable dump() for llvm function. + user | enable_compress_hll | Enables hll use less memory on datanode. + user | enable_compress_spill | Enables spilling compress. 
+ user | enable_constraint_optimization | Enable optimize query by using informational constraint. + user | enable_hashagg | Enables the planner's use of hashed aggregation plans. + user | enable_hashjoin | Enables the planner's use of hash join plans. + user | enable_dngather | Enables the planner's use of dngather plans. + user | enable_force_vector_engine | Forces to enable the vector engine. + user | enable_hadoop_env | Enable hadoop enviroment. + user | enable_index_nestloop | Enables the planner's use of index-nested join plans. + user | enable_hdfs_predicate_pushdown | Enable hdfs predicate pushdown. + user | enable_hypo_index | Enable hypothetical index for explain. + user | enable_indexonlyscan | Enables the planner's use of index-only-scan plans. + user | enable_indexscan | Enables the planner's use of index-scan plans. + user | enable_material | Enables the planner's use of materialization. + user | enable_mergejoin | Enables the planner's use of merge join plans. + user | enable_nestloop | Enables the planner's use of nested-loop join plans. + user | enable_nodegroup_debug | Enables the planner's node group debug mode. + user | enable_opfusion | Enables opfusion. + user | enable_parallel_ddl | Allow user to implement DDL parallel without dead lock. + user | enable_partition_opfusion | Enables partition opfusion features. + user | enable_partitionwise | Enables the planner's use of partitionwise join plans. + user | enable_seqscan | Enables the planner's use of sequential-scan plans. + user | enable_slot_log | Enables create slot log + user | enable_sonic_hashagg | Enable Sonic hashagg. + user | enable_sonic_hashjoin | Enable Sonic hashjoin. + user | enable_sonic_optspill | Enable Sonic optimized spill. + user | enable_sort | Enables the planner's use of explicit sort steps. + user | enable_tidscan | Enables the planner's use of TID-scan plans. + user | enable_trigger_shipping | Ship a trigger to DN if possible. + user | enable_valuepartition_pruning | Enable optimization for partitioned DFS table to be staticly/dynamically-pruned when possible. + user | enable_vector_engine | Enables the vector engine. + user | expected_computing_nodegroup | Computing node group mode or expected node group for query processing. + user | force_bitmapand | Force the planner's use of bitmap-and plans. + user | opfusion_debug_mode | opfusion debug mode. + user | plan_mode_seed | Specify which plan mode and seed the optimizer generation used. + user | qrw_inlist2join_optmode | Specify inlist2join opimitzation mode. + user | enable_data_replicate | Allows data replicate. + user | RepOriginId | RepOriginId. + user | application_name | Sets the application name to be reported in statistics and logs. + user | connection_info | Sets the connection info to be reported in statistics and logs. + user | debug_pretty_print | Indents parse and plan tree displays. + user | logging_module | enable/disable module logging. + user | gds_debug_mod | Enable GDS-related troubleshoot-logging. + user | plog_merge_age | how long to aggregate profile logs. + user | explain_dna_file | Sets the destination file for explain performance data. + user | backend_flush_after | Number of pages after which previously performed writes are flushed to disk. + user | vacuum_cost_limit | Vacuum cost amount available before napping. + user | vacuum_cost_page_dirty | Vacuum cost for a page dirtied by vacuum. + user | effective_io_concurrency | Number of simultaneous requests that can be handled efficiently by the disk subsystem. 
+ user | vacuum_cost_delay | Vacuum cost delay in milliseconds. + user | vacuum_cost_page_hit | Vacuum cost for a page found in the buffer cache. + user | vacuum_cost_page_miss | Vacuum cost for a page not found in the buffer cache. + user | sql_use_spacelimit | Limit the single sql used space on a single DN. + user | backwrite_quantity | Sets the IO quantity of backwrite buffers used by async dirct IO interface. + user | bulk_read_ring_size | Size of bulk read buffer ring. + user | bulk_write_ring_size | Size of bulk write buffer ring. + user | cstore_backwrite_max_threshold | Cu cache threshold for cstore when do insert by async dirct IO + user | cstore_backwrite_quantity | Each column write threshold for cstore when do insert by async dirct IO + user | cstore_prefetch_quantity | Sets the IO quantity of prefetch CUs used by async dirct IO interface for column store. + user | disable_memory_protect | disable memory protect for query execution. + user | FencedUDFMemoryLimit | Sets the maximum memory to be used for fenced UDF by user. + user | maintenance_work_mem | Sets the maximum memory to be used for maintenance operations. + user | enable_early_free | Using memory early free policy. + user | max_loaded_cudesc | Sets the number of loaded cudesc per column. + user | memory_detail_tracking | Sets the operator name and peak size for triggering the memory logging in that time. + user | memory_tracking_mode | Choose which style to track the memory usage. + user | partition_max_cache_size | The max partition cache size for cstore when do insert + user | partition_mem_batch | Number of partition in-memory batch + user | prefetch_quantity | Sets the IO quantity of prefetch buffers used by async dirct IO interface. + user | psort_work_mem | Sets the maximum memory to be used for partial sort. + user | query_max_mem | Sets the max memory to be reserved for a statement. + user | uncontrolled_memory_context | Sets the white list of MemoryContext allocation. + user | query_mem | Sets the memory to be reserved for a statement. + user | temp_buffers | Sets the maximum number of temporary buffers used by each session. + user | work_mem | Sets the maximum memory to be used for query workspaces. + user | auto_explain_level | auto_explain_level. + user | bbox_dump_count | Sets the maximum number of core dump created by bbox_handler. + user | cgroup_name | Sets the cgroup name to control the queries resource. + user | enable_auto_explain | enable auto explain plans. + user | io_limits | Sets io_limit for each query. + user | io_priority | Sets the IO priority for queries. + user | query_band | Sets query band. + user | resource_track_level | Choose which level info to be collected. + user | session_respool | Sets the session resource pool to control the queries resource. + user | resource_track_cost | Sets the minimum cost to do resource track. + user | resource_track_duration | Sets the minimum duration to record history session info. + user | transaction_pending_time | Sets pend_time for transaction or Stored Procedure. + user | table_skewness_warning_rows | Sets the number of rows returned by DN to enable warning of table skewness. + user | table_skewness_warning_threshold | table skewness threthold + user | ngram_gram_size | N-value for N-gram parser + user | ngram_grapsymbol_ignore | Enables N-gram ignore grapsymbol. 
+ user | check_implicit_conversions | check whether there is an implicit conversion on index column + user | convert_string_to_digit | Convert string to digit when comparing string and digit + user | ngram_punctuation_ignore | Enables N-gram ignore punctuation. + user | acce_min_datasize_per_thread | Used to estimate whether pushdown the plan to the compute pool. + user | cstore_insert_mode | decide destination of data inserted + user | dfs_partition_directory_length | The max length of the value partition directory. + user | enable_save_datachanged_timestamp | If true, save the timestamp when the data of the table changes. + user | explain_perf_mode | Choose which style to print the explain info. + user | hll_default_expthresh | Set parameter expthresh in hll. + user | hll_default_log2m | Set parameter log2m in hll. + user | hll_default_regwidth | Set parameter regwidth in hll. + user | hll_default_sparseon | Set parameter sparseon for hll. + user | hll_max_sparse | Set parameter max_sparse for hll + user | max_active_global_temporary_table | max active global temporary table. + user | show_acce_estimate_detail | If true, show details whether plan is pushed down to the compute pool. + user | skew_option | Choose data skew optimization strategy. + user | behavior_compat_options | compatibility options + user | transform_null_equals | Treats "expr=NULL" as "expr IS NULL". + user | array_nulls | Enables input of NULL elements in arrays. + user | backslash_quote | Sets whether "\'" is allowed in string literals. + user | default_with_oids | Creates new tables with OIDs by default. + user | escape_string_warning | Warn about backslash escapes in ordinary string literals. + user | quote_all_identifiers | When generating SQL fragments, quotes all identifiers. + user | sql_inheritance | Causes subtables to be included by default in various commands. + user | standard_conforming_strings | Causes '...' strings to treat backslashes literally. + user | synchronize_seqscans | Enables synchronized sequential scans. + user | basebackup_timeout | Sets the timeout in seconds for a reponse from gs_basebackup. + user | commit_delay | Sets the delay in microseconds between transaction commit and flushing WAL to disk. + user | commit_siblings | Sets the minimum concurrent open transactions before performing commit_delay. + user | synchronous_commit | Sets the current transaction's synchronization level. + user | retry_ecode_list | Set error code list for CN Retry. + user | enable_twophase_commit | Enable two phase commit when gtm free is on. 
+(601 rows) +``` + diff --git "a/content/zh/post/2022/\351\205\215\347\275\256MogDB-openGauss\347\232\204grafana-\347\232\204dashboard.md" "b/content/zh/post/2022/\351\205\215\347\275\256MogDB-openGauss\347\232\204grafana-\347\232\204dashboard.md" new file mode 100644 index 0000000000000000000000000000000000000000..4b8b2296887ae680c1e7d67936acf6bcbbe4dafa --- /dev/null +++ "b/content/zh/post/2022/\351\205\215\347\275\256MogDB-openGauss\347\232\204grafana-\347\232\204dashboard.md" @@ -0,0 +1,85 @@ ++++ + +title = "配置MogDB/openGauss的grafana 的dashboard" + +date = "2021-12-27" + +tags = [ "配置MogDB/openGauss的grafana 的dashboard"] + +archives = "2021-12" + +author = "高云龙 " + +summary = "配置MogDB/openGauss的grafana 的dashboard" + +img = "/zh/post/2022/title/img8.png" + +times = "12:30" + ++++ + +# 配置MogDB/openGauss的grafana 的dashboard + +## **概述** + +我们已经介绍了[prometheus + grafana + opengauss\_exporter](https://www.modb.pro/db/173483)完成对MogDB/openGauss 数据库的监控,但这只是第一步,我们还需要通过grafana的dashboard查看各个关注的指标项,本文主要介绍dashboard的配置。 + +## **监控指标汇总** + +数据源选择的是prometheus,主要关注的监控指标分为:基础信息、内存信息、连接信息、复制信息、锁及等待事件、统计信息、query信息以及数据库对象 + +![](figures/20211204-cfc47e9a-4272-48e2-9fba-ab5a17c9b323.png) + +- **基础信息** + + 基础信息是运维人员比较关注的,有变化第一时间可以看到的信息,比如实例IP、数据库版本、数据库运行时间、exporter状态、exporter运行时间等等 + + ![](figures/20211204-183e159b-ef0f-4134-b134-71f99ba6e89a.png) + +- **内存信息** + + 展示数据库内存总体使用情况,按会话状态分组占用内存情况,内存上下文占用内存情况以及占用内存最多的session及sql文本 + + ![](figures/20211204-ffad91b6-007a-441c-8af8-835a9c0e0597.png) + + ![](figures/20211204-b6e374da-906c-4f47-bc31-96f0ca3037fa.png) + +- **连接信息** + + 连接数总体使用情况,各状态连接使用情况以及各应用连接数 + + ![](figures/20211204-ec617df5-639c-43a2-a45e-5d84738909c5.png) + +- **复制信息** + + 复制槽使用占比、复制槽延时、备节点信息及主备之间的延迟 + + ![](figures/20211204-c0cfe4c4-d76b-4a8c-bd04-7a2f81f603a6.png) + +- **锁及等待事件** + + 锁阻塞源信息,锁阻塞详情,锁类型分布情况,锁冲突及死锁检测,等待事件汇总及等待时间汇总信息 + + ![](figures/20211204-aec67dd0-2b24-4f75-8d74-9ea4b2a22edd.png) + + ![](figures/20211204-cf9d6243-d31c-4e37-aa26-953e2822e0c1.png) + +- **统计信息** + + ![](figures/20211204-c8674984-9927-4b9d-bdde-fb9725ea88ee.png) + +- **query信息** + + ![](figures/20211204-41c59db9-f61d-4dae-b29d-7036223ba567.png) + +- **数据库对象** + + ![](figures/20211204-25c40a97-f135-48be-af18-f1fe9986db5b.png) + + +## **json文件下载地址** + +[exporter监控单数据库实例](https://www.modb.pro/download/272899) + +[exporter监控多数据库实例](https://www.modb.pro/download/293587) + diff --git a/content/zh/post/Frank/images/58eccf60-364f-424b-9785-ecad541fc26f.png b/content/zh/post/Frank/images/58eccf60-364f-424b-9785-ecad541fc26f.png new file mode 100644 index 0000000000000000000000000000000000000000..66f29be495c732515ba0312c0bdaa1d47b43d518 Binary files /dev/null and b/content/zh/post/Frank/images/58eccf60-364f-424b-9785-ecad541fc26f.png differ diff --git a/content/zh/post/Frank/images/8f11c785-f027-47b5-a1ba-726edaacb2f2.png b/content/zh/post/Frank/images/8f11c785-f027-47b5-a1ba-726edaacb2f2.png new file mode 100644 index 0000000000000000000000000000000000000000..9ea79b2c45f42df98fc615b98044dbb8e977c580 Binary files /dev/null and b/content/zh/post/Frank/images/8f11c785-f027-47b5-a1ba-726edaacb2f2.png differ diff --git a/content/zh/post/Frank/images/SkGqKxzpBFRYbQ3rSQpTWQnIQr8DenGIOcf61GaVkfY.png b/content/zh/post/Frank/images/SkGqKxzpBFRYbQ3rSQpTWQnIQr8DenGIOcf61GaVkfY.png new file mode 100644 index 0000000000000000000000000000000000000000..429bc38434a64b0118b2a4c46c519d986a23ec7f Binary files /dev/null and b/content/zh/post/Frank/images/SkGqKxzpBFRYbQ3rSQpTWQnIQr8DenGIOcf61GaVkfY.png differ diff --git 
a/content/zh/post/Frank/images/c73d9245-4405-40e8-89ea-3db746426cc9.png b/content/zh/post/Frank/images/c73d9245-4405-40e8-89ea-3db746426cc9.png new file mode 100644 index 0000000000000000000000000000000000000000..29631d7a1e5531a7a7b8a55d895dfa1abe6c89f1 Binary files /dev/null and b/content/zh/post/Frank/images/c73d9245-4405-40e8-89ea-3db746426cc9.png differ diff --git a/content/zh/post/Frank/images/qOrAkFDRBKSLD9HUwqbhpDj7gLXJxqdt1MWtava5aHY.png b/content/zh/post/Frank/images/qOrAkFDRBKSLD9HUwqbhpDj7gLXJxqdt1MWtava5aHY.png new file mode 100644 index 0000000000000000000000000000000000000000..9bed683ccfac87c53bfc6fb12851a166fd96b2da Binary files /dev/null and b/content/zh/post/Frank/images/qOrAkFDRBKSLD9HUwqbhpDj7gLXJxqdt1MWtava5aHY.png differ diff --git "a/content/zh/post/Frank/openGauss 3.0.0 \350\275\273\351\207\217\347\211\210\351\203\250\347\275\262.md" "b/content/zh/post/Frank/openGauss 3.0.0 \350\275\273\351\207\217\347\211\210\351\203\250\347\275\262.md" new file mode 100644 index 0000000000000000000000000000000000000000..ed5d68195f27dc892e86d6859391610d91e446de --- /dev/null +++ "b/content/zh/post/Frank/openGauss 3.0.0 \350\275\273\351\207\217\347\211\210\351\203\250\347\275\262.md" @@ -0,0 +1,166 @@ ++++ + +title = "OpenGauss3.0.0 轻量版部署" + +date = "2022-05-16" + +tags = ["OpenGauss3.0.0"] + +archives = "2020-05" + +author = "xingchen" + +summary = "OpenGauss3.0.0" + +img = "/zh/post/xingchen/title/58eccf60-364f-424b-9785-ecad541fc26f.png" + +times = "18:40" + ++++ +# OpenGauss3.0.0 轻量版部署 +# 背景 +openGauss 3.0.0 版本是openGauss社区继2.0.0之后发布的又一个Release版本,版本维护生命周期为3.5年。3.0.0版本在高性能、高可用、高安全、高智能、工具链等方面都有持续创新和突破。3.0.0版本除了包含企业版外同时发布了openGauss社区首个轻量版(Lite 版)。 + +今天是openGauss 3.0.0版本发布的第一天,忍不住搞一下试试~~ + +# 实验环境 +```bash +Architecture: aarch64 +CPU op-mode(s): 64-bit +Byte Order: Little Endian +CPU(s): 8 +On-line CPU(s) list: 0-7 +Thread(s) per core: 1 +Core(s) per socket: 8 +Socket(s): 1 +NUMA node(s): 1 +Vendor ID: HiSilicon +Model: 0 +Model name: Kunpeng-920 +Stepping: 0x1 +CPU max MHz: 2600.0000 +CPU min MHz: 2600.0000 +BogoMIPS: 200.00 +L1d cache: 512 KiB +L1i cache: 512 KiB +L2 cache: 4 MiB +L3 cache: 32 MiB +NUMA node0 CPU(s): 0-7 +Vulnerability Itlb multihit: Not affected +Vulnerability L1tf: Not affected +Vulnerability Mds: Not affected +Vulnerability Meltdown: Not affected +Vulnerability Spec store bypass: Not affected +Vulnerability Spectre v1: Mitigation; __user pointer sanitization +Vulnerability Spectre v2: Not affected +Vulnerability Tsx async abort: Not affected +Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm jscvt fcma dcpop asimddp asimdfhm +``` + +```bash +[frank@ecs-fc4a ~]$ cat /etc/os-release +NAME="Kylin Linux Advanced Server" +VERSION="V10 (Tercel)" +ID="kylin" +VERSION_ID="V10" +PRETTY_NAME="Kylin Linux Advanced Server V10 (Tercel)" +ANSI_COLOR="0;31" +``` + +# 下载 + +![](images/58eccf60-364f-424b-9785-ecad541fc26f.png) +```bash +[frank@ecs-fc4a ~]$ mkdir opengauss +[frank@ecs-fc4a ~]$ wget -c https://opengauss.obs.cn-south-1.myhuaweicloud.com/3.0.0/arm/openGauss-Lite-3.0.0-openEuler-aarch64.tar.gz +--2022-04-02 11:37:41-- https://opengauss.obs.cn-south-1.myhuaweicloud.com/3.0.0/arm/openGauss-Lite-3.0.0-openEuler-aarch64.tar.gz +Resolving opengauss.obs.cn-south-1.myhuaweicloud.com (opengauss.obs.cn-south-1.myhuaweicloud.com)... 139.159.208.230, 121.37.63.38, 139.159.208.67, ... +Connecting to opengauss.obs.cn-south-1.myhuaweicloud.com (opengauss.obs.cn-south-1.myhuaweicloud.com)|139.159.208.230|:443... connected. 
+HTTP request sent, awaiting response... 200 OK
+Length: 21142255 (20M) [application/gzip]
+Saving to: ‘openGauss-Lite-3.0.0-openEuler-aarch64.tar.gz’
+
+openGauss-Lite-3.0.0-openEuler-aarch64.ta 100%[==================================================================================>] 20.16M 22.4MB/s in 0.9s
+
+2022-04-02 11:37:42 (22.4 MB/s) - ‘openGauss-Lite-3.0.0-openEuler-aarch64.tar.gz’ saved [21142255/21142255]
+```
+
+# 安装
+- 解压
+
+```bash
+[frank@ecs-fc4a ~]$ mkdir opengauss
+[frank@ecs-fc4a ~]$ cd opengauss/
+[frank@ecs-fc4a opengauss]$ tar -zxf openGauss-Lite-3.0.0-openEuler-aarch64.tar.gz
+[frank@ecs-fc4a opengauss]$ ll
+total 41684
+drwx------ 2 frank frank 4096 Apr 1 18:33 dependency
+-rw------- 1 frank frank 38398 Apr 1 18:33 install.sh
+-rw------- 1 frank frank 21032901 Apr 1 18:33 openGauss-Lite-3.0.0-openEuler-aarch64.bin
+-rw------- 1 frank frank 65 Apr 1 18:33 openGauss-Lite-3.0.0-openEuler-aarch64.sha256
+-rw------- 1 frank frank 21142255 Apr 1 18:39 openGauss-Lite-3.0.0-openEuler-aarch64.tar.gz
+-rw------- 1 frank frank 742 Apr 1 18:33 opengauss_lite.conf
+-rw------- 1 frank frank 2852 Apr 1 18:33 uninstall.sh
+-rw------- 1 frank frank 38674 Apr 1 18:33 upgrade_common.sh
+-rw------- 1 frank frank 634 Apr 1 18:33 upgrade_config.sh
+-rw------- 1 frank frank 392 Apr 1 18:33 upgrade_errorcode.sh
+-rw------- 1 frank frank 1100 Apr 1 18:33 upgrade_GAUSSV5.sh
+-rw------- 1 frank frank 65 Apr 1 18:33 upgrade_sql.sha256
+-rw------- 1 frank frank 385518 Apr 1 18:33 upgrade_sql.tar.gz
+-rw------- 1 frank frank 37 Apr 1 18:33 version.cfg
+```
+
+- 修改install.sh
+由于目前版本不支持kylin v10,所以这里需要对install.sh进行修改,伪装成`openEuler`。
+![](images/c73d9245-4405-40e8-89ea-3db746426cc9.png)
+
+- 安装
+
+```bash
+[frank@ecs-fc4a opengauss]$ echo OpenGauss@123 | sh ./install.sh --mode single -D ~/opengauss/data -R ~/opengauss/install --start
+[frank@ecs-fc4a opengauss]$ source /home/frank/.bashrc
+```
+
+# 验证
+```bash
+[frank@ecs-fc4a opengauss]$ ps ux | grep gaussdb
+frank 10446 0.0 1.0 2451136 167808 ? Ssl 11:56 0:00 /home/frank/opengauss/install/bin/gaussdb -D /home/frank/opengauss/data
+frank 10635 0.0 0.0 214016 1536 pts/0 S+ 12:36 0:00 grep gaussdb
+```
+
+```bash
+[frank@ecs-fc4a opengauss]$ gs_ctl query -D /home/frank/opengauss/data
+[2022-04-02 12:37:26.767][10661][][gs_ctl]: gs_ctl query ,datadir is /home/frank/opengauss/data
+ HA state:
+ local_role : Normal
+ static_connections : 0
+ db_state : Normal
+ detail_information : Normal
+
+ Senders info:
+No information
+ Receiver info:
+No information
+```
+
+# gsql连接openGauss
+```SQL
+[frank@ecs-fc4a opengauss]$ gsql -d postgres
+gsql ((openGauss 3.0.0 build 02c14696) compiled at 2022-04-01 18:28:23 commit 0 last mr release)
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
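+-- 连接成功,下面用元命令 \l 查看数据库列表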
+
+openGauss=# \l
+ List of databases
+ Name | Owner | Encoding | Collate | Ctype | Access privileges
+-----------+-------+----------+-------------+-------------+-------------------
+ postgres | frank | UTF8 | en_US.UTF-8 | en_US.UTF-8 |
+ template0 | frank | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/frank +
+ | | | | | frank=CTc/frank
+ template1 | frank | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/frank +
+ | | | | | frank=CTc/frank
+(3 rows)
+
+openGauss=#
+
+```
diff --git "a/content/zh/post/Frank/openGauss MogDB WDR\346\212\245\345\221\212\350\257\246\350\247\243.md" "b/content/zh/post/Frank/openGauss MogDB WDR\346\212\245\345\221\212\350\257\246\350\247\243.md"
new file mode 100644
index 0000000000000000000000000000000000000000..a14a2e116b5f61e4380f243ff7eef568331fbc37
--- /dev/null
+++ "b/content/zh/post/Frank/openGauss MogDB WDR\346\212\245\345\221\212\350\257\246\350\247\243.md"
@@ -0,0 +1,279 @@
++++
+
+title = "openGauss/MogDB WDR报告详解"
+
+date = "2022-05-16"
+
+tags = ["openGauss/MogDB WDR报告详解"]
+
+archives = "2022-05"
+
+author = "xingchen"
+
+summary = "openGauss/MogDB WDR报告详解"
+
+img = "/zh/post/xingchen/title/qOrAkFDRBKSLD9HUwqbhpDj7gLXJxqdt1MWtava5aHY.png"
+
+times = "18:40"
+
++++
+# openGauss/MogDB WDR报告详解
+# 摘要
+> WDR(Workload Diagnosis Report)**负载诊断报告**,是openGauss的工作负载诊断报告,常用于判断openGauss长期性能问题。WDR报告基于两次不同时间点系统的性能快照数据,生成这两个时间点之间的性能表现报表。
+
+# 开启WDR快照
+## 参数简介
+### enable\_wdr\_snapshot
+**参数说明**: 是否开启数据库监控快照功能。
+
+该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。
+
+**取值范围**: 布尔型
+
+* on: 打开数据库监控快照功能。
+* off: 关闭数据库监控快照功能。
+
+**默认值**: off
+
+### wdr\_snapshot\_retention\_days
+**参数说明**: 系统中数据库监控快照数据的保留天数,超过设置的值之后,系统每隔wdr\_snapshot\_interval时间间隔,清理snapshot\_id最小的快照数据。
+
+该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。
+
+**取值范围**: 整型,1~8。
+
+**默认值**: 8
+
+### wdr\_snapshot\_query\_timeout
+**参数说明**: 系统执行数据库监控快照操作时,设置快照操作相关的sql语句的执行超时时间。如果语句超过设置的时间没有执行完并返回结果,则本次快照操作失败。
+
+该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。
+
+**取值范围**: 整型,100~INT\_MAX(秒)。
+
+**默认值**: 100s
+
+### wdr\_snapshot\_interval
+**参数说明**: 后台线程Snapshot自动对数据库监控数据执行快照操作的时间间隔。
+
+该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。
+
+**取值范围**: 整型,10~60(分钟)。
+
+**默认值**: 1h
+
+## 查看当前wdr相关配置
+```sql
+postgres@omm:local=#select name, setting from pg_settings where name like '%wdr%';
+ name | setting
+-----------------------------+---------
+ enable_wdr_snapshot | off
+ wdr_snapshot_interval | 60
+ wdr_snapshot_query_timeout | 100
+ wdr_snapshot_retention_days | 8
+(4 rows)
+```
+## 开启wdr快照
+```bash
+omm@107707f966f0:/var/lib/mogdb/data$ gs_guc reload -D $PGDATA -c "enable_wdr_snapshot=on"
+expected instance path: [/var/lib/mogdb/data/postgresql.conf]
+gs_guc reload: enable_wdr_snapshot=on: [/var/lib/mogdb/data/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+omm@107707f966f0:/var/lib/mogdb/data$ gsql -d postgres -r
+gsql ((MogDB 2.1.1 build b5f25b20) compiled at 2022-03-21 14:42:30 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
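+# 重新登录后复查参数,确认 enable_wdr_snapshot 已生效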
+ +postgres@omm:local=#select name, setting from pg_settings where name like '%wdr%'; + name | setting +-----------------------------+--------- + enable_wdr_snapshot | on + wdr_snapshot_interval | 60 + wdr_snapshot_query_timeout | 100 + wdr_snapshot_retention_days | 8 +(4 rows) +``` +## 查看快照统计表 +```sql +postgres@omm:local=#show search_path; + search_path +---------------- + "$user",public +(1 row) + +postgres@omm:local=#alter session set search_path=snapshot; +SET +postgres@omm:local=#show search_path; + search_path +------------- + snapshot +(1 row) + +postgres@omm:local=#\d + List of relations + Schema | Name | Type | Owner | Storage +----------+------------------------------------------+----------+-------+---------------------------------- + snapshot | snap_class_vital_info | table | omm | {orientation=row,compression=no} + snapshot | snap_global_bgwriter_stat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_ckpt_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_config_settings | table | omm | {orientation=row,compression=no} + snapshot | snap_global_double_write_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_file_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_file_redo_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_instance_time | table | omm | {orientation=row,compression=no} + snapshot | snap_global_memory_node_detail | table | omm | {orientation=row,compression=no} + snapshot | snap_global_os_runtime | table | omm | {orientation=row,compression=no} + snapshot | snap_global_os_threads | table | omm | {orientation=row,compression=no} + snapshot | snap_global_pagewriter_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_record_reset_time | table | omm | {orientation=row,compression=no} + snapshot | snap_global_recovery_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_redo_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_rel_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_replication_slots | table | omm | {orientation=row,compression=no} + snapshot | snap_global_replication_stat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_rto_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_shared_memory_detail | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_all_tables | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_bad_block | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_database | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_database_conflicts | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_db_cu | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_user_functions | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statement_count | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statio_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statio_all_sequences | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statio_all_tables | table | omm | {orientation=row,compression=no} + snapshot | 
snap_global_thread_wait_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_threadpool_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_transactions_prepared_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_global_transactions_running_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_global_wait_events | table | omm | {orientation=row,compression=no} + snapshot | snap_global_workload_transaction | table | omm | {orientation=row,compression=no} + snapshot | snap_seq | sequence | omm | + snapshot | snap_statement_responsetime_percentile | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_file_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_file_redo_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_rel_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_all_tables | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_bad_block | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_database | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_database_conflicts | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_user_functions | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statement | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statement_count | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statio_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statio_all_sequences | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statio_all_tables | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_transactions_prepared_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_transactions_running_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_user_login | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_workload_sql_count | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_workload_sql_elapse_time | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_workload_transaction | table | omm | {orientation=row,compression=no} + snapshot | snapshot | table | omm | {orientation=row,compression=no} + snapshot | tables_snap_timestamp | table | omm | {orientation=row,compression=no} +(61 rows) +``` + + +# 手动生产快照 +### SNAPSHOT.SNAPSHOT +SNAPSHOT表记录当前系统中存储的WDR快照数据的索引信息、开始、结束时间。只能在系统库中查询到结果,在用户库中无法查询。 + +**表 1** SNAPSHOT表属性 + +|名称|类型|描述|示例| +| ----- | ----- | ----- | ----- | +|snapshot\_id|bigint|WDR快照序号。|1| +|start\_ts|timestamp|WDR快照的开始时间。|2019-12-28 17:11:27.423742+08| +|end\_ts|timestamp|WDR快照的结束时间。|2019-12-28 17:11:43.67726+08| + + + +```sql +postgres@omm:local=#select * from snapshot.snapshot; + snapshot_id | start_ts | end_ts +-------------+-------------------------------+------------------------------- + 1 | 2022-05-02 11:19:37.239977+00 | 2022-05-02 11:19:37.865708+00 +(1 row) + +postgres@omm:local=#select create_wdr_snapshot(); + create_wdr_snapshot +----------------------------------------- + WDR snapshot request has been submitted +(1 row) + +postgres@omm:local=#select * from snapshot.snapshot; + 
snapshot_id | start_ts | end_ts
+-------------+-------------------------------+-------------------------------
+ 1 | 2022-05-02 11:19:37.239977+00 | 2022-05-02 11:19:37.865708+00
+ 2 | 2022-05-02 11:42:28.047396+00 | 2022-05-02 11:42:28.617173+00
+(2 rows)
+```
+# 生成性能报告
+## a. 执行如下命令生成格式化性能报告文件。
+```Plain Text
+\a \t \o 服务器文件路径
+```
+上述命令涉及参数说明如下:
+
+* \\a: 切换非对齐模式。
+* \\t: 切换输出的字段名的信息和行计数脚注。
+* \\o: 把所有的查询结果发送至服务器文件里。
+* 服务器文件路径:生成性能报告文件存放路径。用户需要拥有此路径的读写权限。
+
+## b. 执行如下命令将查询到的信息写入性能报告中。
+```Plain Text
+select generate_wdr_report(begin_snap_id bigint, end_snap_id bigint, report_type cstring, report_scope cstring, node_name cstring);
+```
+命令中涉及的参数说明如下。
+
+**表 3** generate\_wdr\_report函数参数说明
+
+|参数|说明|取值范围|
+| ----- | ----- | ----- |
+|begin\_snap\_id|查询时间段开始的snapshot的id(表snapshot.snapshot中的snapshot\_id)。|\-|
+|end\_snap\_id|查询时间段结束snapshot的id。默认end\_snap\_id大于begin\_snap\_id(表snapshot.snapshot中的snapshot\_id)。|\-|
+|report\_type|指定生成report的类型。例如,summary/detail/all。|summary: 汇总数据。<br>detail: 明细数据。<br>all: 包含summary和detail。|
+|report\_scope|指定生成report的范围,可以为cluster或者node。|cluster: 数据库级别的信息。<br>node: 节点级别的信息。|
+|node\_name|在report\_scope指定为node时,需要把该参数指定为对应节点的名称。(节点名称可以执行select \* from pg\_node\_env;查询)。在report\_scope为cluster时,该值可以指定为省略、空或者为NULL。| |
+
+执行操作
+
+```sql
+postgres@omm:local=#select * from pg_node_env;
+ node_name | host | process | port | installpath | datapath | log_directory
+-----------+-----------+---------+------+------------------+---------------------+---------------
+ mogdb | localhost | 1 | 5432 | /usr/local/mogdb | /var/lib/mogdb/data | pg_log
+(1 row)
+postgres@omm:local=#
+postgres@omm:local=#\a \t \o wdr_20220502.html
+postgres@omm:local=#select generate_wdr_report(1,2,'all','node','mogdb');
+```
+## c. 执行如下命令关闭输出选项及格式化输出命令。
+```Plain Text
+\o \a \t
+```
+
+# 查看报告
+![image](images/qOrAkFDRBKSLD9HUwqbhpDj7gLXJxqdt1MWtava5aHY.png)
+
+![image](images/SkGqKxzpBFRYbQ3rSQpTWQnIQr8DenGIOcf61GaVkfY.png)
+
diff --git "a/content/zh/post/Frank/openGauss MogDB\350\260\203\347\224\250C FUNCTION.md" "b/content/zh/post/Frank/openGauss MogDB\350\260\203\347\224\250C FUNCTION.md"
new file mode 100644
index 0000000000000000000000000000000000000000..0c50e5cffbabcbb8c1c4cf149af2ef1a13ba4600
--- /dev/null
+++ "b/content/zh/post/Frank/openGauss MogDB\350\260\203\347\224\250C FUNCTION.md"
@@ -0,0 +1,182 @@
++++
+
+title = "openGauss/MogDB调用C FUNCTION"
+
+date = "2022-05-16"
+
+tags = ["openGauss/MogDB调用C FUNCTION"]
+
+archives = "2022-05"
+
+author = "xingchen"
+
+summary = "openGauss/MogDB调用C FUNCTION"
+
+img = "/zh/post/xingchen/title/8f11c785-f027-47b5-a1ba-726edaacb2f2.png"
+
+times = "18:40"
+
++++
+# openGauss/MogDB调用C FUNCTION
+# 摘要
+> 之前写过一篇关于[postgresql自定义函数实现,通过contrib模块进行扩展](https://blog.csdn.net/xk_xx/article/details/123011397 "postgresql自定义函数实现,通过contrib模块进行扩展")的帖子。今天和恩墨工程师进行了一些交流,了解到在MogDB中也可以实现同样的功能。原以为需要完整的openGauss源码才能完成,但在恩墨工程师的指点下了解到,通过既有官网版本的安装包就可以进行插件开发,而且使用postgres的C FUNCTION要比开发插件更加容易些。也感谢恩墨专家提供的线索和思路:+1: :+1:
+
+# 环境准备
+* 安装MogDB
+参考官方文档,写的已经很详细了。
+* 服务器环境
+本地虚拟机 centos 7.9
+
+***注意:尽量进入******omm******用户下进行编译,可以避免一些不必要的环境问题***
+
+# 代码
+* C代码
+基本与postgres插件开发一样,关键是4,5,6三行。
+
+```cpp
+#include "postgres.h"
+#include "fmgr.h"
+
+PG_MODULE_MAGIC;
+extern "C" Datum add_ab(PG_FUNCTION_ARGS);
+PG_FUNCTION_INFO_V1(add_ab);
+
+Datum
+add_ab(PG_FUNCTION_ARGS)
+{
+    int32 arg_a = PG_GETARG_INT32(0);
+    int32 arg_b = PG_GETARG_INT32(1);
+
+    PG_RETURN_INT32(arg_a + arg_b);
+}
+```
+* CMakeLists.txt
+
+```makefile
+cmake_minimum_required (VERSION 2.8)
+
+project (gs_plug)
+set(CMAKE_CXX_FLAGS "-Wall -std=c++11 -Wall")
+set(CMAKE_CXX_FLAGS_DEBUG "-g3")
+set(CMAKE_CXX_FLAGS_RELEASE "-O2")
+set(CMAKE_BUILD_TYPE Debug)
+
+set(MOG_INCLUDE /opt/mogdb/app/include/postgresql/server)
+set(MOG_LIBPATH /opt/mogdb/app/lib/postgresql/proc_srclib)
+include_directories(${MOG_INCLUDE})
+
+aux_source_directory(. DIR_SRCS)
+add_library (${PROJECT_NAME} SHARED ${DIR_SRCS})
+
+install(TARGETS ${PROJECT_NAME} DESTINATION ${MOG_LIBPATH})
+```
+***要点1:获取包含头文件的目录***
+
+```Plain Text
+[omm@vmpc funcs]$ pg_config --includedir
+/opt/mogdb/app/include
+```
+所需头文件路径:`pg_config --includedir`/postgresql/server
+
+***要点2:C函数安装路径***
+
+```Plain Text
+[omm@vmpc funcs]$ pg_config --pkglibdir
+/opt/mogdb/app/lib/postgresql
+```
+安装路径:`pg_config --pkglibdir`/proc\_srclib/
+
+# 编译 & 安装
+```Plain Text
+[omm@vmpc funcs]$ mkdir build
+[omm@vmpc funcs]$ cd build/
+[omm@vmpc build]$ cmake ../
+CMake Deprecation Warning at CMakeLists.txt:1 (cmake_minimum_required):
+  Compatibility with CMake < 2.8.12 will be removed from a future version of
+  CMake.
+ + Update the VERSION argument value or use a ... suffix to tell + CMake that the project does not need compatibility with older versions. + + +-- The C compiler identification is GNU 4.8.5 +-- The CXX compiler identification is GNU 4.8.5 +-- Detecting C compiler ABI info +-- Detecting C compiler ABI info - done +-- Check for working C compiler: /bin/cc - skipped +-- Detecting C compile features +-- Detecting C compile features - done +-- Detecting CXX compiler ABI info +-- Detecting CXX compiler ABI info - done +-- Check for working CXX compiler: /bin/c++ - skipped +-- Detecting CXX compile features +-- Detecting CXX compile features - done +-- Configuring done +-- Generating done +-- Build files have been written to: /opt/mogdb/funcs/build +[omm@vmpc build]$ make +[ 50%] Building CXX object CMakeFiles/gs_plug.dir/testfunc.cpp.o +[100%] Linking CXX shared library libgs_plug.so +[100%] Built target gs_plug +[omm@vmpc build]$ make install +Consolidate compiler generated dependencies of target gs_plug +[100%] Built target gs_plug +Install the project... +-- Install configuration: "Debug" +-- Installing: /opt/mogdb/app/lib/proc_srclib/libgs_plug.so +``` +**依次执行如下命令** + +```Plain Text +mkdir build +cd build +cmake ../ +make +make install +``` +**确认安装** + +```Plain Text +[omm@vmpc build]$ ll /opt/mogdb/app/lib/proc_srclib/libgs_plug.so +-rwxr-xr-x. 1 omm dbgrp 215696 Apr 2 00:17 /opt/mogdb/app/lib/proc_srclib/libgs_plug.so + +``` +# 验证 +* 链接mogdb + +```Plain Text +[omm@vmpc ~]$ pgcli -p 26000 -d postgres +Server: PostgreSQL 9.2.4 +Version: 3.4.1 +Home: http://pgcli.com +postgres> +``` +* 创建C FUNCTION + +```sql +postgres> CREATE FUNCTION add_ab(a int ,b int ) RETURNS integer + AS 'testfunc.so', 'add_ab' + LANGUAGE C STRICT; +CREATE FUNCTION +Time: 0.039s +``` +* 查看函数 + +![image](images/8f11c785-f027-47b5-a1ba-726edaacb2f2.png) + +* 调用函数 + +```sql +postgres> select add_ab(a := 4, b := 2); ++--------+ +| add_ab | +|--------| +| 6 | ++--------+ +SELECT 1 +Time: 0.033s +postgres> + +``` \ No newline at end of file diff --git "a/content/zh/post/July/JDBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB-opengauss.md" "b/content/zh/post/July/JDBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB-opengauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..4a4cbe60cfd94cf5ea33493106872846d3de8350 --- /dev/null +++ "b/content/zh/post/July/JDBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB-opengauss.md" @@ -0,0 +1,223 @@ ++++ + +title = "JDBC驱动连接MogDB/opengauss" + +date = "2021-11-21" + +tags = [ "JDBC驱动连接MogDB/opengauss"] + +archives = "2021-11" + +author = "张凡" + +summary = "JDBC驱动连接MogDB/opengauss" + +img = "/zh/post/July/title/img1.png" + +times = "12:30" + ++++ + +# JDBC驱动连接MogDB/opengauss + + + +## 一、环境说明 + +``` +[root@node1 ~]# cat /etc/redhat-release +CentOS Linux release 7.6.1810 (Core) +[root@node1 ext]# java -version +java version "1.8.0_301" +Java(TM) SE Runtime Environment (build 1.8.0_301-b09) +Java HotSpot(TM) 64-Bit Server VM (build 25.301-b09, mixed mode) +``` + +## 二、数据库配置 + +1.配置数据库参数,允许用户登录 + +数据库配置文件postgresql.conf和pg\_hba.conf中加上如下内容 + +``` +[omm@node1 data]$ tail -4 postgresql.conf +listen_addresses = '0.0.0.0' +password_encryption_type = 0 +log_directory = 'pg_log' +remote_read_mode=non_authentication +[omm@node1 data]$ tail -1 pg_hba.conf +host all all 0.0.0.0/0 md5 +``` + +重启数据库 + +``` +gs_om -t stop +gs_om -t start +``` + +2.创建连接用户及数据库 + +``` +postgres=# create database jdbc_db; +CREATE DATABASE +postgres=# create user jdbc_usr password 
'jdbc@123';
+NOTICE: The encrypted password contains MD5 ciphertext, which is not secure.
+CREATE ROLE
+postgres=# alter user jdbc_usr sysadmin;
+ALTER ROLE
+postgres=#
+```
+
+## 三、Java程序编写
+
+```
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class ConnTest {
+    //创建数据库连接。
+    public static Connection GetConnection(String username, String passwd) {
+        String driver = "org.postgresql.Driver";
+        String sourceURL = "jdbc:postgresql://8.131.53.xxx:26000/jdbc_db";
+        Connection conn = null;
+        try {
+            //加载数据库驱动。
+            Class.forName(driver).newInstance();
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        try {
+            //创建数据库连接,这里使用传入的用户名和密码。
+            conn = DriverManager.getConnection(sourceURL, username, passwd);
+            System.out.println("连接成功!");
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+        return conn;
+    }
+    ;
+    /**
+     * 把查询到的结果放入ResultSet
+     * 通过迭代的方法去读取结果集中的查询结果
+     * 输出查询结果
+     */
+    public static void Select(Connection conn) {
+        PreparedStatement ps = null;
+        ResultSet rs = null;
+        String sql = "SELECT version()";
+        try {
+            ps = conn.prepareStatement(sql);
+            rs = ps.executeQuery(); //将查询的结果放入ResultSet结果集中
+            /**
+             * 从结果集ResultSet中迭代取出查询结果并输出
+             */
+            while(rs.next()) {
+//                String values = rs.getString("id");
+                String values = rs.getString("version");
+
+                System.out.println( "数据库版本:"+values);
+            }
+        } catch (SQLException e) {
+            System.out.println("操作失败o(╥﹏╥)o");
+            e.printStackTrace();
+        }
+    }
+    /**
+     * 主程序,逐步调用各静态方法。
+     * @param args
+     */
+    public static void main(String[] args) {
+        //创建数据库连接。
+        Connection conn = GetConnection("jdbc_usr", "jdbc@123");
+        Select(conn);
+        //关闭数据库连接。
+        try {
+            conn.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+}
+```
+
+## 四、程序测试
+
+1.放置JDBC驱动
+
+将JDBC驱动放到jdk中的如下目录,让程序能找到驱动包。
+
+```
+[root@node1 ext]# pwd
+/usr/java/jdk1.8.0_301-amd64/jre/lib/ext
+[root@node1 ext]# wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.0.0-JDBC.tar.gz
+2021-12-01 17:30:52 (13.2 MB/s) - 已保存 “openGauss-2.0.0-JDBC.tar.gz” [4937896/4937896])
+[root@node1 ext]# tar -zxvf openGauss-2.0.0-JDBC.tar.gz
+postgresql.jar
+```
+
+2.运行程序
+
+这里采用了两种方式运行程序:一种是单个程序直接运行,另一种则是将Java程序打成jar包再运行,下面简单介绍一下。
+
+(1)单个程序运行
+
+```
+[root@node1 hello]# ls
+conn.jar ConnTest.java MANIFEST.MF postgresql.jar
+[root@node1 hello]# pwd
+/root/java_program/hello
+[root@node1 hello]# javac ConnTest.java
+[root@node1 hello]# java ConnTest
+```
+
+连接成功!
+
+数据库版本:PostgreSQL 9.2.4 (MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:12:59 commit 0 last mr on x86\_64-unknown-linux-gnu, compiled by g++ (GCC) 7.3.0, 64-bit
+
+(2)jar包运行
+
+编译ConnTest.java
+
+```
+[root@node1 hello]# javac ConnTest.java
+```
+
+编写MANIFEST.MF文件
+
+MANIFEST.MF文件介绍
+
+META-INF文件夹相当于一个信息包,目录中的文件和目录获得Java 2平台的认可与解释,用来配置应用程序、扩展程序、类加载器和服务。这个文件夹和其中的 MANIFEST.MF文件,在用jar打包时自动生成。执行jar文件的时候,这个jar里是需要具备 META-INF/MANIFEST.MF的,否则java -jar就找不到main class。
+
+```
+[root@node1 hello]# cat MANIFEST.MF
+Manifest-Version: 1.0
+Main-Class: ConnTest
+```
+
+程序打包
+
+```
+[root@node1 hello]# jar -cvfm conn.jar MANIFEST.MF ConnTest.class
+已添加清单
+正在添加: ConnTest.class(输入 = 2126) (输出 = 1212)(压缩了 42%)
+```
+
+运行程序
+
+```
+[root@node1 hello]# java -jar conn.jar
+连接成功!
+数据库版本:PostgreSQL 9.2.4 (MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:12:59 commit 0 last mr on x86_64-unknown-linux-gnu, compiled by g++ (GCC) 7.3.0, 64-bit
+```
+
+## 五、总结
+
+上述文章简单介绍了JDBC连接MogDB数据库:数据库如何配置,以及JDBC驱动如何加载、配置,并在Linux上运行。更多细节参考官方文档https://docs.mogdb.io/zh/mogdb/v2.0.1/1-development-based-on-jdbc-overview
+
diff --git "a/content/zh/post/July/MOGDB-openGauss\347\264\242\345\274\225\346\216\250\350\215\220\345\217\212\350\231\232\346\213\237\347\264\242\345\274\225.md" "b/content/zh/post/July/MOGDB-openGauss\347\264\242\345\274\225\346\216\250\350\215\220\345\217\212\350\231\232\346\213\237\347\264\242\345\274\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..210532b0ed7fb9a66fe05f3ac3179a6ea6683fca
--- /dev/null
+++ "b/content/zh/post/July/MOGDB-openGauss\347\264\242\345\274\225\346\216\250\350\215\220\345\217\212\350\231\232\346\213\237\347\264\242\345\274\225.md"
@@ -0,0 +1,248 @@
++++
+
+title = "MOGDB/openGauss索引推荐及虚拟索引"
+
+date = "2021-12-04"
+
+tags = [ "MOGDB/openGauss索引推荐及虚拟索引"]
+
+archives = "2021-12"
+
+author = "阎书利"
+
+summary = "MOGDB/openGauss索引推荐及虚拟索引"
+
+img = "/zh/post/July/title/img3.png"
+
+times = "12:30"
+
++++
+
+# MOGDB/openGauss索引推荐及虚拟索引
+
+## 索引推荐
+
+在ORACLE的优化中,大家可能接触过SQL Tuning Advisor(SQL调优顾问,STA)。类似地,MOGDB/openGauss的索引推荐(Index-advisor)功能也可以对你的查询进行分析,并提出合理的创建索引建议。ORACLE的STA以意见或建议的形式输出,并给出每一项建议的理由和预期收益;建议可能涉及对象统计信息收集、新索引的创建、SQL语句的重组或SQL概要的创建,你可以选择采纳这些建议来完成SQL语句的调优。MOGDB/openGauss的索引推荐(Index-advisor)与之比较类似,但结果可能不如ORACLE的STA的优化报告详尽。
+
+如下为我对MOGDB/openGauss的索引推荐(Index-advisor)功能的使用测试,包括单条SQL查询索引推荐、Workload级别索引推荐(针对一批SQL语句的索引推荐)等。
+
+## 一、测试数据导入
+
+```
+postgres=# create database ysla;
+CREATE DATABASE
+postgres=# \c ysla
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "ysla" as user "omm".
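+-- 先构造测试数据:tab_ysl_1(3000行,含文本列col3)、tab_ysl_2(1000行),并用ANALYZE收集统计信息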
+ysla=# CREATE TABLE tab_ysl_1 (col1 int, col2 int, col3 text); +CREATE TABLE +ysla=# INSERT INTO tab_ysl_1 VALUES(generate_series(1, 3000),generate_series(1, 3000),repeat( chr(int4(random()*26)+65),4)); +INSERT 0 3000 +ysla=# ANALYZE tab_ysl_1; +ANALYZE +ysla=# CREATE TABLE tab_ysl_2 (col1 int, col2 int); +CREATE TABLE +ysla=# INSERT INTO tab_ysl_2 VALUES(generate_series(1, 1000),generate_series(1, 1000)); +INSERT 0 1000 +ysla=# ANALYZE tab_ysl_2; +ANALYZE +``` + +## 二、单条SQL查询索引推荐 + +如下面所示,用gs\_index\_advise函数即可使用索引推荐,结果中包含表和可以创建索引的列。 + +1.测试where + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 WHERE col1 = 10'); + table | column +-----------+-------- + tab_ysl_1 | (col1) +(1 row) +``` + +2.测试join + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col1 = tab_ysl_2.col1'); + table | column +-----------+-------- + tab_ysl_1 | (col1) + tab_ysl_2 | +(2 rows) +``` + +3.测试多表 + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT count(*), tab_ysl_2.col1 FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col2 = tab_ysl_2.col2 WHERE tab_ysl_2.col2 > 2 GROUP BY tab_ysl_2.col1 ORDER BY tab_ysl_2.col1'); + table | column +-----------+-------- + tab_ysl_1 | (col2) + tab_ysl_2 | (col1) +(2 rows) +``` + +4.测试order by + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT *, col2 FROM tab_ysl_1 ORDER BY 1, 3'); + table | column +-----------+-------- + tab_ysl_1 | +(1 row) + + +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 WHERE col1 > 10 ORDER BY 1,col2'); + table | column +-----------+-------- + tab_ysl_1 | +(1 row) +``` + +5.测试过长字符串 + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 where col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'')'); +ERROR: index_advisor.cpp : 983 : The parameter destMax is too small or parameter count is larger than macro parameter SECUREC_STRING_MAX_LEN. The second case only occures in functions strncat_s/strncpy_s. 
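+-- 可见当SQL中字符串的总长超过安全函数限制时,当前版本的gs_index_advise会直接报错,而不是给出建议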
+```
+
+## 三、Workload级别索引推荐
+
+这种方式可以针对多条SQL:将待优化的SQL写到文件里,通过脚本批量获得推荐索引。
+
+脚本在安装目录的bin/dbmind/index\_advisor下面,我的脚本路径为
+
+/opt/gaussdb/app/bin/dbmind/index\_advisor/index\_advisor\_workload.py
+
+将待优化的SQL放到文件里
+
+```
+[omm@node1 index_advisor]$ cat 1.sql
+SELECT * FROM tab_ysl_1 WHERE col1 = 10;
+SELECT count(*), tab_ysl_2.col1 FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col2 = tab_ysl_2.col2 WHERE tab_ysl_2.col2 > 2 GROUP BY tab_ysl_2.col1 ORDER BY tab_ysl_2.col1;
+SELECT * FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col1 = tab_ysl_2.col1;
+```
+
+使用如下方式调用脚本,可以批量获取推荐索引。其中26000为我的数据库端口,ysla为我的数据库名,1.sql为待优化SQL的存放文件。
+
+```
+[omm@node1 index_advisor]$ pwd
+/opt/gaussdb/app/bin/dbmind/index_advisor
+[omm@node1 index_advisor]$ python3 ./index_advisor_workload.py 26000 ysla 1.sql
+
+###### ############################################################## Generate candidate indexes
+
+table: tab_ysl_1 columns: col1
+table: tab_ysl_1 columns: col2
+table: tab_ysl_2 columns: col1
+
+###### ############################################################### Determine optimal indexes
+
+create index ind0 on tab_ysl_1(col1);
+```
+
+## 四、索引效率查看
+
+这里验证一下Index-advisor推荐的索引是否真正起到优化作用。
+
+```
+[omm@node1 index_advisor]$ cat 1.sql
+SELECT * FROM tab_ysl_1 WHERE col1 = 10;
+
+[omm@node1 index_advisor]$ time gsql -d ysla -p 26000 -f 1.sql
+ col1 | col2 | col3
+------+------+------
+ 10 | 10 | SSSS
+(1 row)
+
+total time: 35 ms
+
+real 0m0.050s
+user 0m0.007s
+sys 0m0.002s
+```
+
+可以看到上面未加索引时SQL的执行时间为0m0.050s。
+
+```
+[omm@node1 index_advisor]$ python3 ./index_advisor_workload.py 26000 ysla 1.sql
+
+###### ############################################################## Generate candidate indexes
+
+table: tab_ysl_1 columns: col1
+
+###### ############################################################### Determine optimal indexes
+
+create index ind0 on tab_ysl_1(col1);
+```
+
+通过Index-advisor获取推荐索引,并创建该索引。
+
+```
+ysla=# create index ind0 on tab_ysl_1(col1);
+CREATE INDEX
+```
+
+可以看到查询的时间明显减少。
+
+```
+[omm@node1 index_advisor]$ time gsql -d ysla -p 26000 -f 1.sql
+ col1 | col2 | col3
+------+------+------
+ 10 | 10 | SSSS
+(1 row)
+
+total time: 0 ms
+
+real 0m0.016s
+user 0m0.009s
+sys 0m0.000s
+```
+
+## 虚拟索引
+
+一般在加索引时会堵塞DML(不过PG支持并发加索引,不堵塞DML)。只有索引真正能起到优化作用,建立它才有意义。虚拟索引没有副作用,因为它只是"虚拟"的索引:建立虚拟索引后,可以通过EXPLAIN查看加索引后的成本估算,判断加索引后COST是否会降低。
+
+可以用虚拟索引检验索引的效果,根据效果再选择是否创建真实的索引来优化查询。
+
+```
+#测试建立虚拟索引(hypopg_create_index)
+ysla=# SELECT * FROM hypopg_create_index('CREATE INDEX ON tab_ysl_1(col1)');
+ indexrelid | indexname
+------------+-----------------------------
+ 41453 | <41453>btree_tab_ysl_1_col1
+(1 row)
+
+#显示所有创建的虚拟索引信息(hypopg_display_index)
+ysla=# select * from hypopg_display_index();
+ indexname | indexrelid | table | column
+-----------------------------+------------+-----------+--------
+ <41454>btree_tab_ysl_1_col1 | 41454 | tab_ysl_1 | (col1)
+(1 row)
+
+ysla=# set enable_hypo_index = on;explain SELECT * FROM tab_ysl_1 WHERE col1 = 100;
+SET
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ Index Scan using <41453>btree_tab_ysl_1_col1 on tab_ysl_1 (cost=0.00..8.27 rows=1 width=13)
+ Index Cond: (col1 = 100)
+(2 rows)
+
+#测试删除指定虚拟索引(hypopg_drop_index)
+使用函数hypopg_drop_index删除指定oid的虚拟索引
+ysla=# select * from hypopg_drop_index(41454);
+ hypopg_drop_index
+-------------------
+ t
+(1 row)
+
+#使用函数hypopg_reset_index一次性清除所有创建的虚拟索引
+ysla=# SELECT * FROM hypopg_reset_index();
+ hypopg_reset_index
+--------------------
+```
+
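+把上面两部分能力串起来,就是一个比较稳妥的加索引流程(下面的SQL全部基于本文前面创建的ysla库和已有函数,仅作示意):先让Index-advisor给出候选索引,再用虚拟索引评估收益,确认COST确实下降后,才创建真实索引。
+
+```sql
+-- 1. 获取单条SQL的推荐索引
+SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 WHERE col1 = 10');
+
+-- 2. 按推荐建立虚拟索引,并用EXPLAIN观察代价变化
+SELECT * FROM hypopg_create_index('CREATE INDEX ON tab_ysl_1(col1)');
+SET enable_hypo_index = on;
+EXPLAIN SELECT * FROM tab_ysl_1 WHERE col1 = 10;
+
+-- 3. 代价确实下降后,再创建真实索引,并清理虚拟索引
+CREATE INDEX ind0 ON tab_ysl_1(col1);
+SELECT * FROM hypopg_reset_index();
+```
+
diff --git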
"a/content/zh/post/July/MogDB-openGauss-\345\274\200\346\234\272\350\207\252\345\220\257\345\212\250.md" "b/content/zh/post/July/MogDB-openGauss-\345\274\200\346\234\272\350\207\252\345\220\257\345\212\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..d72c7e37065948d702fe4dc559a2b6e190571381 --- /dev/null +++ "b/content/zh/post/July/MogDB-openGauss-\345\274\200\346\234\272\350\207\252\345\220\257\345\212\250.md" @@ -0,0 +1,171 @@ ++++ + +title = "MogDB/openGauss 开机自启动" + +date = "2021-08-21" + +tags = [ "MogDB/openGauss 开机自启动"] + +archives = "2021-08" + +author = "高云龙" + +summary = "MogDB/openGauss 开机自启动" + +img = "/zh/post/July/title/img1.png" + +times = "12:30" + ++++ + +# MogDB/openGauss 开机自启动 + +在centos7.6 操作系统上设置 MogDB/openGauss 开机自启动,我们先来了解一下自定义服务的配置文件组成部分,共分为\[Unit\]、\[Service\]、\[Install\]三个部分。 + +``` +[Unit] +Description= 当前服务的简单描述 +Documentation= 服务配置文件的位置 +Before= 在某服务之前启动 +After= 在某服务之后启动 +Wants= 与某服务存在“依赖”关系,依赖服务退出,不影响本服务运行 +Requires= 与某服务存在“强依赖”关系,依赖服务故障,本服务也随之退出 + +[Service] +Type= +--simple(默认值):ExecStart字段启动的进程为主进程。 +--forking:ExecStart字段将以fork()方式启动,后台运行。 +--oneshot:类似于simple,只执行一次,Systemd会等它执行完,才启动其他服务。 +--dbus:类似于simple,等待D-Bus信号后在启动。 +--notify:类似于simple,启动结束后会发出通知信号,Systemd再启动其他服务。 +--idle:类似于simple,等其他任务都执行完,才会启动该服务。 + +User= 服务运行的用户 +Group= 服务运行的用户组 + +ExecStart= 启动服务的命令,可以是可执行程序、系统命令或shell脚本,必须是绝对路径。 +ExecReload= 重启服务的命令,可以是可执行程序、系统命令或shell脚本,必须是绝对路径。 +ExecStop= 停止服务的命令,可以是可执行程序、系统命令或shell脚本,必须是绝对路径。 +ExecStartPre= 启动服务之前执行的命令 +ExecStartPost= 启动服务之后执行的命令 +ExecStopPost= 停止服务之后执行的命令 +PrivateTmp= True表示给服务分配独立的临时空间 +KillSignal= 信号量,一般为SIGQUIT +TimeoutStartSec= 启动超时时间 +TimeoutStopSec= 停止超时时间 +TimeoutSec= 同时设置 TimeoutStartSec= 与 TimeoutStopSec= 的快捷方式 +PIDFile= PID文件路径 + +KillMode= Systemd停止sshd服务方式 +--control-group(默认值):所有子进程,都会被杀掉。 +--process:只杀主进程。 +--mixed:主进程将收到SIGTERM信号,子进程收到SIGKILL信号。 +--none:没有进程会被杀掉,只是执行服务的stop命令。 + +Restart=服务程序退出后,Systemd的重启方式 +--no(默认值):退出后不会重启。 +--on-success:只有正常退出时(退出状态码为0),才会重启。 +--on-failure:只有非正常退出时(退出状态码非0,包括被信号终止和超时),才会重启。 +--on-abnormal:只有被信号终止和超时,才会重启。 +--on-abort:只有在收到没有捕捉到的信号终止时,才会重启。 +--on-watchdog:超时退出,才会重启。 +--always:总是重启。 + +RestartSec= 重启服务之前,需要等待的秒数 +RemainAfterExit= yes 进程退出以后,服务仍然保持执行 + +[Install] +WantedBy=multi-user.target +--WantedBy字段,表示该服务所在的 Targe,target的含义是服务组,表示一组服务 +--multi-user.target,表示多用户命令行状态 +--graphical.target,表示图形用户状态,它依赖于multi-user.target +``` + +## MogDB/openGauss 单机自启动 模版 + +- 配置自定义服务 + + ``` + --/usr/lib/systemd/system/mogdb.service + [Unit] + Description=MogDB + Documentation=MogDB Server + After=syslog.target + After=network.target + + [Service] + Type=forking + + User=omm + Group=dbgrp + + Environment=PGDATA=/data/opengauss/data + Environment=GAUSSHOME=/data/opengauss/app + Environment=LD_LIBRARY_PATH=/data/opengauss/app/lib + ExecStart=/data/opengauss/app/bin/gaussdb + ExecReload=/bin/kill -HUP $MAINPID + KillMode=mixed + KillSignal=SIGINT + TimeoutSec=0 + + + [Install] + WantedBy=multi-user.target + ``` + + +- 添加到开机自启动 + + ``` + systemctl daemon-reload + systemctl enable mogdb + systemctl start mogdb + systemctl status mogdb + systemctl stop mogdb + ``` + + +## MogDB/openGauss 集群自启动 模版 + +- 配置自定义服务 + + ``` + --/usr/lib/systemd/system/mogdb_om.service + [Unit] + Description=MogDB + Documentation=MogDB Server + After=syslog.target + After=network.target + + [Service] + Type=forking + + User=omm + Group=dbgrp + + Environment=GPHOME=/data/opengauss/gausstools + Environment=PGDATA=/data/opengauss/data + Environment=GAUSSHOME=/data/opengauss/app + 
Environment=LD_LIBRARY_PATH=/data/opengauss/app/lib + ExecStart=/data/opengauss/gausstools/script/gs_om -t start + ExecReload=/bin/kill -HUP $MAINPID + KillMode=mixed + KillSignal=SIGINT + TimeoutSec=0 + + [Install] + WantedBy=multi-user.target + ``` + + +- 添加到开机自启动 + + ``` + systemctl daemon-reload + systemctl enable mogdb_om + systemctl start mogdb_om + systemctl status mogdb_om + systemctl stop mogdb_om + ``` + + diff --git "a/content/zh/post/July/MogDB-openGauss\345\270\270\347\224\250\346\237\245\350\257\242\346\261\207\346\200\273.md" "b/content/zh/post/July/MogDB-openGauss\345\270\270\347\224\250\346\237\245\350\257\242\346\261\207\346\200\273.md" new file mode 100644 index 0000000000000000000000000000000000000000..8aea635c825454d0330ada0ed149c8ce060ea907 --- /dev/null +++ "b/content/zh/post/July/MogDB-openGauss\345\270\270\347\224\250\346\237\245\350\257\242\346\261\207\346\200\273.md" @@ -0,0 +1,144 @@ ++++ + +title = "MogDB/openGauss常用查询汇总" + +date = "2021-12-04" + +tags = [ "MogDB/openGauss常用查询汇总"] + +archives = "2021-12" + +author = "高云龙" + +summary = "MogDB/openGauss常用查询汇总" + +img = "/zh/post/July/title/img2.png" + +times = "12:30" + ++++ + +# MogDB/openGauss常用查询汇总 + +## 概述 + +在MogDB/openGauss日常运维过程中,会经常通过SQL来获取想要查看的信息,这些SQL可以作为监控指标、巡检指标,也可以临时查询使用。 + +## 通过系统线程id查对应的query + +``` +#!/bin/bash + +source ~/.bashrc + +thread_sets=`ps -ef |grep -i gaussdb |grep -v grep|awk -F ' ' '{print $2}'|xargs top -n 1 -bHp |grep -i ' worker'|awk -F ' ' '{print $1}'|tr "\n" ","|sed -e 's/,$/\n/'` + +gsql -p 26000 postgres -c "select pid,lwtid,state,query from pg_stat_activity a,dbe_perf.thread_wait_status s where a.pid=s.tid and lwtid in($thread_sets);" +``` + +## 查看复制槽 + +``` +select slot_name,coalesce(plugin,'_') as plugin,slot_type,datoid,coalesce(database,'_') as database, + (case active when 't' then 1 else 0 end)as active,coalesce(xmin,'_') as xmin,dummy_standby, + pg_xlog_location_diff(CASE WHEN pg_is_in_recovery() THEN restart_lsn ELSE pg_current_xlog_location() END , restart_lsn) AS delay_lsn +from pg_replication_slots; +``` + +## 查看主备延迟 + +``` +--主库 +select client_addr,sync_state,pg_xlog_location_diff(pg_current_xlog_location(),receiver_replay_location) from pg_stat_replication; + +--备库 +select now() AS now, + coalesce(pg_last_xact_replay_timestamp(), now()) replay, + extract(EPOCH FROM (now() - coalesce(pg_last_xact_replay_timestamp(), now()))) AS diff; +``` + +## 慢SQL查询 + +``` +select datname,usename,client_addr,pid,query_start::text,extract(epoch from (now() - query_start)) as query_runtime,xact_start::text,extract(epoch from(now() - xact_start)) as xact_runtime,state,query +from pg_stat_activity +where state not in('idle') and query_start is not null; +``` + +## 锁阻塞详情 + +``` +with tl as (select usename,granted,locktag,query_start,query + from pg_locks l,pg_stat_activity a + where l.pid=a.pid and locktag in(select locktag from pg_locks where granted='f')) +select ts.usename locker_user,ts.query_start locker_query_start,ts.granted locker_granted,ts.query locker_query,tt.query locked_query,tt.query_start locked_query_start,tt.granted locked_granted,tt.usename locked_user,extract(epoch from now() - tt.query_start) as locked_times +from (select * from tl where granted='t') as ts,(select * from tl where granted='f') tt +where ts.locktag=tt.locktag +order by 1; +``` + +## 锁阻塞源统计 + +``` +with tl as (select usename,granted,locktag,query_start,query + from pg_locks l,pg_stat_activity a + where l.pid=a.pid and locktag in(select locktag from pg_locks where granted='f')) +select 
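+-- 只统计持有争用锁的一方(granted='t'),按其会话和SQL聚合计数,定位最主要的阻塞源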
usename,query_start,granted,query,count(query) count +from tl +where granted='t' +group by usename,query_start,granted,query +order by 5 desc; +``` + +## 数据表大小排序 + +``` +SELECT CURRENT_CATALOG AS datname,nsp.nspname,rel.relname, + pg_total_relation_size(rel.oid) AS bytes, + pg_relation_size(rel.oid) AS relsize, + pg_indexes_size(rel.oid) AS indexsize, + pg_total_relation_size(reltoastrelid) AS toastsize +FROM pg_namespace nsp JOIN pg_class rel ON nsp.oid = rel.relnamespace +WHERE nspname NOT IN ('pg_catalog', 'information_schema','snapshot') AND rel.relkind = 'r' +order by 4 desc limit 100; +``` + +## 索引大小排序 + +``` +select CURRENT_CATALOG AS datname,schemaname schema_name,relname table_name,indexrelname index_name,pg_table_size(indexrelid) as index_size +from pg_stat_user_indexes +where schemaname not in('pg_catalog', 'information_schema','snapshot') +order by 4 desc limit 100; +``` + +## 表膨胀率排序 + +``` +select CURRENT_CATALOG AS datname,schemaname,relname,n_live_tup,n_dead_tup,round((n_dead_tup::numeric/(case (n_dead_tup+n_live_tup) when 0 then 1 else (n_dead_tup+n_live_tup) end ) *100),2) as dead_rate +from pg_stat_user_tables +where (n_live_tup + n_dead_tup) > 10000 +order by 5 desc limit 100; +``` + +## session按状态分类所占用内存大小 + +``` +select state,sum(totalsize)::bigint as totalsize +from gs_session_memory_detail m,pg_stat_activity a +where substring_inner(sessid,position('.' in sessid) +1)=a.sessionid and usename<>'mondb' and pid != pg_backend_pid() +group by state order by sum(totalsize) desc; +``` + +## 查看session中query占用内存大小 + +``` +select sessionid, coalesce(application_name,'')as application_name, + coalesce(client_addr::text,'') as client_addr,sum(usedsize)::bigint as usedsize, + sum(totalsize)::bigint as totalsize,query +from gs_session_memory_detail s,pg_stat_activity a +where substring_inner(sessid,position('.' 
in sessid) +1)=a.sessionid +group by sessionid,query,application_name,client_addr +order by sum(totalsize) desc limit 10; +``` + diff --git "a/content/zh/post/July/ODBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB-openGauss.md" "b/content/zh/post/July/ODBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB-openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..cef01bfb317cc04ae745c8f2f0f9df4bf9aebefa --- /dev/null +++ "b/content/zh/post/July/ODBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB-openGauss.md" @@ -0,0 +1,137 @@ ++++ + +title = "ODBC驱动连接MogDB/openGauss" + +date = "2021-12-01" + +tags = [ "ODBC驱动连接MogDB/openGauss"] + +archives = "2021-12" + +author = "张凡" + +summary = "ODBC驱动连接MogDB/openGauss" + +img = "/zh/post/July/title/img4.png" + +times = "12:30" + ++++ + +# ODBC驱动连接MogDB/openGauss + +## 一、环境说明 + +``` +[root@node1 ~]# cat /etc/redhat-release +CentOS Linux release 7.6.1810 (Core) +``` + +## 二、unixODBC安装 + +有网络安装\(可直接跳至三\) + +``` +yum install -y unixODBC.x86_64 +``` + +无网络安装 + +1.下载软件包并解压 + +``` +wget https://sourceforge.net/projects/unixodbc/files/unixODBC/2.3.7/unixODBC-2.3.7pre.tar.gz/download --no-check-certificate +tar -zxvf unixODBC-2.3.7pre.tar.gz +``` + +2.编译odbc + +修改configure文件,找到LIB\_VERSION,将它的值修改为"1:0:0",这样将编译出\*.so.1的动态库,与psqlodbcw.so的依赖关系相同 + +``` +cd unixODBC-2.3.7pre/ +./configure --enable-gui=no +make +make install +``` + +## 三、替换客户端MogDB程序 + +``` +wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.0.0-ODBC.tar.gz +tar -zxvf openGauss-2.0.0-ODBC.tar.gz +``` + +将解压得到的lib包下的文件和odbc文件夹下的lib拷贝到/usr/local/lib/ + +## 四、配置数据源 + +``` +[root@node1 ~]# cat /usr/local/etc/odbc.ini +[MGODBC] +Driver=TEST +Servername=8.131.53.xxx (数据库IP) +Database=test_db (数据库名) +Username=test_usr (数据库用户) +Password=test@123 (数据库密码) +Port=26000 (数据端口) +Sslmode=allow + +[root@node1 ~]# tail -3 /usr/local/etc/odbcinst.ini +[TEST] +Driver64=/usr/local/lib/psqlodbcw.so +setup=/usr/local/lib/psqlodbcw.so +``` + +## 五、数据库配置说明 + +这里使用简单的方式配置(也可采用guc参数进行设置) + +``` +[omm@node1 data]$ tail -5 postgresql.conf +port=26000 +listen_addresses = '0.0.0.0' +password_encryption_type = 0 +log_directory = 'pg_log' +remote_read_mode=non_authentication +[omm@node1 data]$ tail -1 pg_hba.conf +host all all 0.0.0.0/0 md5 +``` + +重启数据库 + +``` +gs_om -t stop +gs_om -t start +``` + +## 六、客户端配置环境变量 + +``` +[root@node1 ~]# tail -3 .bashrc +export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH +export ODBCSYSINI=/usr/local/etc +export ODBCINI=/usr/local/etc/odbc.ini +``` + +## 七、测试数据源 + +``` +[root@node1 ~]# isql -v MGODBC ++---------------------------------------+ +| Connected! 
| +| | +| sql-statement | +| help [tablename] | +| quit | +| | ++---------------------------------------+ +SQL> +``` + +即连接成功 + +## 八、总结 + +上述文档描述的是如何通过ODBC连接MogDB,更多细节可以参考官网ODBC数据源配置https://docs.mogdb.io/zh/mogdb/v2.0.1/1-development-based-on-odbc-overview + diff --git "a/content/zh/post/July/VMware\344\270\213CentOS7.6\345\256\211\350\243\205openGauss.md" "b/content/zh/post/July/VMware\344\270\213CentOS7.6\345\256\211\350\243\205openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..b94454096c7f33fdbce96bf7469859403b9c593c --- /dev/null +++ "b/content/zh/post/July/VMware\344\270\213CentOS7.6\345\256\211\350\243\205openGauss.md" @@ -0,0 +1,319 @@ ++++ + +title = "VMware下CentOS7.6安装openGauss" + +date = "2021-10-26" + +tags = [ "VMware下CentOS7.6安装openGauss"] + +archives = "2021-10" + +author = "这白开水咋没味啊" + +summary = "VMware下CentOS7.6安装openGauss" + +img = "/zh/post/July/title/img5.png" + +times = "12:30" + ++++ + +# VMware下CentOS7.6(7.9)安装openGauss + + + +## 1. centos安装 + +这里我使用的是vmware workstation Pro 15 虽然官网了解了一下openGauss最适合的centos版本为centos7.6 ![](figures/zh-cn_image_0000001174518146.png) + +但是因为centos7.6版本已经停更,所以我这里下载的是7.9版本的镜像文件 + +![](figures/zh-cn_image_0000001219597955.png) + +下载完成后打开vmware,创建新的虚拟机 + +![](figures/zh-cn_image_0000001219439443.png) + +自定义配置,选择下一步。 + +![](figures/zh-cn_image_0000001174039688.png) + +直接下一步 + +![](figures/zh-cn_image_0000001174199664.png) + +选择稍后安装操作系统,下一步 + +![](figures/zh-cn_image_0000001219518015.png) + +客户机操作系统选Linux,版本选CentOS7 64位 + +![](figures/zh-cn_image_0000001219319491.png) + +命名随意 + +![](figures/zh-cn_image_0000001174358206.png) + +处理器配置默认全1(这里可以根据自己电脑配置自行选择) + +![](figures/zh-cn_image_0000001174518148.png) + +虚拟机内存我选的是2GB(这里也是根据自己电脑内存选择的)PS:据同学说这里虚拟机内存选8GB可以直接使用openGauss的简易安装模式,但我的电脑只有8GB所以没有尝试。 + +![](figures/zh-cn_image_0000001219597957.png) + +网络连接选NAT + +![](figures/zh-cn_image_0000001219439445.png) + +后面两项默认推荐 + +![](figures/zh-cn_image_0000001174039690.png) + +![](figures/zh-cn_image_0000001174199666.png) + +创建新虚拟磁盘 + +![](figures/zh-cn_image_0000001219518017.png) + +最大磁盘大小选20GB,选将虚拟磁盘拆分成多个文件 + +![](figures/zh-cn_image_0000001219319493.png) + +默认下一步 + +![](figures/zh-cn_image_0000001174358208.png) + +![](figures/zh-cn_image_0000001174518150.png) + +右键CentOS点设置,点CD/DVD,使用ISO映像文件,选之前下载的镜像 + +![](figures/zh-cn_image_0000001219597959.png) + +然后开启虚拟机,这里我遇到了第一个问题,一开启虚拟机,宿主机就蓝屏死机。一开始我以为我后台开了什么东西内存占用太大,但关闭后台进程后依旧存在这个问题,查了一下,大多解释是说VMware的鲁棒性很差,在win10某次更新后VMware就存在这个蓝屏的问题。解决方法是更新至最新的VMware16 Pro版本。我试了一下,确实可行,而且覆盖更新不用重新配置虚拟机,上面的工作也没有白费。接下来继续安装。 打开虚拟机,选择Install CentOS Linux7 ,enter。 + +![](figures/zh-cn_image_0000001219439447.png) + +语言就看个人情况选择了,不过中文可能会有些乱码问题。 + +![](figures/zh-cn_image_0000001174039692.png) + +这里配置一些基本信息 1、点击安装源,进入之后直接选择done,警告符号随即消失。 2、软件选择:GNOME桌面,一个友好的图形化界面 3、根据需要禁用Kdump 4、自动分区,一般化为4个分区,如图所示 + +![](figures/zh-cn_image_0000001174199668.png) + +![](figures/zh-cn_image_0000001219518019.png) + +![](figures/zh-cn_image_0000001219319495.png) + +![](figures/zh-cn_image_0000001174358212.png) + +![](figures/zh-cn_image_0000001174518152.png) + +![](figures/zh-cn_image_0000001219597961.png) + +在安装过程中设置用户和密码 + +![](figures/zh-cn_image_0000001219439453.png) + +![](figures/zh-cn_image_0000001174039694.png) + +安装后点重启,看到如下界面 + +![](figures/zh-cn_image_0000001174199670.png) + +接受许可并配置网络 + +![](figures/zh-cn_image_0000001219518021.png) + +![](figures/zh-cn_image_0000001219319497.png) + +登录后就可进入图形界面 右上角打开有线设置 + +![](figures/zh-cn_image_0000001174358214.png) + +![](figures/zh-cn_image_0000001174518154.png) + 
+
+Next, run the yum install python3.6\* command (I had already installed Python 3.6 earlier, so I got the result shown below).
+
+![](figures/zh-cn_image_0000001174358216.png)
+
+If it has not been installed before, the output looks like this instead:
+
+![](figures/zh-cn_image_0000001174518156.png)
+
+Then install the other required packages.
+
+![](figures/zh-cn_image_0000001219597965.png)
+
+In Linux the most privileged user is root. The Gauss database officially uses dbgrp as the default user group and omm as the default user, so this group and user need to be created.
+
+![](figures/zh-cn_image_0000001219439457.png)
+
+Next, create a directory for openGauss to hold the openGauss package and the files extracted from it. I created an openGauss folder under /opt/software, then ran chmod -R 755 /opt/software/openGauss to grant read and write permissions on it.
+
+![](figures/zh-cn_image_0000001174039698.png)
+
+2.2 Downloading the openGauss package
+
+Download the openGauss package from the official site; I chose the 2.0.1 enterprise edition.
+
+![](figures/zh-cn_image_0000001174199674.png)
+
+Then set up a VMware shared folder to pass files between the host and the VM. There are generally two ways to do this, automatic and manual. For reasons unknown, my "Install VMware Tools" button was grayed out and unusable.
+
+![](figures/zh-cn_image_0000001219518025.png)
+
+I tried many fixes without success, so I had to configure the shared folder manually. Right-click the CentOS VM, choose Settings, and open the Options tab.
+
+![](figures/zh-cn_image_0000001219319501.png)
+
+Select Shared Folders, choose Always enabled, and set up the shared folder on the host. Once configured, mount it with vmhgfs-fuse .host:/ /mnt/hgfs.
+
+![](figures/zh-cn_image_0000001174358218.png)
+
+cd into /mnt/hgfs and you can see the host's shared folder. The drawback of this approach is that the mount must be redone after every reboot, which is a bit tedious. Put the package into the shared folder on the host, then move it into the openGauss folder with mv.
+
+![](figures/zh-cn_image_0000001174518158.png)
+
+Then enter the openGauss folder and extract the package.
+
+![](figures/zh-cn_image_0000001219597967.png)
+
+Run ls -l; output similar to the following means it worked.
+
+![](figures/zh-cn_image_0000001219439459.png)
+
+## 3. Configuring the XML File
+
+Enter the script folder produced by the extraction and check that the pre-installation script is there:
+
+![](figures/zh-cn_image_0000001174039700.png)
+
+In the openGauss directory, run vim clusterconfig.xml. Paste the template content into the new XML file (right-click to paste), then press Esc to leave insert mode and type :wq! to save and exit. (The template's XML tags did not survive the page rendering; only the fragment below remains. A hedged sketch of a minimal configuration is given at the end of this section.)
+
+```
+<-->
+```
+
+![](figures/zh-cn_image_0000001174199676.png)
+
+![](figures/zh-cn_image_0000001219518027.png)
+
+Change the node name and IP here to your own; both were checked during the preparation step. Then run vi /etc/profile to open the profile file and add the following lines:
+
+![](figures/zh-cn_image_0000001219319503.png) ![](figures/zh-cn_image_0000001174358220.png)
+
+Leave insert mode, type :wq! to save and exit, then source the file.
+
+![](figures/zh-cn_image_0000001174518160.png)
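+
+Because the template above was mangled by the page rendering, here is a hedged sketch of what a minimal single-node clusterconfig.xml for an openGauss 2.x enterprise installation typically looks like. The cluster name, the node name node1, the IP 192.168.xx.xx, and all paths are placeholders to replace with your own values, and the parameter set should be checked against the official installation guide:
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<ROOT>
+  <CLUSTER>
+    <PARAM name="clusterName" value="dbCluster" />
+    <PARAM name="nodeNames" value="node1" />
+    <PARAM name="backIp1s" value="192.168.xx.xx" />
+    <PARAM name="gaussdbAppPath" value="/opt/huawei/install/app" />
+    <PARAM name="gaussdbLogPath" value="/var/log/omm" />
+    <PARAM name="tmpMppdbPath" value="/opt/huawei/tmp" />
+    <PARAM name="gaussdbToolPath" value="/opt/huawei/install/om" />
+    <PARAM name="corePath" value="/opt/huawei/corefile" />
+  </CLUSTER>
+  <DEVICELIST>
+    <DEVICE sn="node1">
+      <PARAM name="name" value="node1" />
+      <PARAM name="azName" value="AZ1" />
+      <PARAM name="azPriority" value="1" />
+      <PARAM name="backIp1" value="192.168.xx.xx" />
+      <PARAM name="sshIp1" value="192.168.xx.xx" />
+      <PARAM name="dataNum" value="1" />
+      <PARAM name="dataPortBase" value="26000" />
+      <PARAM name="dataNode1" value="/opt/huawei/install/data/dn" />
+    </DEVICE>
+  </DEVICELIST>
+</ROOT>
+```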
+
+## 4. Pre-installation
+
+In the /opt/software/openGauss/script folder, run ./gs\_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml. Normally you get the following feedback:
+
+![](figures/zh-cn_image_0000001219597969.png)
+
+![](figures/zh-cn_image_0000001219439461.png)
+
+But, perhaps because my XML file was misconfigured, or for some other reason, my pre-installation command produced no feedback at all, neither success nor error. Here I made a mistake: with no feedback, I ran the pre-installation command again and again, restarting the terminal when nothing came back, then restarting the whole VM. After all this, my CentOS began reporting piix4\_smbus: Host SMBus controller not enabled! during boot, and after entering my password the login screen just looped back to itself, so I could not get into the system. While trying the many suggested fixes I found that my CentOS was now losing data across reboots, so the file changes recommended online would not stick and none of them helped. After an afternoon of attempts I gave up and set up a fresh virtual machine, repeating the steps above. In hindsight, the repeated pre-installation runs probably generated large amounts of duplicate mutual-trust information for the root and openGauss users, pushing disk usage close to full and causing the problem. On the new VM the pre-installation finally returned an error: Exception: \[GAUSS-51900\] The current OS is not supported. The current system is: centos7.9. This tells us that openGauss does not support CentOS 7.9, so we need to downgrade to 7.6. I could not find a 7.6 image online (reportedly books on openGauss ship one), but the CentOS 7.6 release rpm can be fetched with wget http://vault.centos.org/7.6.1810/os/x86\_64/Packages/centos-release-7-6.1810.2.el7.centos.x86\_64.rpm
+
+Install the downloaded 7.6 rpm package:
+
+```
+rpm -ivh centos-release-7-6.1810.2.el7.centos.x86_64.rpm --force
+```
+
+Re-running rpm -qa | grep -i centos-release now shows both release versions:
+
+![](figures/zh-cn_image_0000001174039702.png)
+
+Remove the 7.7 release package:
+
+```
+rpm -ev centos-release-7-7.1908.0.el7.centos.x86_64
+```
+
+Then run the pre-installation again; this time it succeeded. Use the gs\_checkos tool shipped with openGauss to check the system status; note that the command needs to be run from the /opt directory.
+
+![](figures/zh-cn_image_0000001174199678.png)
+
+## 5. Installation
+
+Switch to the omm user and perform the installation.
+
+![](figures/zh-cn_image_0000001219518029.png)
+
+You are asked to set a password during the process; when "completed" appears at the end, the installation is done.
+
+As the omm user, run gs\_om -t start and gs\_om -t stop to start or stop the database.
+
+![](figures/zh-cn_image_0000001219319505.png)
+
+![](figures/zh-cn_image_0000001174358222.png)
+
diff --git a/content/zh/post/July/figures/1.png b/content/zh/post/July/figures/1.png new file mode 100644 index 0000000000000000000000000000000000000000..fcc70c725ca0378cd24a521937728748ceda4d20 Binary files /dev/null and b/content/zh/post/July/figures/1.png differ diff --git a/content/zh/post/July/figures/11.png b/content/zh/post/July/figures/11.png new file mode 100644 index 0000000000000000000000000000000000000000..8dc69c8a98de3d5a1d629506533775eaa3d445ef Binary files /dev/null and b/content/zh/post/July/figures/11.png differ diff --git a/content/zh/post/July/figures/2.png b/content/zh/post/July/figures/2.png new file mode 100644 index 0000000000000000000000000000000000000000..05daf0d345ddb756754518574b3392970e5fc9eb Binary files /dev/null and b/content/zh/post/July/figures/2.png differ diff --git a/content/zh/post/July/figures/20211210-0e476672-b5db-4ad6-9289-c10feb76f434.png b/content/zh/post/July/figures/20211210-0e476672-b5db-4ad6-9289-c10feb76f434.png new file mode 100644 index 0000000000000000000000000000000000000000..d18d129409d9d43e38b52e0833b2b73f59bbb422 Binary files /dev/null and b/content/zh/post/July/figures/20211210-0e476672-b5db-4ad6-9289-c10feb76f434.png differ diff --git a/content/zh/post/July/figures/20211210-106af296-2dc6-4b45-a794-6dce76a12901.png b/content/zh/post/July/figures/20211210-106af296-2dc6-4b45-a794-6dce76a12901.png new file mode 100644 index 0000000000000000000000000000000000000000..3699eea8dee77f39106d36f2d359c81a017196f3 Binary files /dev/null and b/content/zh/post/July/figures/20211210-106af296-2dc6-4b45-a794-6dce76a12901.png differ diff --git a/content/zh/post/July/figures/20211210-1940ef8d-5cc8-433a-9eaf-41462282a702.png b/content/zh/post/July/figures/20211210-1940ef8d-5cc8-433a-9eaf-41462282a702.png new file mode 100644 index 0000000000000000000000000000000000000000..82f1cfa4495ba859d7ab935534c10d45b9cfbbf5 Binary files /dev/null and b/content/zh/post/July/figures/20211210-1940ef8d-5cc8-433a-9eaf-41462282a702.png differ diff --git a/content/zh/post/July/figures/20211210-2110a0e7-93a5-4f25-b530-b193e48c6e21.png b/content/zh/post/July/figures/20211210-2110a0e7-93a5-4f25-b530-b193e48c6e21.png new file mode 100644 index 0000000000000000000000000000000000000000..407c51002b12fcbd71caeca10e664f9441048197 Binary files /dev/null and b/content/zh/post/July/figures/20211210-2110a0e7-93a5-4f25-b530-b193e48c6e21.png differ diff --git a/content/zh/post/July/figures/20211210-28d6263f-8c33-47ea-ae52-defe7ee5ecd0.png b/content/zh/post/July/figures/20211210-28d6263f-8c33-47ea-ae52-defe7ee5ecd0.png new file mode 100644 index
0000000000000000000000000000000000000000..00bf06a4117c0cd98a51d818c448d4075c2c08ae Binary files /dev/null and b/content/zh/post/July/figures/20211210-28d6263f-8c33-47ea-ae52-defe7ee5ecd0.png differ diff --git a/content/zh/post/July/figures/20211210-31165906-62b2-4632-ac2c-25490d109830.png b/content/zh/post/July/figures/20211210-31165906-62b2-4632-ac2c-25490d109830.png new file mode 100644 index 0000000000000000000000000000000000000000..d01914ecad78efa409cb1c52529c04e9f8dc6047 Binary files /dev/null and b/content/zh/post/July/figures/20211210-31165906-62b2-4632-ac2c-25490d109830.png differ diff --git a/content/zh/post/July/figures/20211210-327c67fc-3a60-4737-b7c0-841cbccb8fce.png b/content/zh/post/July/figures/20211210-327c67fc-3a60-4737-b7c0-841cbccb8fce.png new file mode 100644 index 0000000000000000000000000000000000000000..a8b8fc01508cd6844597fb8b147d0b627583ce90 Binary files /dev/null and b/content/zh/post/July/figures/20211210-327c67fc-3a60-4737-b7c0-841cbccb8fce.png differ diff --git a/content/zh/post/July/figures/20211210-3cc8da73-a3f5-4ba6-9563-99b3ef34820f.png b/content/zh/post/July/figures/20211210-3cc8da73-a3f5-4ba6-9563-99b3ef34820f.png new file mode 100644 index 0000000000000000000000000000000000000000..d9e60aad93298a5a7faf407c50badfb4b318b875 Binary files /dev/null and b/content/zh/post/July/figures/20211210-3cc8da73-a3f5-4ba6-9563-99b3ef34820f.png differ diff --git a/content/zh/post/July/figures/20211210-3d1d1b84-c4aa-4dc2-bf0b-b33629a87bf1.png b/content/zh/post/July/figures/20211210-3d1d1b84-c4aa-4dc2-bf0b-b33629a87bf1.png new file mode 100644 index 0000000000000000000000000000000000000000..88ca5a34be1b016c5e3379e108298c81077a9a00 Binary files /dev/null and b/content/zh/post/July/figures/20211210-3d1d1b84-c4aa-4dc2-bf0b-b33629a87bf1.png differ diff --git a/content/zh/post/July/figures/20211210-415b9d65-5d55-4ac2-a8c4-81a5205d6b6e.png b/content/zh/post/July/figures/20211210-415b9d65-5d55-4ac2-a8c4-81a5205d6b6e.png new file mode 100644 index 0000000000000000000000000000000000000000..0610e49260e45b52ff8df490aefa5d707ab3331a Binary files /dev/null and b/content/zh/post/July/figures/20211210-415b9d65-5d55-4ac2-a8c4-81a5205d6b6e.png differ diff --git a/content/zh/post/July/figures/20211210-4da5c7fb-32fd-4bf7-b359-9a1759f2b4eb.png b/content/zh/post/July/figures/20211210-4da5c7fb-32fd-4bf7-b359-9a1759f2b4eb.png new file mode 100644 index 0000000000000000000000000000000000000000..1b441a050e3fe67de23d78ca7b5eac2e7548a483 Binary files /dev/null and b/content/zh/post/July/figures/20211210-4da5c7fb-32fd-4bf7-b359-9a1759f2b4eb.png differ diff --git a/content/zh/post/July/figures/20211210-557280d9-ad84-4d05-91fa-43c6feff40df.png b/content/zh/post/July/figures/20211210-557280d9-ad84-4d05-91fa-43c6feff40df.png new file mode 100644 index 0000000000000000000000000000000000000000..ca93ce82a6b41d930db0080262fe713b98bd66a0 Binary files /dev/null and b/content/zh/post/July/figures/20211210-557280d9-ad84-4d05-91fa-43c6feff40df.png differ diff --git a/content/zh/post/July/figures/20211210-5a69bbef-d423-4705-a2ed-9065d2e288a9.png b/content/zh/post/July/figures/20211210-5a69bbef-d423-4705-a2ed-9065d2e288a9.png new file mode 100644 index 0000000000000000000000000000000000000000..102c5c371cba2bf28c0a1e242fbcd1c3a993b825 Binary files /dev/null and b/content/zh/post/July/figures/20211210-5a69bbef-d423-4705-a2ed-9065d2e288a9.png differ diff --git a/content/zh/post/July/figures/20211210-5fb969ff-5bb1-46dc-bb8f-3587b9d4d0ca.png b/content/zh/post/July/figures/20211210-5fb969ff-5bb1-46dc-bb8f-3587b9d4d0ca.png 
new file mode 100644 index 0000000000000000000000000000000000000000..dfabe0d5ae70b30de7e234ad85cb4ef59ef0cc63 Binary files /dev/null and b/content/zh/post/July/figures/20211210-5fb969ff-5bb1-46dc-bb8f-3587b9d4d0ca.png differ diff --git a/content/zh/post/July/figures/20211210-63337a88-2942-4254-8e7d-5df49d059915.png b/content/zh/post/July/figures/20211210-63337a88-2942-4254-8e7d-5df49d059915.png new file mode 100644 index 0000000000000000000000000000000000000000..f72221f7b068335bf009300d0a2c6c88c7f18a4c Binary files /dev/null and b/content/zh/post/July/figures/20211210-63337a88-2942-4254-8e7d-5df49d059915.png differ diff --git a/content/zh/post/July/figures/20211210-66d6be04-870c-45e6-8a61-684e41032431.png b/content/zh/post/July/figures/20211210-66d6be04-870c-45e6-8a61-684e41032431.png new file mode 100644 index 0000000000000000000000000000000000000000..7c740d1b0425fd9a8045264d7cb4ad305d4e7956 Binary files /dev/null and b/content/zh/post/July/figures/20211210-66d6be04-870c-45e6-8a61-684e41032431.png differ diff --git a/content/zh/post/July/figures/20211210-685f5e8c-6074-4356-831e-a6354dc1d658.png b/content/zh/post/July/figures/20211210-685f5e8c-6074-4356-831e-a6354dc1d658.png new file mode 100644 index 0000000000000000000000000000000000000000..8f89f276a10385ba8f76e3e34f0bbc707a5e0c59 Binary files /dev/null and b/content/zh/post/July/figures/20211210-685f5e8c-6074-4356-831e-a6354dc1d658.png differ diff --git a/content/zh/post/July/figures/20211210-74432ea4-1267-4ff5-954b-14eb8af1cdea.png b/content/zh/post/July/figures/20211210-74432ea4-1267-4ff5-954b-14eb8af1cdea.png new file mode 100644 index 0000000000000000000000000000000000000000..40ff338d9a1adc919c66b5e09cc311b33aeb9ae3 Binary files /dev/null and b/content/zh/post/July/figures/20211210-74432ea4-1267-4ff5-954b-14eb8af1cdea.png differ diff --git a/content/zh/post/July/figures/20211210-7bc0bdbf-4577-42c5-99ff-3a5462e887d5.png b/content/zh/post/July/figures/20211210-7bc0bdbf-4577-42c5-99ff-3a5462e887d5.png new file mode 100644 index 0000000000000000000000000000000000000000..da9ac120bbae2521f012d0c3b4e064cc3b0f1901 Binary files /dev/null and b/content/zh/post/July/figures/20211210-7bc0bdbf-4577-42c5-99ff-3a5462e887d5.png differ diff --git a/content/zh/post/July/figures/20211210-7bf5f3c6-2b6a-4ab9-9b6d-90a0ed0aea41.png b/content/zh/post/July/figures/20211210-7bf5f3c6-2b6a-4ab9-9b6d-90a0ed0aea41.png new file mode 100644 index 0000000000000000000000000000000000000000..f0bbc55c1a0927187180d589695cb3c80e42c608 Binary files /dev/null and b/content/zh/post/July/figures/20211210-7bf5f3c6-2b6a-4ab9-9b6d-90a0ed0aea41.png differ diff --git a/content/zh/post/July/figures/20211210-7c90cd31-2185-45ba-acb9-04d2bb079b14.png b/content/zh/post/July/figures/20211210-7c90cd31-2185-45ba-acb9-04d2bb079b14.png new file mode 100644 index 0000000000000000000000000000000000000000..f3fb741162434608e32b53b3fb5ee2f4f197166d Binary files /dev/null and b/content/zh/post/July/figures/20211210-7c90cd31-2185-45ba-acb9-04d2bb079b14.png differ diff --git a/content/zh/post/July/figures/20211210-848ee9f8-263a-4376-9fc4-f9543764687d.png b/content/zh/post/July/figures/20211210-848ee9f8-263a-4376-9fc4-f9543764687d.png new file mode 100644 index 0000000000000000000000000000000000000000..44167ab1d45b5745b35f46f5ca46c6701c869a60 Binary files /dev/null and b/content/zh/post/July/figures/20211210-848ee9f8-263a-4376-9fc4-f9543764687d.png differ diff --git a/content/zh/post/July/figures/20211210-8a062cb0-cb96-485c-b2da-1d1cb5cd0b0b.png 
b/content/zh/post/July/figures/20211210-8a062cb0-cb96-485c-b2da-1d1cb5cd0b0b.png new file mode 100644 index 0000000000000000000000000000000000000000..894c3afb4ebb4f59f9f5095d73bc6bc9b34c6763 Binary files /dev/null and b/content/zh/post/July/figures/20211210-8a062cb0-cb96-485c-b2da-1d1cb5cd0b0b.png differ diff --git a/content/zh/post/July/figures/20211210-9293d8e6-3a8c-4d77-998a-5836c1668a33.png b/content/zh/post/July/figures/20211210-9293d8e6-3a8c-4d77-998a-5836c1668a33.png new file mode 100644 index 0000000000000000000000000000000000000000..4671bfc977081b6b64145e2cd09fcd7b9818f48c Binary files /dev/null and b/content/zh/post/July/figures/20211210-9293d8e6-3a8c-4d77-998a-5836c1668a33.png differ diff --git a/content/zh/post/July/figures/20211210-93bb4f52-7b28-48b6-a13c-0f864d518183.png b/content/zh/post/July/figures/20211210-93bb4f52-7b28-48b6-a13c-0f864d518183.png new file mode 100644 index 0000000000000000000000000000000000000000..cf0c008c63684566f759e445dac8069b4a4eb682 Binary files /dev/null and b/content/zh/post/July/figures/20211210-93bb4f52-7b28-48b6-a13c-0f864d518183.png differ diff --git a/content/zh/post/July/figures/20211210-96bb7e65-c7eb-4e31-aeef-84b891971183.png b/content/zh/post/July/figures/20211210-96bb7e65-c7eb-4e31-aeef-84b891971183.png new file mode 100644 index 0000000000000000000000000000000000000000..f7a87eb298ddda71f55f8fb52f14fca7a6d0a3cf Binary files /dev/null and b/content/zh/post/July/figures/20211210-96bb7e65-c7eb-4e31-aeef-84b891971183.png differ diff --git a/content/zh/post/July/figures/20211210-99750504-0e35-4b30-bf35-263e6c17640d.png b/content/zh/post/July/figures/20211210-99750504-0e35-4b30-bf35-263e6c17640d.png new file mode 100644 index 0000000000000000000000000000000000000000..a7243019f468d5c70a51a558850e101fe8ab0018 Binary files /dev/null and b/content/zh/post/July/figures/20211210-99750504-0e35-4b30-bf35-263e6c17640d.png differ diff --git a/content/zh/post/July/figures/20211210-9fdeb2db-b7d4-49ee-8dd5-3a933ddf6bc8.png b/content/zh/post/July/figures/20211210-9fdeb2db-b7d4-49ee-8dd5-3a933ddf6bc8.png new file mode 100644 index 0000000000000000000000000000000000000000..6bc2b30dbd6c99f47e25bb62ec91bc9e33ef34f5 Binary files /dev/null and b/content/zh/post/July/figures/20211210-9fdeb2db-b7d4-49ee-8dd5-3a933ddf6bc8.png differ diff --git a/content/zh/post/July/figures/20211210-a47a2b19-4bef-43d0-8c5d-b2e8da4715a3.png b/content/zh/post/July/figures/20211210-a47a2b19-4bef-43d0-8c5d-b2e8da4715a3.png new file mode 100644 index 0000000000000000000000000000000000000000..8107810c4a4f03e6c95997d4648f474d9b6ae419 Binary files /dev/null and b/content/zh/post/July/figures/20211210-a47a2b19-4bef-43d0-8c5d-b2e8da4715a3.png differ diff --git a/content/zh/post/July/figures/20211210-a5afea0f-957e-4043-9eb6-c14c1f59c069.png b/content/zh/post/July/figures/20211210-a5afea0f-957e-4043-9eb6-c14c1f59c069.png new file mode 100644 index 0000000000000000000000000000000000000000..a2d16a2b30f0f291f530d497e61d0075ba121333 Binary files /dev/null and b/content/zh/post/July/figures/20211210-a5afea0f-957e-4043-9eb6-c14c1f59c069.png differ diff --git a/content/zh/post/July/figures/20211210-a86cee82-2d69-4306-bc33-be92507f0cf9.png b/content/zh/post/July/figures/20211210-a86cee82-2d69-4306-bc33-be92507f0cf9.png new file mode 100644 index 0000000000000000000000000000000000000000..81069a8965942c017c6680f669a99e1196f74202 Binary files /dev/null and b/content/zh/post/July/figures/20211210-a86cee82-2d69-4306-bc33-be92507f0cf9.png differ diff --git 
a/content/zh/post/July/figures/20211210-aac434b7-9ca3-4120-b309-99aa2e9e21a9.png b/content/zh/post/July/figures/20211210-aac434b7-9ca3-4120-b309-99aa2e9e21a9.png new file mode 100644 index 0000000000000000000000000000000000000000..39633e2c29e0869f7fdf053518fc35defceea4b8 Binary files /dev/null and b/content/zh/post/July/figures/20211210-aac434b7-9ca3-4120-b309-99aa2e9e21a9.png differ diff --git a/content/zh/post/July/figures/20211210-ad5cc11c-fe1e-4905-b05d-5f7f5307c029.png b/content/zh/post/July/figures/20211210-ad5cc11c-fe1e-4905-b05d-5f7f5307c029.png new file mode 100644 index 0000000000000000000000000000000000000000..c5eefc992c420be3c810f4eceb8a317ff02f6a6f Binary files /dev/null and b/content/zh/post/July/figures/20211210-ad5cc11c-fe1e-4905-b05d-5f7f5307c029.png differ diff --git a/content/zh/post/July/figures/20211210-b5e98799-1e5c-411a-a4b8-bc25c8187085.png b/content/zh/post/July/figures/20211210-b5e98799-1e5c-411a-a4b8-bc25c8187085.png new file mode 100644 index 0000000000000000000000000000000000000000..eb9f8ffd5288f0484f1341273759f661e7247169 Binary files /dev/null and b/content/zh/post/July/figures/20211210-b5e98799-1e5c-411a-a4b8-bc25c8187085.png differ diff --git a/content/zh/post/July/figures/20211210-bbf89f72-abb0-4f41-a03f-d15eb662944f.png b/content/zh/post/July/figures/20211210-bbf89f72-abb0-4f41-a03f-d15eb662944f.png new file mode 100644 index 0000000000000000000000000000000000000000..b300ca1d0763112bb1a123c2c490f9e9dc16e7e7 Binary files /dev/null and b/content/zh/post/July/figures/20211210-bbf89f72-abb0-4f41-a03f-d15eb662944f.png differ diff --git a/content/zh/post/July/figures/20211210-bd249bef-5603-499b-8379-2ef1d28d796f.png b/content/zh/post/July/figures/20211210-bd249bef-5603-499b-8379-2ef1d28d796f.png new file mode 100644 index 0000000000000000000000000000000000000000..8b39eed449418a317ebb3f777603d938000b0efd Binary files /dev/null and b/content/zh/post/July/figures/20211210-bd249bef-5603-499b-8379-2ef1d28d796f.png differ diff --git a/content/zh/post/July/figures/20211210-bdbc7405-0da5-40c5-a551-7f80d62a6bee.png b/content/zh/post/July/figures/20211210-bdbc7405-0da5-40c5-a551-7f80d62a6bee.png new file mode 100644 index 0000000000000000000000000000000000000000..5420f2b4ab0407afc7d68cea1128e8decea3536b Binary files /dev/null and b/content/zh/post/July/figures/20211210-bdbc7405-0da5-40c5-a551-7f80d62a6bee.png differ diff --git a/content/zh/post/July/figures/20211210-c820f96b-328a-4d16-a45c-19cf5779a8a4.png b/content/zh/post/July/figures/20211210-c820f96b-328a-4d16-a45c-19cf5779a8a4.png new file mode 100644 index 0000000000000000000000000000000000000000..a58746079fc58dbf79a085fe6272d269eb780ae2 Binary files /dev/null and b/content/zh/post/July/figures/20211210-c820f96b-328a-4d16-a45c-19cf5779a8a4.png differ diff --git a/content/zh/post/July/figures/20211210-d5e22e70-6156-4350-800e-2ffb897830dc.png b/content/zh/post/July/figures/20211210-d5e22e70-6156-4350-800e-2ffb897830dc.png new file mode 100644 index 0000000000000000000000000000000000000000..3f2bc8d0f67252362633f3c7ecbbdeb246c124be Binary files /dev/null and b/content/zh/post/July/figures/20211210-d5e22e70-6156-4350-800e-2ffb897830dc.png differ diff --git a/content/zh/post/July/figures/20211210-db3fb345-411f-4db3-8560-7a01dbecd324.png b/content/zh/post/July/figures/20211210-db3fb345-411f-4db3-8560-7a01dbecd324.png new file mode 100644 index 0000000000000000000000000000000000000000..0f79a9f9bc9242cdeae00213a317f00c8e650311 Binary files /dev/null and 
b/content/zh/post/July/figures/20211210-db3fb345-411f-4db3-8560-7a01dbecd324.png differ diff --git a/content/zh/post/July/figures/20211210-ea06bee8-b0b7-44f7-a656-f4e9eddea98e.png b/content/zh/post/July/figures/20211210-ea06bee8-b0b7-44f7-a656-f4e9eddea98e.png new file mode 100644 index 0000000000000000000000000000000000000000..40ff338d9a1adc919c66b5e09cc311b33aeb9ae3 Binary files /dev/null and b/content/zh/post/July/figures/20211210-ea06bee8-b0b7-44f7-a656-f4e9eddea98e.png differ diff --git a/content/zh/post/July/figures/22.png b/content/zh/post/July/figures/22.png new file mode 100644 index 0000000000000000000000000000000000000000..2ee69b361ddabaafde14efe3c10edb2d99a4c4a1 Binary files /dev/null and b/content/zh/post/July/figures/22.png differ diff --git a/content/zh/post/July/figures/3.png b/content/zh/post/July/figures/3.png new file mode 100644 index 0000000000000000000000000000000000000000..9297d988eee1a55e3040155ac7664bcdbafa23f2 Binary files /dev/null and b/content/zh/post/July/figures/3.png differ diff --git a/content/zh/post/July/figures/33.png b/content/zh/post/July/figures/33.png new file mode 100644 index 0000000000000000000000000000000000000000..82696999a02ab7e66b98029987edbf8ec4008d5e Binary files /dev/null and b/content/zh/post/July/figures/33.png differ diff --git a/content/zh/post/July/figures/4.png b/content/zh/post/July/figures/4.png new file mode 100644 index 0000000000000000000000000000000000000000..71c7d0bb360e00505c6cdbe3304c9d8e5a470a47 Binary files /dev/null and b/content/zh/post/July/figures/4.png differ diff --git a/content/zh/post/July/figures/5.png b/content/zh/post/July/figures/5.png new file mode 100644 index 0000000000000000000000000000000000000000..6aaa0f4d9d5560533f32a1f2aef057e8adbcc05f Binary files /dev/null and b/content/zh/post/July/figures/5.png differ diff --git "a/content/zh/post/July/figures/B-tree\347\264\242\345\274\225\345\210\206\350\243\202\350\277\207\347\250\213\344\270\255\347\273\223\346\236\204\345\217\230\345\214\226.png" "b/content/zh/post/July/figures/B-tree\347\264\242\345\274\225\345\210\206\350\243\202\350\277\207\347\250\213\344\270\255\347\273\223\346\236\204\345\217\230\345\214\226.png" new file mode 100644 index 0000000000000000000000000000000000000000..422e13d39046f695ee1bbca397ad47201dad96b9 Binary files /dev/null and "b/content/zh/post/July/figures/B-tree\347\264\242\345\274\225\345\210\206\350\243\202\350\277\207\347\250\213\344\270\255\347\273\223\346\236\204\345\217\230\345\214\226.png" differ diff --git "a/content/zh/post/July/figures/B-tree\347\264\242\345\274\225\347\273\223\346\236\204\345\267\256\345\274\202.png" "b/content/zh/post/July/figures/B-tree\347\264\242\345\274\225\347\273\223\346\236\204\345\267\256\345\274\202.png" new file mode 100644 index 0000000000000000000000000000000000000000..18f33a86bf6bf904b53bc366b3a372fb904d020d Binary files /dev/null and "b/content/zh/post/July/figures/B-tree\347\264\242\345\274\225\347\273\223\346\236\204\345\267\256\345\274\202.png" differ diff --git "a/content/zh/post/July/figures/B-tree\350\212\202\347\202\271\345\210\206\350\243\202.png" "b/content/zh/post/July/figures/B-tree\350\212\202\347\202\271\345\210\206\350\243\202.png" new file mode 100644 index 0000000000000000000000000000000000000000..d3540d395d4b2f64f357dea8ccdd205f6a394eef Binary files /dev/null and "b/content/zh/post/July/figures/B-tree\350\212\202\347\202\271\345\210\206\350\243\202.png" differ diff --git a/content/zh/post/July/figures/Consistent-Example.png 
b/content/zh/post/July/figures/Consistent-Example.png new file mode 100644 index 0000000000000000000000000000000000000000..ef57c70d015f38b564b5955ee7fd8e5b3999a663 Binary files /dev/null and b/content/zh/post/July/figures/Consistent-Example.png differ diff --git a/content/zh/post/July/figures/Consistent.png b/content/zh/post/July/figures/Consistent.png new file mode 100644 index 0000000000000000000000000000000000000000..dbccca2b7fd30dea3022b6737ef4ac45904f60bf Binary files /dev/null and b/content/zh/post/July/figures/Consistent.png differ diff --git "a/content/zh/post/July/figures/Gin-\347\264\242\345\274\225\347\273\223\346\236\204\347\244\272\346\204\217\345\233\276.png" "b/content/zh/post/July/figures/Gin-\347\264\242\345\274\225\347\273\223\346\236\204\347\244\272\346\204\217\345\233\276.png" new file mode 100644 index 0000000000000000000000000000000000000000..961eb26d228d46df1613dc3edc8be61c3f54d99b Binary files /dev/null and "b/content/zh/post/July/figures/Gin-\347\264\242\345\274\225\347\273\223\346\236\204\347\244\272\346\204\217\345\233\276.png" differ diff --git "a/content/zh/post/July/figures/Grafana\345\217\212Prometheus.png" "b/content/zh/post/July/figures/Grafana\345\217\212Prometheus.png" new file mode 100644 index 0000000000000000000000000000000000000000..94fd3dce80ec89c8bf5b261210fbd1acf334fe1c Binary files /dev/null and "b/content/zh/post/July/figures/Grafana\345\217\212Prometheus.png" differ diff --git "a/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2051.png" "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2051.png" new file mode 100644 index 0000000000000000000000000000000000000000..e29285fee750dfad47e153d43305fdaeaf67bab7 Binary files /dev/null and "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2051.png" differ diff --git "a/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2052.png" "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2052.png" new file mode 100644 index 0000000000000000000000000000000000000000..a13f816054f819aac318532ee5a7f97206731b06 Binary files /dev/null and "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2052.png" differ diff --git "a/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2053.png" "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2053.png" new file mode 100644 index 0000000000000000000000000000000000000000..e45da82ba86a18ee607dc60bb99cbfa1c8166d44 Binary files /dev/null and "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2053.png" differ diff --git "a/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2054.png" "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2054.png" new file mode 100644 index 
0000000000000000000000000000000000000000..2efe08cbac989609d1ecbb14cc99f1840854254b Binary files /dev/null and "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2054.png" differ diff --git "a/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2055.png" "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2055.png" new file mode 100644 index 0000000000000000000000000000000000000000..b0fb9b118c3c28548dd0199114d4ac1e541596ad Binary files /dev/null and "b/content/zh/post/July/figures/OpenGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\2055.png" differ diff --git a/content/zh/post/July/figures/ShardingSphere-Proxy.png b/content/zh/post/July/figures/ShardingSphere-Proxy.png new file mode 100644 index 0000000000000000000000000000000000000000..80679f8e91be9f9728e765304079b1eee0620b28 Binary files /dev/null and b/content/zh/post/July/figures/ShardingSphere-Proxy.png differ diff --git "a/content/zh/post/July/figures/html\345\267\241\346\243\200\347\273\223\346\236\234.png" "b/content/zh/post/July/figures/html\345\267\241\346\243\200\347\273\223\346\236\234.png" new file mode 100644 index 0000000000000000000000000000000000000000..ec9f8e277876e436a42ecae826b8cb3e46411325 Binary files /dev/null and "b/content/zh/post/July/figures/html\345\267\241\346\243\200\347\273\223\346\236\234.png" differ diff --git "a/content/zh/post/July/figures/html\345\267\241\346\243\200\347\273\223\346\236\2341.jpg" "b/content/zh/post/July/figures/html\345\267\241\346\243\200\347\273\223\346\236\2341.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..d4d4df2ac3d1a49c23fe13ae93aa430d5b18167f Binary files /dev/null and "b/content/zh/post/July/figures/html\345\267\241\346\243\200\347\273\223\346\236\2341.jpg" differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55c3c80c-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55c3c80c-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..bb3a1ff8ebd160fc3bbc7de2bc5ac31b84786ede Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55c3c80c-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ccb462-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ccb462-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..96ff660ca52398d92b2f6a2b27cb75fd43cf3ff5 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ccb462-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55df6abc-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55df6abc-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..d0798fd9473b63404477ba8522676f8ab0f46ad9 Binary files /dev/null and 
b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55df6abc-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ed0a50-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ed0a50-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..5b889e846317766fd54cd9c91c08709ebe9a1903 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ed0a50-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_5600e228-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_5600e228-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..aa765bfdb40a2ba50e25d7d3c31e1834ca521afb Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_5600e228-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_560d1d68-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_560d1d68-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..67c061c5dc335ea299125127d62edce453ac20d5 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_560d1d68-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_561e3044-e440-11eb-94c3-38f9d3cd240d.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_561e3044-e440-11eb-94c3-38f9d3cd240d.png new file mode 100644 index 0000000000000000000000000000000000000000..ddef421f22ac030a6a546cfe929ea46ff84e262d Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_561e3044-e440-11eb-94c3-38f9d3cd240d.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35b53f3a-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35b53f3a-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..8646434c11d4ccd909af02592f53441a4631b7ff Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35b53f3a-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35d0ba44-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35d0ba44-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..b49cb26a5018282d62e116bc28cdf68c1fdd7e97 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35d0ba44-e9d8-11eb-a08b-00163e068ecd.png differ diff --git 
a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35eab66a-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35eab66a-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..3fb0092b9160d102628a8bfde102a5139444b48f Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35eab66a-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36030a76-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36030a76-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..36de21294ad24f26dcacc56185db9d1fd6d3d545 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36030a76-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_361a3890-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_361a3890-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..fbeeed9b8345c422bb9ec3f355bd03c60ce2307c Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_361a3890-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3632182a-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3632182a-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..4af2456d10f5a70b6f4fea53e6be8fd5ba87c4fa Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3632182a-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_367bf634-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_367bf634-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..e316994f8a907d4c03755b988f471a0eea33159b Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_367bf634-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3695b43e-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3695b43e-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..bed487a47ea3af70d630af8d0f0180a707f388a3 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3695b43e-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36b659aa-e9d8-11eb-a08b-00163e068ecd.png 
b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36b659aa-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..6b4ca957eabdfb02dc70cbe63590489793ad5f62 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36b659aa-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_370122b4-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_370122b4-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..7f767eb2228d20a252fd7fc3996f6837be7fd5ce Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_370122b4-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_374c758e-e9d8-11eb-a08b-00163e068ecd.png b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_374c758e-e9d8-11eb-a08b-00163e068ecd.png new file mode 100644 index 0000000000000000000000000000000000000000..767e7f26c02976524b14d6f8a8325ac3cbd55651 Binary files /dev/null and b/content/zh/post/July/figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_374c758e-e9d8-11eb-a08b-00163e068ecd.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039688.png b/content/zh/post/July/figures/zh-cn_image_0000001174039688.png new file mode 100644 index 0000000000000000000000000000000000000000..4fc659a111dd47a6f263b7f02df2c0d39c5154be Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039688.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039690.png b/content/zh/post/July/figures/zh-cn_image_0000001174039690.png new file mode 100644 index 0000000000000000000000000000000000000000..52a69e74fd7d5878ac0d23fdfc94f48fb695df01 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039690.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039692.png b/content/zh/post/July/figures/zh-cn_image_0000001174039692.png new file mode 100644 index 0000000000000000000000000000000000000000..ad388ddd5860c2fcb721883035d8dec46ed7a491 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039692.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039694.png b/content/zh/post/July/figures/zh-cn_image_0000001174039694.png new file mode 100644 index 0000000000000000000000000000000000000000..2a8e0db3fbd9555c5fbb44b48079b1c2f9e400fb Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039694.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039696.png b/content/zh/post/July/figures/zh-cn_image_0000001174039696.png new file mode 100644 index 0000000000000000000000000000000000000000..aa7a99dd6cc1c7bcfc5345d624fb3a7dc0412372 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039696.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039698.png b/content/zh/post/July/figures/zh-cn_image_0000001174039698.png new file mode 100644 index 0000000000000000000000000000000000000000..c44cb5b2fc983f0b7d64cdb3e93050133a183a37 Binary files /dev/null and 
b/content/zh/post/July/figures/zh-cn_image_0000001174039698.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039700.png b/content/zh/post/July/figures/zh-cn_image_0000001174039700.png new file mode 100644 index 0000000000000000000000000000000000000000..81d916060b55b6c5c0963def6012ab464dccf204 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039700.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174039702.png b/content/zh/post/July/figures/zh-cn_image_0000001174039702.png new file mode 100644 index 0000000000000000000000000000000000000000..a301a93ed6c4a4bc1b8a2c3a241a034d3b69549b Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174039702.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199664.png b/content/zh/post/July/figures/zh-cn_image_0000001174199664.png new file mode 100644 index 0000000000000000000000000000000000000000..279a003910fad4b000c4a353547204327db8eac1 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199664.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199666.png b/content/zh/post/July/figures/zh-cn_image_0000001174199666.png new file mode 100644 index 0000000000000000000000000000000000000000..886e6281420dc9df1ee86f5e169911ab2ba3ea9d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199666.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199668.png b/content/zh/post/July/figures/zh-cn_image_0000001174199668.png new file mode 100644 index 0000000000000000000000000000000000000000..5ed806e8bd9e2dd5531bfce2d69b78676eebe649 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199668.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199670.png b/content/zh/post/July/figures/zh-cn_image_0000001174199670.png new file mode 100644 index 0000000000000000000000000000000000000000..eba5b28bd23114f7f0572feaa4989df956575964 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199670.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199672.png b/content/zh/post/July/figures/zh-cn_image_0000001174199672.png new file mode 100644 index 0000000000000000000000000000000000000000..536ac1cbe9cfb40211adfe28de9ef78cd8fdf26f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199672.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199674.png b/content/zh/post/July/figures/zh-cn_image_0000001174199674.png new file mode 100644 index 0000000000000000000000000000000000000000..07661a07cb1eef5be73484cc25a8f43dd07c90f3 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199674.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199676.png b/content/zh/post/July/figures/zh-cn_image_0000001174199676.png new file mode 100644 index 0000000000000000000000000000000000000000..6868f8a7bb5812cdc43c2ddb41aac002be2cbd00 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199676.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174199678.png b/content/zh/post/July/figures/zh-cn_image_0000001174199678.png new file mode 100644 index 0000000000000000000000000000000000000000..c54619474eef462891b141d5b66b19066b6fc974 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174199678.png differ diff --git 
a/content/zh/post/July/figures/zh-cn_image_0000001174358206.png b/content/zh/post/July/figures/zh-cn_image_0000001174358206.png new file mode 100644 index 0000000000000000000000000000000000000000..87585dddf7dba41708e562a038cf1ae293fdb65d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358206.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358208.png b/content/zh/post/July/figures/zh-cn_image_0000001174358208.png new file mode 100644 index 0000000000000000000000000000000000000000..7961ff446ade514c1b99d0e19a92e327814ff704 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358208.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358212.png b/content/zh/post/July/figures/zh-cn_image_0000001174358212.png new file mode 100644 index 0000000000000000000000000000000000000000..811984caaee2d65ce25947af9969a090cf4b4de4 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358212.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358214.png b/content/zh/post/July/figures/zh-cn_image_0000001174358214.png new file mode 100644 index 0000000000000000000000000000000000000000..de562fe3fed563dc533fe2f509e4089eb6370a94 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358214.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358216.png b/content/zh/post/July/figures/zh-cn_image_0000001174358216.png new file mode 100644 index 0000000000000000000000000000000000000000..411d783a7ccbb44fd8d3864145df096c5416c7d0 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358216.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358218.png b/content/zh/post/July/figures/zh-cn_image_0000001174358218.png new file mode 100644 index 0000000000000000000000000000000000000000..023feeba8c8be39a68959b0cc8c2d156dc795709 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358218.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358220.png b/content/zh/post/July/figures/zh-cn_image_0000001174358220.png new file mode 100644 index 0000000000000000000000000000000000000000..3033d60794f1230cab454f4289b73d8485bc6e2c Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358220.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174358222.png b/content/zh/post/July/figures/zh-cn_image_0000001174358222.png new file mode 100644 index 0000000000000000000000000000000000000000..3445aba8e234f18cf9eb3528a44edf0caa4901c3 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174358222.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518146.png b/content/zh/post/July/figures/zh-cn_image_0000001174518146.png new file mode 100644 index 0000000000000000000000000000000000000000..7207f4ed1807599337352f7cc7e7e15916a86bf4 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518146.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518148.png b/content/zh/post/July/figures/zh-cn_image_0000001174518148.png new file mode 100644 index 0000000000000000000000000000000000000000..6f8a490cdae9acb2b952013cb6a08ee958d473a8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518148.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518150.png 
b/content/zh/post/July/figures/zh-cn_image_0000001174518150.png new file mode 100644 index 0000000000000000000000000000000000000000..16a2b3d7dca140bd582eb5a8ceb71876f21e52bd Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518150.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518152.png b/content/zh/post/July/figures/zh-cn_image_0000001174518152.png new file mode 100644 index 0000000000000000000000000000000000000000..f8ab05250a2553b26e448c234648857934bed84e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518152.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518154.png b/content/zh/post/July/figures/zh-cn_image_0000001174518154.png new file mode 100644 index 0000000000000000000000000000000000000000..83b2cb91ed1dbd714c232583b683d6ad2332e87f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518154.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518156.png b/content/zh/post/July/figures/zh-cn_image_0000001174518156.png new file mode 100644 index 0000000000000000000000000000000000000000..98b1fba18bf045f05e504d546d76642de23dfed7 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518156.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518158.png b/content/zh/post/July/figures/zh-cn_image_0000001174518158.png new file mode 100644 index 0000000000000000000000000000000000000000..1fd16fac1397910c2f36cfbd0c3e0d73d7b3f30b Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518158.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001174518160.png b/content/zh/post/July/figures/zh-cn_image_0000001174518160.png new file mode 100644 index 0000000000000000000000000000000000000000..99740350fdc13ec3c88f258c44c10fa136339ee8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001174518160.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186895808.png b/content/zh/post/July/figures/zh-cn_image_0000001186895808.png new file mode 100644 index 0000000000000000000000000000000000000000..19699cbfa89cf7bcbf06ff0e7943444c7136af0c Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186895808.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186895810.png b/content/zh/post/July/figures/zh-cn_image_0000001186895810.png new file mode 100644 index 0000000000000000000000000000000000000000..ebd9b6b5dd9da9e3f0913cc88269bb1ca24ea9fa Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186895810.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186895812.png b/content/zh/post/July/figures/zh-cn_image_0000001186895812.png new file mode 100644 index 0000000000000000000000000000000000000000..0b889426267401b410dc6f4f62e96cdf0599615e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186895812.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186929314.png b/content/zh/post/July/figures/zh-cn_image_0000001186929314.png new file mode 100644 index 0000000000000000000000000000000000000000..cd80a77c0031ab4fa846a6bc414fc07a24819969 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186929314.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186929316.png b/content/zh/post/July/figures/zh-cn_image_0000001186929316.png new file mode 100644 
index 0000000000000000000000000000000000000000..1d4e9b1f2045f7c69271e41064d450cf1618612f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186929316.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186929318.png b/content/zh/post/July/figures/zh-cn_image_0000001186929318.png new file mode 100644 index 0000000000000000000000000000000000000000..9d90d619434aab21bac8d341bcaf7d729b86ca84 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186929318.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186929320.png b/content/zh/post/July/figures/zh-cn_image_0000001186929320.png new file mode 100644 index 0000000000000000000000000000000000000000..5f64389f0889e7ba3cb570515615fb3e6eba74d9 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186929320.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186931494.jpg b/content/zh/post/July/figures/zh-cn_image_0000001186931494.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6801f5376691cdd8aa20152b21a1d0f38371bc49 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186931494.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186931496.jpg b/content/zh/post/July/figures/zh-cn_image_0000001186931496.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5616e2b0469242632676c2bd51d7da9088adefe7 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186931496.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186931498.jpg b/content/zh/post/July/figures/zh-cn_image_0000001186931498.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d685749af895d931e31b77d7dc4cfc96792e02ed Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186931498.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186934098.png b/content/zh/post/July/figures/zh-cn_image_0000001186934098.png new file mode 100644 index 0000000000000000000000000000000000000000..7658c0b6a7e22f5090b524c305213f7ab9b69281 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186934098.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186934100.png b/content/zh/post/July/figures/zh-cn_image_0000001186934100.png new file mode 100644 index 0000000000000000000000000000000000000000..534ba87fbf789e7df43c5f775979639755157e57 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186934100.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186934102.png b/content/zh/post/July/figures/zh-cn_image_0000001186934102.png new file mode 100644 index 0000000000000000000000000000000000000000..bc4be3ae7b1c3c33805f4dd1d3ab66cab2326c75 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186934102.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001186934104.png b/content/zh/post/July/figures/zh-cn_image_0000001186934104.png new file mode 100644 index 0000000000000000000000000000000000000000..f3ec8ce2ab0c1fe6f4cc0568dd13ab624c4fbdb1 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001186934104.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187055788.png b/content/zh/post/July/figures/zh-cn_image_0000001187055788.png new file mode 100644 index 
0000000000000000000000000000000000000000..218b3f18950afd0b8e1c25e1cabad016f7f19fd5 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187055788.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187055790.png b/content/zh/post/July/figures/zh-cn_image_0000001187055790.png new file mode 100644 index 0000000000000000000000000000000000000000..9c4ef4bb15715d9a99e400dae5178b0e94e19f61 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187055790.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187055792.png b/content/zh/post/July/figures/zh-cn_image_0000001187055792.png new file mode 100644 index 0000000000000000000000000000000000000000..e3c7d79d57697481da3ce33ca16f731a66c818e9 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187055792.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187089296.png b/content/zh/post/July/figures/zh-cn_image_0000001187089296.png new file mode 100644 index 0000000000000000000000000000000000000000..bf9b3c7055195a57883d1368bc5f68c8077a1e9f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187089296.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187089298.png b/content/zh/post/July/figures/zh-cn_image_0000001187089298.png new file mode 100644 index 0000000000000000000000000000000000000000..032a20f72e852cd6b69f2d6cfa56a3082814a04f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187089298.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187089300.png b/content/zh/post/July/figures/zh-cn_image_0000001187089300.png new file mode 100644 index 0000000000000000000000000000000000000000..425c426a40e8d2188b8fe1c52888ba006f7e4acb Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187089300.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187089302.png b/content/zh/post/July/figures/zh-cn_image_0000001187089302.png new file mode 100644 index 0000000000000000000000000000000000000000..5163539bb9e73d0b4429423f843126d43f6f4d32 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187089302.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187091472.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187091472.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3dca7820d4283df6236c01801507d0935931c532 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187091472.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187091474.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187091474.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37b8b30b8800c5ce59202fb93eaaa3a00168f586 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187091474.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187091476.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187091476.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c98dd1af235ce16cb6594d6f1b808df514c7e23 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187091476.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187094074.png b/content/zh/post/July/figures/zh-cn_image_0000001187094074.png new file mode 100644 index 0000000000000000000000000000000000000000..3c2b4609959a43568d44cc6e37a201fe85fcdbc6 
Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187094074.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187094076.png b/content/zh/post/July/figures/zh-cn_image_0000001187094076.png new file mode 100644 index 0000000000000000000000000000000000000000..9c53bf03c7c3816ce38156d1903a7eed230b7ab3 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187094076.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187094078.png b/content/zh/post/July/figures/zh-cn_image_0000001187094078.png new file mode 100644 index 0000000000000000000000000000000000000000..d50ef2a97115c555fce30e7fcc8f05e492215e78 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187094078.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187094080.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187094080.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c57a8f98e9680a315200029a1d0ed166bc52caf4 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187094080.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187214316.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187214316.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9eee1a366dba8089a57e57ca3c1a9688fed2234c Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187214316.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187214342.png b/content/zh/post/July/figures/zh-cn_image_0000001187214342.png new file mode 100644 index 0000000000000000000000000000000000000000..175873e897a65175da56d37fb75ec6c1017f40f3 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187214342.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187214344.png b/content/zh/post/July/figures/zh-cn_image_0000001187214344.png new file mode 100644 index 0000000000000000000000000000000000000000..3c81bc4ba1e82f6a28e9115b2e72549fdf81deed Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187214344.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187247856.png b/content/zh/post/July/figures/zh-cn_image_0000001187247856.png new file mode 100644 index 0000000000000000000000000000000000000000..3a37ed7fc4020a7759ac815b68e8c8464ea3107a Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187247856.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187247858.png b/content/zh/post/July/figures/zh-cn_image_0000001187247858.png new file mode 100644 index 0000000000000000000000000000000000000000..16907d1ea4dbc399091229b0de1c225656e6d49c Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187247858.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187247860.png b/content/zh/post/July/figures/zh-cn_image_0000001187247860.png new file mode 100644 index 0000000000000000000000000000000000000000..e72ff89addcc2462d0ba32a6df226cb47779f826 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187247860.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187247862.png b/content/zh/post/July/figures/zh-cn_image_0000001187247862.png new file mode 100644 index 0000000000000000000000000000000000000000..72184e5b7624c9e93c386364c89e6dcbd78f3fcc Binary files /dev/null and 
b/content/zh/post/July/figures/zh-cn_image_0000001187247862.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187250036.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187250036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91e8300005a3967924f6f7892671f52f13eb6d14 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187250036.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187250038.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187250038.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c54452df47c058e93e7b0ca1bbef1119d4c61e5 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187250038.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187252632.png b/content/zh/post/July/figures/zh-cn_image_0000001187252632.png new file mode 100644 index 0000000000000000000000000000000000000000..90d7655f776d33446560c0208c874487fc57ac2b Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187252632.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187252634.png b/content/zh/post/July/figures/zh-cn_image_0000001187252634.png new file mode 100644 index 0000000000000000000000000000000000000000..43718f36708e3fc7b884c5cde66b1b4fe6455e8a Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187252634.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187252636.png b/content/zh/post/July/figures/zh-cn_image_0000001187252636.png new file mode 100644 index 0000000000000000000000000000000000000000..bcf3e5d6ed98b424b8381f9e84d7a0e37be79487 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187252636.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187252638.png b/content/zh/post/July/figures/zh-cn_image_0000001187252638.png new file mode 100644 index 0000000000000000000000000000000000000000..085490f6f6e67430066a80aa7f805615d753a003 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187252638.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187374254.png b/content/zh/post/July/figures/zh-cn_image_0000001187374254.png new file mode 100644 index 0000000000000000000000000000000000000000..6f8fa44c69b135cccae4f2813cac387e59f24957 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187374254.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187374256.png b/content/zh/post/July/figures/zh-cn_image_0000001187374256.png new file mode 100644 index 0000000000000000000000000000000000000000..65253af2db0782a33e1b2532cbe03f3cae78f61f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187374256.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187407766.png b/content/zh/post/July/figures/zh-cn_image_0000001187407766.png new file mode 100644 index 0000000000000000000000000000000000000000..c9ff3bc808fc66fdb2b6cb60a23a087a085fcc51 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187407766.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187407768.png b/content/zh/post/July/figures/zh-cn_image_0000001187407768.png new file mode 100644 index 0000000000000000000000000000000000000000..1012d6129e99bab400952c3ee53325db1b0fb880 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187407768.png differ diff --git 
a/content/zh/post/July/figures/zh-cn_image_0000001187407770.png b/content/zh/post/July/figures/zh-cn_image_0000001187407770.png new file mode 100644 index 0000000000000000000000000000000000000000..14d008014b9f36ee72393d5ebd98696b03648e8d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187407770.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187407772.png b/content/zh/post/July/figures/zh-cn_image_0000001187407772.png new file mode 100644 index 0000000000000000000000000000000000000000..2a734646047f82c4c37dd488d7dbf23beae98a37 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187407772.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187409942.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187409942.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bb8777dc2f39ab45ec13c9a3c40ba83a915504a Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187409942.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187409944.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187409944.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e912b16d33258d017a9857950f244e2237f3d364 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187409944.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187412548.png b/content/zh/post/July/figures/zh-cn_image_0000001187412548.png new file mode 100644 index 0000000000000000000000000000000000000000..1a68b92b41bee06ee7cb6fcb0b21edf1c6674efa Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187412548.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187412550.png b/content/zh/post/July/figures/zh-cn_image_0000001187412550.png new file mode 100644 index 0000000000000000000000000000000000000000..9de09762a48ef5d4ccfad571ad0e887ac8308e1b Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187412550.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187412552.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187412552.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3402ddda084fea05a88330452ed08eb51d5ce44 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187412552.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001187412554.jpg b/content/zh/post/July/figures/zh-cn_image_0000001187412554.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ecb4d812c8318a654fc028710f35b89562fc73f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001187412554.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319491.png b/content/zh/post/July/figures/zh-cn_image_0000001219319491.png new file mode 100644 index 0000000000000000000000000000000000000000..df00db2b1f13ad8ca2f5268fc4924cce917be7d5 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319491.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319493.png b/content/zh/post/July/figures/zh-cn_image_0000001219319493.png new file mode 100644 index 0000000000000000000000000000000000000000..5630c61c9eedf3fc1f05be2724833ac8cf8613f7 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319493.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319495.png 
b/content/zh/post/July/figures/zh-cn_image_0000001219319495.png new file mode 100644 index 0000000000000000000000000000000000000000..cb7ee33fc0fa52610103602acb5c91e3b0e37752 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319495.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319497.png b/content/zh/post/July/figures/zh-cn_image_0000001219319497.png new file mode 100644 index 0000000000000000000000000000000000000000..584af3aca4651fed477e741489287b7feecada5e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319497.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319499.png b/content/zh/post/July/figures/zh-cn_image_0000001219319499.png new file mode 100644 index 0000000000000000000000000000000000000000..e13965bb8023e91aec11805af24d42c24eb82fc5 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319499.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319501.png b/content/zh/post/July/figures/zh-cn_image_0000001219319501.png new file mode 100644 index 0000000000000000000000000000000000000000..a6cf5bf3dfd044984f5e44d7790dfddd2624ccce Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319501.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319503.png b/content/zh/post/July/figures/zh-cn_image_0000001219319503.png new file mode 100644 index 0000000000000000000000000000000000000000..c03ed827af235feadc37f73b6d38f04d84423da1 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319503.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219319505.png b/content/zh/post/July/figures/zh-cn_image_0000001219319505.png new file mode 100644 index 0000000000000000000000000000000000000000..cc4fcdb28459cb12d8bb0e0836e3d90f740a514e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219319505.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439443.png b/content/zh/post/July/figures/zh-cn_image_0000001219439443.png new file mode 100644 index 0000000000000000000000000000000000000000..c6272c91b73e6c366021a8c9896d075d4552a6be Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439443.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439445.png b/content/zh/post/July/figures/zh-cn_image_0000001219439445.png new file mode 100644 index 0000000000000000000000000000000000000000..2bb3506ee6c03b9b6bfb31a4a874b199a19ddf0d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439445.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439447.png b/content/zh/post/July/figures/zh-cn_image_0000001219439447.png new file mode 100644 index 0000000000000000000000000000000000000000..e638038916c3976bf565a811649fb353ffb31046 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439447.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439453.png b/content/zh/post/July/figures/zh-cn_image_0000001219439453.png new file mode 100644 index 0000000000000000000000000000000000000000..b6287e2fcb9c12835e36379b56fc0fa9433d970d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439453.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439455.png b/content/zh/post/July/figures/zh-cn_image_0000001219439455.png new file mode 100644 
index 0000000000000000000000000000000000000000..112c9e9cbd12302f95f05ecea34c8ace15633cba Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439455.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439457.png b/content/zh/post/July/figures/zh-cn_image_0000001219439457.png new file mode 100644 index 0000000000000000000000000000000000000000..970f33c931a602a5f2b9a484900a2f6b7bcd6ed3 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439457.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439459.png b/content/zh/post/July/figures/zh-cn_image_0000001219439459.png new file mode 100644 index 0000000000000000000000000000000000000000..b3ddab26d7d62ebad1f127313d202b769dbdcc85 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439459.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219439461.png b/content/zh/post/July/figures/zh-cn_image_0000001219439461.png new file mode 100644 index 0000000000000000000000000000000000000000..b22931f64f4f25bf47ecf7c87f1f014c118f9d65 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219439461.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518015.png b/content/zh/post/July/figures/zh-cn_image_0000001219518015.png new file mode 100644 index 0000000000000000000000000000000000000000..1897599bcc5e8afe44ab47fae2c28ba7738cd3ec Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518015.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518017.png b/content/zh/post/July/figures/zh-cn_image_0000001219518017.png new file mode 100644 index 0000000000000000000000000000000000000000..ea03172849a35d7956c1cfcd6ade9e65c9a5682c Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518017.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518019.png b/content/zh/post/July/figures/zh-cn_image_0000001219518019.png new file mode 100644 index 0000000000000000000000000000000000000000..98c3a6321f78d2958019ecf0ff17ae5ae484e4d7 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518019.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518021.png b/content/zh/post/July/figures/zh-cn_image_0000001219518021.png new file mode 100644 index 0000000000000000000000000000000000000000..f8c950abe7a9debc1d7c7f86b775bd01011cedcd Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518021.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518023.png b/content/zh/post/July/figures/zh-cn_image_0000001219518023.png new file mode 100644 index 0000000000000000000000000000000000000000..99740350fdc13ec3c88f258c44c10fa136339ee8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518023.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518025.png b/content/zh/post/July/figures/zh-cn_image_0000001219518025.png new file mode 100644 index 0000000000000000000000000000000000000000..4a23a3ef3dc9f6d4b659f496227bc59b15e6d7e4 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518025.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518027.png b/content/zh/post/July/figures/zh-cn_image_0000001219518027.png new file mode 100644 index 
0000000000000000000000000000000000000000..a81dbdc7301f9cc5cf1c2702c927c18be9dadc88 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518027.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219518029.png b/content/zh/post/July/figures/zh-cn_image_0000001219518029.png new file mode 100644 index 0000000000000000000000000000000000000000..ffbadb35d8741993f183205a2719de195bf0fd01 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219518029.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597955.png b/content/zh/post/July/figures/zh-cn_image_0000001219597955.png new file mode 100644 index 0000000000000000000000000000000000000000..a90670096a83c0d600141089d594e68289f14a68 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597955.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597957.png b/content/zh/post/July/figures/zh-cn_image_0000001219597957.png new file mode 100644 index 0000000000000000000000000000000000000000..66bd5a9b406b08331221965c661153fcfa77a365 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597957.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597959.png b/content/zh/post/July/figures/zh-cn_image_0000001219597959.png new file mode 100644 index 0000000000000000000000000000000000000000..20269db75b69b1477a8cb98b9eb55d1dd57aadf8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597959.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597961.png b/content/zh/post/July/figures/zh-cn_image_0000001219597961.png new file mode 100644 index 0000000000000000000000000000000000000000..acc4e991a582c2b457a41105cbf9642ce017de94 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597961.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597963.png b/content/zh/post/July/figures/zh-cn_image_0000001219597963.png new file mode 100644 index 0000000000000000000000000000000000000000..3069a9d9d9ed51dd4663f66637ee6a4691ca4fa8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597963.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597965.png b/content/zh/post/July/figures/zh-cn_image_0000001219597965.png new file mode 100644 index 0000000000000000000000000000000000000000..6cb6665e5a41ad0a8735cff0796aca9cff801994 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597965.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597967.png b/content/zh/post/July/figures/zh-cn_image_0000001219597967.png new file mode 100644 index 0000000000000000000000000000000000000000..c618510265dd445790c474af2d75dd9dd26215bd Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597967.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001219597969.png b/content/zh/post/July/figures/zh-cn_image_0000001219597969.png new file mode 100644 index 0000000000000000000000000000000000000000..4ce7a1350a875324db3d94c2da033a38a94ddbda Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001219597969.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232453865.png b/content/zh/post/July/figures/zh-cn_image_0000001232453865.png new file mode 100644 index 0000000000000000000000000000000000000000..34118f7c536393c08c89b63c02a7dd4866025be6 
Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232453865.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232453869.png b/content/zh/post/July/figures/zh-cn_image_0000001232453869.png new file mode 100644 index 0000000000000000000000000000000000000000..334e67fb878d8f04b620cf3a44235cae5f4bad52 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232453869.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232453871.png b/content/zh/post/July/figures/zh-cn_image_0000001232453871.png new file mode 100644 index 0000000000000000000000000000000000000000..6a8c152c8a2459d2b768ffe61702b5c2330f1c38 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232453871.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232487371.png b/content/zh/post/July/figures/zh-cn_image_0000001232487371.png new file mode 100644 index 0000000000000000000000000000000000000000..7e3ca797d9921a1911543cf398bf83958671a3e0 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232487371.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232487373.png b/content/zh/post/July/figures/zh-cn_image_0000001232487373.png new file mode 100644 index 0000000000000000000000000000000000000000..070920efeb1cee91e6ef924be8d4e2f49fed2957 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232487373.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232487375.png b/content/zh/post/July/figures/zh-cn_image_0000001232487375.png new file mode 100644 index 0000000000000000000000000000000000000000..f44aa7e6b6b6b2b98e75286973d8d18d45aaed70 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232487375.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232487377.png b/content/zh/post/July/figures/zh-cn_image_0000001232487377.png new file mode 100644 index 0000000000000000000000000000000000000000..60e4079b5e68cc5c3ea79879ff2dfb3b1a597db6 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232487377.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232489561.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232489561.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c00c0544c06467ce1bac88454f6a2ad23baab64e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232489561.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232489563.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232489563.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df9d43e6c2b574cb93d289f5ac977388de54dd22 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232489563.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232489565.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232489565.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a8683f1102703171959d29df94cad6246c61123 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232489565.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232492169.png b/content/zh/post/July/figures/zh-cn_image_0000001232492169.png new file mode 100644 index 0000000000000000000000000000000000000000..72ba562bbb0c12b9effdf9a894d1ba5a3c4544ec Binary files /dev/null and 
b/content/zh/post/July/figures/zh-cn_image_0000001232492169.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232492171.png b/content/zh/post/July/figures/zh-cn_image_0000001232492171.png new file mode 100644 index 0000000000000000000000000000000000000000..a09c2e1a6d54b5561a325bfe5235bc4d3a1fce29 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232492171.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232492173.png b/content/zh/post/July/figures/zh-cn_image_0000001232492173.png new file mode 100644 index 0000000000000000000000000000000000000000..e804d3c6fa50501494a82b825444fa79b9436ad8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232492173.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232575381.png b/content/zh/post/July/figures/zh-cn_image_0000001232575381.png new file mode 100644 index 0000000000000000000000000000000000000000..a5040626c26861efcd2e6c235e9dd52d1834391b Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232575381.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232575383.png b/content/zh/post/July/figures/zh-cn_image_0000001232575383.png new file mode 100644 index 0000000000000000000000000000000000000000..8940ea73441986585bce32658be028583baba275 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232575383.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232575385.png b/content/zh/post/July/figures/zh-cn_image_0000001232575385.png new file mode 100644 index 0000000000000000000000000000000000000000..24ff263ef0c7aa03ead864737deae4c607e06f95 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232575385.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232608885.png b/content/zh/post/July/figures/zh-cn_image_0000001232608885.png new file mode 100644 index 0000000000000000000000000000000000000000..ed6ad3ce28b0dbc79dfd0d4ae06f3662e5029a50 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232608885.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232608887.png b/content/zh/post/July/figures/zh-cn_image_0000001232608887.png new file mode 100644 index 0000000000000000000000000000000000000000..c9ff3bc808fc66fdb2b6cb60a23a087a085fcc51 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232608887.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232608889.png b/content/zh/post/July/figures/zh-cn_image_0000001232608889.png new file mode 100644 index 0000000000000000000000000000000000000000..10c006126d56027148633c5fc4ae8f693d7b0c46 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232608889.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232608893.png b/content/zh/post/July/figures/zh-cn_image_0000001232608893.png new file mode 100644 index 0000000000000000000000000000000000000000..2603df9283239c6169861d6a9d3d3d53fa2e72e8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232608893.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232611073.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232611073.jpg new file mode 100644 index 0000000000000000000000000000000000000000..999b92aaf0ea0bccb699074a521bc2812d210f89 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232611073.jpg differ diff --git 
a/content/zh/post/July/figures/zh-cn_image_0000001232611075.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232611075.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fc3ff2963e953877498427e23d057cc96acc831 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232611075.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232611077.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232611077.jpg new file mode 100644 index 0000000000000000000000000000000000000000..408046c6bb907773c068d42879b19add5733b0d6 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232611077.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232613679.png b/content/zh/post/July/figures/zh-cn_image_0000001232613679.png new file mode 100644 index 0000000000000000000000000000000000000000..7707934d1ced99560a641c176a41424762efd751 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232613679.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232613681.png b/content/zh/post/July/figures/zh-cn_image_0000001232613681.png new file mode 100644 index 0000000000000000000000000000000000000000..28ef48fde2eaae9088cc60ac2ae63369dcad0e5e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232613681.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232613683.png b/content/zh/post/July/figures/zh-cn_image_0000001232613683.png new file mode 100644 index 0000000000000000000000000000000000000000..97e5d37d37000647a2b138eee120d4bf733b65ff Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232613683.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232693943.png b/content/zh/post/July/figures/zh-cn_image_0000001232693943.png new file mode 100644 index 0000000000000000000000000000000000000000..8eda3fefc3bf0606f5d0f1fb0a7bea5f88e5dbc0 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232693943.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232693945.png b/content/zh/post/July/figures/zh-cn_image_0000001232693945.png new file mode 100644 index 0000000000000000000000000000000000000000..b602ff73632c9e2f3e65e0aa95fd3fb74b49626e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232693945.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232693947.png b/content/zh/post/July/figures/zh-cn_image_0000001232693947.png new file mode 100644 index 0000000000000000000000000000000000000000..fe0974ab9af9dcf7415bd8aa730d3ddf1dbac2b2 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232693947.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232727451.png b/content/zh/post/July/figures/zh-cn_image_0000001232727451.png new file mode 100644 index 0000000000000000000000000000000000000000..3b3cc2bec96b29559d091c1167c51fe605e03643 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232727451.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232727453.png b/content/zh/post/July/figures/zh-cn_image_0000001232727453.png new file mode 100644 index 0000000000000000000000000000000000000000..40581b4ca6265776bfda3d850988fe5ecd704cd3 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232727453.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232727455.png 
b/content/zh/post/July/figures/zh-cn_image_0000001232727455.png new file mode 100644 index 0000000000000000000000000000000000000000..384120d038fc90889cee0ca33ceacad7bbc0689e Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232727455.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232727457.png b/content/zh/post/July/figures/zh-cn_image_0000001232727457.png new file mode 100644 index 0000000000000000000000000000000000000000..202113126a174611ef0385eb61d6618bbaccde1f Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232727457.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232729629.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232729629.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b507d4f936a99b2d50f06e725314e739d6b6cec8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232729629.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232729631.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232729631.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73359803032b8f52f4a05ce151cf22e3e75034ae Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232729631.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232729635.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232729635.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a1f2c5bdc5cb3fda73ab5dfa1cf40efdb850a84 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232729635.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232732233.png b/content/zh/post/July/figures/zh-cn_image_0000001232732233.png new file mode 100644 index 0000000000000000000000000000000000000000..cf2192fd3e6dceddc8cf39f8790109b83840dc67 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232732233.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232732235.png b/content/zh/post/July/figures/zh-cn_image_0000001232732235.png new file mode 100644 index 0000000000000000000000000000000000000000..8b6e0f3b4a77e3409bbc6606a1b1fefcc954730a Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232732235.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232732237.png b/content/zh/post/July/figures/zh-cn_image_0000001232732237.png new file mode 100644 index 0000000000000000000000000000000000000000..70c354c2e4e88ef8874ac5b1e74ca85fca594c00 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232732237.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232732239.png b/content/zh/post/July/figures/zh-cn_image_0000001232732239.png new file mode 100644 index 0000000000000000000000000000000000000000..32db8b3eb9ca67d7b4b08b22e35bd0d10ade3f87 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232732239.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232775435.png b/content/zh/post/July/figures/zh-cn_image_0000001232775435.png new file mode 100644 index 0000000000000000000000000000000000000000..b95773841735cd46c5ea0ed6824f713e87257904 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232775435.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232775437.png b/content/zh/post/July/figures/zh-cn_image_0000001232775437.png new file mode 100644 
index 0000000000000000000000000000000000000000..222e0187a77c723bab367323cd8513d721b2edb0 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232775437.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232775439.png b/content/zh/post/July/figures/zh-cn_image_0000001232775439.png new file mode 100644 index 0000000000000000000000000000000000000000..6cacb65aa73b09c102b64498a0a838a754f5081d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232775439.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232808939.png b/content/zh/post/July/figures/zh-cn_image_0000001232808939.png new file mode 100644 index 0000000000000000000000000000000000000000..7e18dcd6ce559ecd0b28952466e9d75d748ab29d Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232808939.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232808941.png b/content/zh/post/July/figures/zh-cn_image_0000001232808941.png new file mode 100644 index 0000000000000000000000000000000000000000..7f2e5cf573c745cade4f2b3f893b0b812b69769b Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232808941.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232808943.png b/content/zh/post/July/figures/zh-cn_image_0000001232808943.png new file mode 100644 index 0000000000000000000000000000000000000000..12d31cf6cdd2c610de9b77a118150f9a6da8157c Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232808943.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232808945.png b/content/zh/post/July/figures/zh-cn_image_0000001232808945.png new file mode 100644 index 0000000000000000000000000000000000000000..fdc0bd2d3ce866b182429abde37b288bd0fc4ec1 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232808945.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232811117.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232811117.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bda5eb2f1e15337e615d325594a9a85143407dd8 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232811117.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232811119.jpg b/content/zh/post/July/figures/zh-cn_image_0000001232811119.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5f019396fe02eaa92c588bef9db4a3663394166 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232811119.jpg differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232811121.png b/content/zh/post/July/figures/zh-cn_image_0000001232811121.png new file mode 100644 index 0000000000000000000000000000000000000000..286652e99fe8c8386b090f583c6e3d5f63a955fe Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232811121.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232813719.png b/content/zh/post/July/figures/zh-cn_image_0000001232813719.png new file mode 100644 index 0000000000000000000000000000000000000000..893a72c2a769bd679e2ed89e5439337aeda4e5a5 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232813719.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232813721.png b/content/zh/post/July/figures/zh-cn_image_0000001232813721.png new file mode 100644 index 
0000000000000000000000000000000000000000..c4b947667cfcfa35f0add20131c6e5de8a0cc725 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232813721.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232813723.png b/content/zh/post/July/figures/zh-cn_image_0000001232813723.png new file mode 100644 index 0000000000000000000000000000000000000000..c13ba08bf50dfd7aec537cc14110b8e331d19fda Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232813723.png differ diff --git a/content/zh/post/July/figures/zh-cn_image_0000001232813725.png b/content/zh/post/July/figures/zh-cn_image_0000001232813725.png new file mode 100644 index 0000000000000000000000000000000000000000..a6323041758020f1d108163ee2ce00e08b0c1ae2 Binary files /dev/null and b/content/zh/post/July/figures/zh-cn_image_0000001232813725.png differ diff --git "a/content/zh/post/July/figures/\344\270\211\347\247\215\347\261\273\345\236\213\347\232\204\346\217\222\345\205\245.png" "b/content/zh/post/July/figures/\344\270\211\347\247\215\347\261\273\345\236\213\347\232\204\346\217\222\345\205\245.png" new file mode 100644 index 0000000000000000000000000000000000000000..a7ea11dd0417651d5f1b35503b910e77a750de46 Binary files /dev/null and "b/content/zh/post/July/figures/\344\270\211\347\247\215\347\261\273\345\236\213\347\232\204\346\217\222\345\205\245.png" differ diff --git "a/content/zh/post/July/figures/\344\270\212\350\277\260\344\270\244\344\270\252\345\233\240\347\264\240\345\257\271\346\200\247\350\203\275\347\232\204\345\275\261\345\223\215\345\246\202\344\270\212\345\233\276\346\211\200\347\244\272.png" "b/content/zh/post/July/figures/\344\270\212\350\277\260\344\270\244\344\270\252\345\233\240\347\264\240\345\257\271\346\200\247\350\203\275\347\232\204\345\275\261\345\223\215\345\246\202\344\270\212\345\233\276\346\211\200\347\244\272.png" new file mode 100644 index 0000000000000000000000000000000000000000..93b4a4bcfccbf006462f921f7539988175e55c19 Binary files /dev/null and "b/content/zh/post/July/figures/\344\270\212\350\277\260\344\270\244\344\270\252\345\233\240\347\264\240\345\257\271\346\200\247\350\203\275\347\232\204\345\275\261\345\223\215\345\246\202\344\270\212\345\233\276\346\211\200\347\244\272.png" differ diff --git "a/content/zh/post/July/figures/\344\270\244\346\254\241put\346\223\215\344\275\234\345\257\271B-tree\347\273\223\346\236\204\347\232\204\344\277\256\346\224\271.png" "b/content/zh/post/July/figures/\344\270\244\346\254\241put\346\223\215\344\275\234\345\257\271B-tree\347\273\223\346\236\204\347\232\204\344\277\256\346\224\271.png" new file mode 100644 index 0000000000000000000000000000000000000000..e819c8fed64ab0778c54484e0c76e59757c2f0a6 Binary files /dev/null and "b/content/zh/post/July/figures/\344\270\244\346\254\241put\346\223\215\344\275\234\345\257\271B-tree\347\273\223\346\236\204\347\232\204\344\277\256\346\224\271.png" differ diff --git "a/content/zh/post/July/figures/\345\267\241\346\243\200\350\204\232\346\234\254\344\270\272\347\272\257SQL\350\204\232\346\234\254\345\274\200\345\217\221.png" "b/content/zh/post/July/figures/\345\267\241\346\243\200\350\204\232\346\234\254\344\270\272\347\272\257SQL\350\204\232\346\234\254\345\274\200\345\217\221.png" new file mode 100644 index 0000000000000000000000000000000000000000..6673fd24df4aab137a3862153222decaac3212ef Binary files /dev/null and 
"b/content/zh/post/July/figures/\345\267\241\346\243\200\350\204\232\346\234\254\344\270\272\347\272\257SQL\350\204\232\346\234\254\345\274\200\345\217\221.png" differ diff --git "a/content/zh/post/July/figures/\346\225\260\346\215\256\345\272\223\344\270\255\346\220\234\347\264\242\346\240\221\347\244\272\346\204\217\345\233\276.png" "b/content/zh/post/July/figures/\346\225\260\346\215\256\345\272\223\344\270\255\346\220\234\347\264\242\346\240\221\347\244\272\346\204\217\345\233\276.png" new file mode 100644 index 0000000000000000000000000000000000000000..95daf1ae2b16eb199a668fc783e3db23ee9f58cd Binary files /dev/null and "b/content/zh/post/July/figures/\346\225\260\346\215\256\345\272\223\344\270\255\346\220\234\347\264\242\346\240\221\347\244\272\346\204\217\345\233\276.png" differ diff --git "a/content/zh/post/July/figures/\346\230\237\350\257\204\345\242\236\351\225\277\346\227\266\351\227\264\347\272\277.png" "b/content/zh/post/July/figures/\346\230\237\350\257\204\345\242\236\351\225\277\346\227\266\351\227\264\347\272\277.png" new file mode 100644 index 0000000000000000000000000000000000000000..62261d866717bec4c02c97c774496c51e016bf45 Binary files /dev/null and "b/content/zh/post/July/figures/\346\230\237\350\257\204\345\242\236\351\225\277\346\227\266\351\227\264\347\272\277.png" differ diff --git "a/content/zh/post/July/figures/\346\234\200\347\273\210\346\217\222\345\205\245\345\244\261\350\264\245.png" "b/content/zh/post/July/figures/\346\234\200\347\273\210\346\217\222\345\205\245\345\244\261\350\264\245.png" new file mode 100644 index 0000000000000000000000000000000000000000..8c1dd1aa32dc2386f0bd08aeb9e64aacf8b475c3 Binary files /dev/null and "b/content/zh/post/July/figures/\346\234\200\347\273\210\346\217\222\345\205\245\345\244\261\350\264\245.png" differ diff --git "a/content/zh/post/July/figures/\346\236\266\346\236\204\345\233\276.png" "b/content/zh/post/July/figures/\346\236\266\346\236\204\345\233\276.png" new file mode 100644 index 0000000000000000000000000000000000000000..3ed2e7f9abb49f469171f56684adc3f30e63f578 Binary files /dev/null and "b/content/zh/post/July/figures/\346\236\266\346\236\204\345\233\276.png" differ diff --git "a/content/zh/post/July/figures/\346\236\266\346\236\204\345\233\2763.png" "b/content/zh/post/July/figures/\346\236\266\346\236\204\345\233\2763.png" new file mode 100644 index 0000000000000000000000000000000000000000..3a9e542dea606a60a7e87b0d1c72d82ad9972311 Binary files /dev/null and "b/content/zh/post/July/figures/\346\236\266\346\236\204\345\233\2763.png" differ diff --git "a/content/zh/post/July/figures/\346\237\245\346\211\276\345\257\271\345\272\224\347\232\204\345\244\204\347\220\206\345\207\275\346\225\260.png" "b/content/zh/post/July/figures/\346\237\245\346\211\276\345\257\271\345\272\224\347\232\204\345\244\204\347\220\206\345\207\275\346\225\260.png" new file mode 100644 index 0000000000000000000000000000000000000000..9f7f39211fe3acb525d6cbe50b22425bfb6e1517 Binary files /dev/null and "b/content/zh/post/July/figures/\346\237\245\346\211\276\345\257\271\345\272\224\347\232\204\345\244\204\347\220\206\345\207\275\346\225\260.png" differ diff --git "a/content/zh/post/July/figures/\346\240\271\346\215\256\344\273\243\344\273\267\345\207\275\346\225\260\350\256\241\347\256\227\350\247\204\345\210\231.png" "b/content/zh/post/July/figures/\346\240\271\346\215\256\344\273\243\344\273\267\345\207\275\346\225\260\350\256\241\347\256\227\350\247\204\345\210\231.png" new file mode 100644 index 
0000000000000000000000000000000000000000..ac18cef2d52afb2d30cc949c37ee235d756efda4 Binary files /dev/null and "b/content/zh/post/July/figures/\346\240\271\346\215\256\344\273\243\344\273\267\345\207\275\346\225\260\350\256\241\347\256\227\350\247\204\345\210\231.png" differ diff --git "a/content/zh/post/July/figures/\350\200\214\345\210\260\345\217\266\345\255\220\350\212\202\347\202\271\345\210\244\346\226\255\346\227\266.png" "b/content/zh/post/July/figures/\350\200\214\345\210\260\345\217\266\345\255\220\350\212\202\347\202\271\345\210\244\346\226\255\346\227\266.png" new file mode 100644 index 0000000000000000000000000000000000000000..ef57c70d015f38b564b5955ee7fd8e5b3999a663 Binary files /dev/null and "b/content/zh/post/July/figures/\350\200\214\345\210\260\345\217\266\345\255\220\350\212\202\347\202\271\345\210\244\346\226\255\346\227\266.png" differ diff --git "a/content/zh/post/July/figures/\350\212\202\347\202\271\345\210\206\350\243\202\345\211\215\345\220\216\346\237\245\346\211\276\350\267\257\345\276\204\347\232\204\345\267\256\345\274\202(a).png" "b/content/zh/post/July/figures/\350\212\202\347\202\271\345\210\206\350\243\202\345\211\215\345\220\216\346\237\245\346\211\276\350\267\257\345\276\204\347\232\204\345\267\256\345\274\202(a).png" new file mode 100644 index 0000000000000000000000000000000000000000..7d2f9af58c3dc979944c99994c56bc3c5a94b19d Binary files /dev/null and "b/content/zh/post/July/figures/\350\212\202\347\202\271\345\210\206\350\243\202\345\211\215\345\220\216\346\237\245\346\211\276\350\267\257\345\276\204\347\232\204\345\267\256\345\274\202(a).png" differ diff --git "a/content/zh/post/July/figures/\350\212\202\347\202\271\345\210\206\350\243\202\345\211\215\345\220\216\346\237\245\346\211\276\350\267\257\345\276\204\347\232\204\345\267\256\345\274\202\357\274\210b).png" "b/content/zh/post/July/figures/\350\212\202\347\202\271\345\210\206\350\243\202\345\211\215\345\220\216\346\237\245\346\211\276\350\267\257\345\276\204\347\232\204\345\267\256\345\274\202\357\274\210b).png" new file mode 100644 index 0000000000000000000000000000000000000000..dcaa46439af6ed335cbc88067416fbf09240429d Binary files /dev/null and "b/content/zh/post/July/figures/\350\212\202\347\202\271\345\210\206\350\243\202\345\211\215\345\220\216\346\237\245\346\211\276\350\267\257\345\276\204\347\232\204\345\267\256\345\274\202\357\274\210b).png" differ diff --git "a/content/zh/post/July/figures/\350\264\241\347\214\256\350\200\205\345\242\236\351\225\277\346\227\266\351\227\264\347\272\277.png" "b/content/zh/post/July/figures/\350\264\241\347\214\256\350\200\205\345\242\236\351\225\277\346\227\266\351\227\264\347\272\277.png" new file mode 100644 index 0000000000000000000000000000000000000000..9382a722a8b8a3f6e6ec056ad77ffdaa80f8398d Binary files /dev/null and "b/content/zh/post/July/figures/\350\264\241\347\214\256\350\200\205\345\242\236\351\225\277\346\227\266\351\227\264\347\272\277.png" differ diff --git "a/content/zh/post/July/figures/\351\200\232\350\277\207\346\237\245\350\257\242-pg_opclass-\345\217\257\344\273\245\346\237\245\345\210\260.png" "b/content/zh/post/July/figures/\351\200\232\350\277\207\346\237\245\350\257\242-pg_opclass-\345\217\257\344\273\245\346\237\245\345\210\260.png" new file mode 100644 index 0000000000000000000000000000000000000000..96a2c24adb30baef4a6d186d88249ac3d76af2e0 Binary files /dev/null and "b/content/zh/post/July/figures/\351\200\232\350\277\207\346\237\245\350\257\242-pg_opclass-\345\217\257\344\273\245\346\237\245\345\210\260.png" differ diff --git 
"a/content/zh/post/July/openGauss-2-1-0-\351\227\252\345\233\236\347\211\271\346\200\247.md" "b/content/zh/post/July/openGauss-2-1-0-\351\227\252\345\233\236\347\211\271\346\200\247.md" new file mode 100644 index 0000000000000000000000000000000000000000..934659788f057518248a72b27943babb2e9cb4ca --- /dev/null +++ "b/content/zh/post/July/openGauss-2-1-0-\351\227\252\345\233\236\347\211\271\346\200\247.md" @@ -0,0 +1,255 @@ ++++ + +title = "openGauss 2.1.0 闪回特性" + +date = "2021-10-21" + +tags = [ "openGauss 2.1.0 闪回特性"] + +archives = "2021-10" + +author = "贾军锋" + +summary = " openGauss 2.1.0 闪回特性" + +img = "/zh/post/July/title/img11.png" + +times = "12:30" + ++++ + + + +# openGauss 2.1.0 闪回特性 + + + +openGauss 2.1.0于2021年9月30日发布,是openGauss的一个Preview版本,该版本生命周期仅为半年。该版本的新增功能如下: + +- 存储过程兼容性增强 +- SQL引擎能力增强 +- 支持Ustore存储引擎 +- 支持段页式存储 +- 基于Paxos分布式一致性协议的高可用 +- AI4DB和DB4AI竞争力持续构筑 +- 日志框架及错误码整改 +- JDBC客户端负载均衡及读写分离 +- 支持cmake脚本编译 +- 列存表支持主键唯一键约束 +- 支持jsonb数据类型 +- 支持unique sql自动淘汰 +- UCE故障感知 +- 支持GB18030字符集 +- 备机catch优化 +- 客户端工具gsql支持readline命令自动补齐 +- 动态数据脱敏 +- 支持国密算法 +- 防篡改账本数据库 +- 内置角色和权限管理机制 +- 透明加密 +- 全密态数据库增强 +- 支持dblink +- 支持Ubuntu系统 +- 支持Hash索引 +- upsert支持子查询 +- min/max函数支持ip地址类型 +- 增加array\_remove/array\_replace/first/last函数 +- Data Studio客户端工具适配内核特性 + +虽然以上官方文档中描述的新增特性中并没有提及闪回特性,但在《管理员指南》中已经明确提及该特性的使用方法。 + +闪回恢复其实是利用回收站的闪回恢复删除的表。利用MVCC机制闪回恢复到指定时间点或者CSN点\(commit sequence number\)。 + +闪回技术能够有选择性的高效撤销一个已提交事务的影响,从人为错误中恢复。在采用闪回技术之前,只能通过备份恢复、PITR等手段找回已提交的数据库修改,恢复时长需要数分钟甚至数小时。采用闪回技术后,恢复已提交的数据库修改前的数据,只需要秒级,而且恢复时间和数据库大小无关。 + +**闪回恢复适用于:** + +- 误删除表的场景; +- 需要将表中的数据恢复到指定时间点或者CSN。 + +**闪回支持两种恢复模式:** + +- 基于MVCC多版本的数据恢复:适用于误删除、误更新、误插入数据的查询和恢复,用户通过配置旧版本保留时间,并执行相应的查询或恢复命令,查询或恢复到指定的时间点或CSN点。 +- 基于类似windows系统回收站的恢复:适用于误DROP、误TRUNCATE的表的恢复。用户通过配置回收站开关,并执行相应的恢复命令,可以将误DROP、误TRUNCATE的表找回。 + +**重要提示:** + +遗憾的是,官方文档关于闪回恢复的前提条件并没有描述到位,导致初次接触该功能的小伙伴有些茫然\(我也是\),无法复现闪回恢复的特性操作。这里,需要向大家明确的是:关于openGauss的闪回,仅支持Ustore存储引擎\(和Oracle一样,闪回的数据存储在UNDO表空间\),也就是说,我们需要创建Ustore存储引擎的表才可以使用openGauss的闪回功能。 + +下面我们来看看openGauss的闪回测试。 + +## 一、创建测试数据 + +- 1. 设置Ustore闪回相关参数 + + ``` + gs_guc set -N all -I all -c "undo_zone_count=16384" ## 内存中可分配的undo zone数量,0代表禁用undo和Ustore表,建议取值为max_connections*4 + gs_guc set -N all -I all -c "enable_default_ustore_table=on" ## 开启默认支持Ustore存储引擎 + gs_guc set -N all -I all -c "version_retention_age=10000" ## 旧版本保留的事务数,超过该事务数的旧版本将被回收清理 + gs_guc set -N all -I all -c "enable_recyclebin=on" ## 打开回收站 + gs_guc set -N all -I all -c "recyclebin_retention_time=15min" ## 置回收站对象保留时间,超过该时间的回收站对象将被自动清理 + gs_om -t restart + ``` + +- 2. 创建测试表 + + ``` + gsql -d postgres -p 26000 -r + openGauss=# create table t1(a int,b int,c int,d int); + openGauss=# insert into t1 values(1,2,3,4),(21,22,23,24),(31,32,33,34); + openGauss=# select * from t1; + a | b | c | d + ----+----+----+---- + 1 | 2 | 3 | 4 + 21 | 22 | 23 | 24 + 31 | 32 | 33 | 34 + openGauss=# \d+ t1 + Table "public.t1" + Column | Type | Modifiers | Storage | Stats target | Description + --------+---------+-----------+---------+--------------+------------- + a | integer | | plain | | + b | integer | | plain | | + c | integer | | plain | | + d | integer | | plain | | + Has OIDs: no + Options: orientation=row, compression=no, storage_type=USTORE + ``` + + +## 二、闪回查询 + +闪回查询可以查询过去某个时间点表的某个snapshot数据,这一特性可用于查看和逻辑重建意外删除或更改的受损数据。闪回查询基于MVCC多版本机制,通过检索查询旧版本,获取指定老版本数据。 + +示例: + +1. 
+## 2. Flashback query
+
+A flashback query reads a snapshot of a table as of some past point in time. It can be used to inspect, and logically reconstruct, damaged data that was accidentally deleted or changed. Flashback query is built on the MVCC multi-version mechanism: it retrieves the requested old version of the data by scanning historical versions.
+
+Example:
+
+1. Update tuples
+
+```
+openGauss=# select current_timestamp;
+       pg_systimestamp
+------------------------------
+ 2021-10-12 10:03:08.272344+08
+
+openGauss=# update t1 set a=99;
+openGauss=# select * from t1;
+ a  | b  | c  | d
+----+----+----+----
+ 99 |  2 |  3 |  4
+ 99 | 22 | 23 | 24
+ 99 | 32 | 33 | 34
+```
+
+2. Find the CSN corresponding to the timestamp
+
+```
+openGauss=# select snptime,snpcsn from gs_txn_snapshot
+              where snptime between '2021-10-12 10:03:05.272344+08' and '2021-10-12 10:03:18.272344+08';
+            snptime            | snpcsn
+-------------------------------+--------
+ 2021-10-12 10:03:07.583368+08 |   2213
+ 2021-10-12 10:03:10.595467+08 |   2214
+ 2021-10-12 10:03:13.606675+08 |   2215
+ 2021-10-12 10:03:16.619061+08 |   2216
+```
+
+3. Run the flashback query and check the result
+
+- Flashback query by timestamp
+
+    ```
+    select * from t1 timecapsule timestamp to_timestamp('2021-10-12 10:03:08.272344','YYYY-MM-DD HH24:MI:SS.FF');
+     a  | b  | c  | d
+    ----+----+----+----
+      1 |  2 |  3 |  4
+     21 | 22 | 23 | 24
+     31 | 32 | 33 | 34
+    ```
+
+- Flashback query by CSN
+
+    ```
+    select * from t1 timecapsule csn 2213;
+     a  | b  | c  | d
+    ----+----+----+----
+      1 |  2 |  3 |  4
+     21 | 22 | 23 | 24
+     31 | 32 | 33 | 34
+    ```
+
+Notes:
+
+- The TIMESTAMP parameter queries a table's data as of the given TIMESTAMP, a concrete historical time.
+- The CSN parameter is a logical commit point. Every CSN in the database is a write-consistency point, and querying data at a CSN returns the data exactly as the database stood at that consistency point.
+
+## 3. The recycle bin
+
+Before the recycle bin existed, once a user DROPped or TRUNCATEd a table by mistake, the only remedy for this kind of logical error was a full-database backup restore.
+
+openGauss 2.1.0 introduces a recycle bin. With it, users can flash back TRUNCATEd or DROPped tables from the recycle bin and restore the data to its state before the bad operation, which greatly improves data reliability.
+
+- Flashback DROP recovers a table that was accidentally dropped, restoring the table and its dependent structures such as indexes and constraints from the recycle bin. It is built on the recycle-bin mechanism: the dropped table is recovered by restoring its physical files as recorded in the recycle bin.
+- Flashback TRUNCATE recovers a table that was accidentally truncated, restoring the physical data of the table and its indexes from the recycle bin, again by restoring the physical files recorded there.
+
+A pitfall the official documentation does not call out: the recycle bin does not support Ustore, only Astore \[see the test below\]; this needs to be confirmed with the developers so the official documentation can be updated.
+
+Example:
+
+1. Drop tables by mistake
+
+```
+-- Create the test data
+openGauss=# create table t1(id int,name varchar(200)) with (STORAGE_TYPE=USTORE);
+openGauss=# insert into t1 values(1,'t1_Tom'),(2,'t1_Jerry');
+openGauss=# select * from t1;
+ id |   name
+----+----------
+  1 | t1_Tom
+  2 | t1_Jerry
+openGauss=# create table t2(id int,name varchar(200)) with (STORAGE_TYPE=ASTORE);
+openGauss=# insert into t2 values(1,'t2_Tom'),(2,'t2_Jerry');
+openGauss=# select * from t2;
+ id |   name
+----+----------
+  1 | t2_Tom
+  2 | t2_Jerry
+-- Simulate dropping the tables by mistake
+openGauss=# drop table t1;
+openGauss=# drop table t2;
+```
+
+2. Query the recycle-bin objects
+
+```
+openGauss=# SELECT rcyname,rcyoriginname,rcytablespace FROM GS_RECYCLEBIN;
+           rcyname           | rcyoriginname | rcytablespace
+-----------------------------+---------------+---------------
+ BIN$3BFF4EB403B$4C71318==$0 | t2            |             0   -- only the Astore table t2 is visible; the Ustore table t1 is not. Beware!
+(1 row)
+```
+
+3. Flash the table back
+
+```
+openGauss=# timecapsule table t2 to before drop rename to t2_bak;
+TimeCapsule Table
+openGauss=# select * from t2_bak;
+ id |   name
+----+----------
+  1 | t2_Tom
+  2 | t2_Jerry
+```
+
+As in Oracle, a table can also be restored by its recycle-bin name \(rcyname\), for example "timecapsule table "BIN$3BFF4EB403B$4C71318==$0" to before drop rename to t2;".
+
+Other flashback operations are described in the official documentation; one of them is sketched below.
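+Flashback TRUNCATE follows the same pattern as flashback DROP. A minimal sketch, assuming the documented TIMECAPSULE ... TO BEFORE TRUNCATE syntax \(this step was not part of my original test\):
+
+```
+-- Truncate the restored Astore table by mistake ...
+truncate table t2_bak;
+-- ... then pull its data back from the recycle bin.
+timecapsule table t2_bak to before truncate;
+select * from t2_bak;
+```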
+## 4. Purging the recycle bin
+
+```
+openGauss=# purge recyclebin;
+PURGE RECYCLEBIN
+```
+
+This has been my basic test of the flashback feature in openGauss 2.1.0. I hope it helps anyone who needs it.
+
diff --git "a/content/zh/post/July/openGauss-B-tree\347\264\242\345\274\225\350\257\273\345\206\231\345\271\266\345\217\221\345\216\237\347\220\206.md" "b/content/zh/post/July/openGauss-B-tree\347\264\242\345\274\225\350\257\273\345\206\231\345\271\266\345\217\221\345\216\237\347\220\206.md" new file mode 100644 index 0000000000000000000000000000000000000000..966bd6319aa30eb1eafd74389627eb4976fc34b7 --- /dev/null +++ "b/content/zh/post/July/openGauss-B-tree\347\264\242\345\274\225\350\257\273\345\206\231\345\271\266\345\217\221\345\216\237\347\220\206.md" @@ -0,0 +1,272 @@
++++
+
+title = "Principles of openGauss B-tree Index Read/Write Concurrency"
+
+date = "2021-07-21"
+
+tags = [ "openGauss B-tree index read/write concurrency"]
+
+archives = "2021-07"
+
+author = "sung"
+
+summary = "Principles of openGauss B-tree index read/write concurrency"
+
+img = "/zh/post/July/title/img1.png"
+
+times = "12:30"
+
++++
+
+# Principles of openGauss B-tree Index Read/Write Concurrency
+
+This article explores the principles behind read/write concurrency in openGauss B-tree indexes, drawing on Lehman & Yao's "Efficient Locking for Concurrent Operations on B-trees" and on the openGauss code.
+
+As explained in the openGauss index deep-dive's discussion of the B-tree index structure, every node of a B-tree index carries a pointer to the node on its right \(the link pointer\), which provides an additional way to reach that right neighbor. When a node splits into a left and a right node, the left node occupies the same physical page on disk as the original node and is connected to the right node through its link pointer. The two nodes can therefore be treated logically as one node until the parent is updated with the new child information.
+
+**Figure 1** B-tree node split
+![](figures/B-tree节点分裂.png "B-tree节点分裂")
+
+The link pointer is established at the very moment the node splits. During a B-tree index search, if the key being looked up exceeds the high key \(HK\) of the current page, the index structure must have changed during the search, and the search follows the link pointer to the right neighbor. This is not always efficient, because it may require extra disk accesses, but correctness is preserved, and in practice splits are usually not frequent enough to matter.
+
+## The search algorithm
+
+To find v in a B-tree index: if v exists in the B-tree, the search finishes once it finds the node A containing v together with the pointer t to v. Otherwise, we search a page A whose key range covers v, and finally confirm that v does not exist.
+
+Pseudocode for the whole search:
+
+```
+x <- scannode(v, A)   // search for v in the in-memory page A; return a data pointer x
+
+procedure search(v)
+current <- root;                  // get the root node; current is the page being searched
+A <- get(current);                // load the corresponding page into memory
+while current is not a leaf do    // descend to a leaf node
+begin
+    current <- scannode(v, A);    // find the matching child node
+    A <- get(current);
+end;
+
+while t <- scannode(v, A) = link ptr of A do  // keep moving right if necessary
+begin
+    current <- t;
+    A <- get(current)
+end;
+
+if v is in A
+then
+    done "success"
+else
+    done "failure"
+```
+
+The search executes as if it were single-threaded; unlike traditional search algorithms, it takes almost no locks along the way.
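+The user-visible payoff is that index scans and index insertions interleave freely. A toy illustration at the SQL level, assuming two concurrent gsql sessions \(the table, index name, and session split are mine; this demonstrates the observable behavior rather than the internal locking\):
+
+```
+-- Session 1: a bulk insert that forces many B-tree page splits.
+create table t(a int);
+create index t_a_idx on t(a);   -- openGauss creates a B-tree index by default
+insert into t select g from generate_series(1, 1000000) g;
+
+-- Session 2, concurrently: index scans keep succeeding while the splits
+-- happen, because readers chase link pointers instead of waiting on
+-- tree-wide locks.
+select count(*) from t where a between 100 and 200;
+```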
// 写page + unlock(current); // 解锁 +end +else // 页面是写入unsafe,需要分裂 +begin + u <- allocate(1 new page for B) // 申请新页面 + A,B <- rearrange old A, adding v and w, to make 2 nodes, + where (link ptr of A, link ptrof B) <- (u, link ptr of old A) + // 分裂 原来的 A 为 A, B 插入数据 v 和 指针 w,新 A 的 link ptr指向 B,B的link ptr 指向原来 A 的link ptr + y <- max value stored in new A + put(B, u) // 写分裂后右侧页 + put(A, current) // 写分裂后左侧页 + oldnode <- current; + v <- y; + w <- u; + current <- pop(stack); // 开始回溯 + lock(current); // 对父节点加锁 + A <- get(current); + move.right; // 如果父节点发生分裂,通过link ptr向右查找 + unlock(oldnode); // 子节点解锁 + goto Doinsertion // 如果有必要,继续向上更新父节点 +end + +move.right的伪代码 +procedure move.right +while t <- scannode(v, A) is a link pointer of A do +begin + lock(t); // 对右侧节点加锁 + unlock(current); // 解锁左侧节点 + current <- t; +A <- get(current); +end +``` + +整个流程中,最多同时有3个节点同时被锁住,分别是\[1\]发生分裂的子节点(分裂后的左侧节点),\[2\]父节点(发生分裂后的左侧节点),\[3\]父节点分裂后的右侧节点。 + +## 正确性证明 + +LEHMAN & YAO 在Efficient Locking for Concurrent Operations on B-tree中给出了算法正确性的证明。 + +正确性证明主要证明了两个大的问题: + +- 整个流程不会出现死锁 +- 最终流程结束的时候,结果是正确的。细化一点就是要证明 + - 最终树的结构是正确的 + - 除了正在修改树结构的这个进程外,其他进程看到的是一棵一致的B-tree + + +## 不会出现死锁的证明 + +首先定义B-tree中节点间的一个顺序关系 \(<\) : + +- 任意时间,如果两个节点 a 和 b,如果 a 到 根节点的距离 大于 b 到根节点的距离,则 a < b。 +- a 和 b 到根节点的距离相同,如果跟随 a 的 link ptr能够到达 b,则 a < b, 即 a 在 b 的左侧。 + +根据插入算法可知,在时间点t0,如果 a < b,则在任意时间点 t \> t0,a < b。因为插入流程只是简单地将一个节点 x 分裂成 x’ 和 x’’,而且 + +- 任意 y < x ,则 y < x' +- 任意 y,如果 x < y, 则 x'' < y + +根据插入流程的加锁顺序可知,当对一个节点加锁时,不会再对 其下 或 其左 的节点加锁,因此加锁遵循了一个好的加锁顺序。由于插入是唯一会对节点加锁的流程,因此我们可以得到结论,不会出现死锁。 + +这里可以回顾一下死锁的几个要素: + +- 互斥。资源同时只能被一个进程持有。 +- 请求与保持。请求其他资源时,不释放当前持有的资源。 +- 不剥夺。不能再对方未释放资源时,抢占其占有的资源。 +- 循环等待。多个进程形成一种相互等待对方释放资源的关系。 +- 由于插入过程,上下两层都是先对子加锁,再对父加锁;同一层,是先加左侧,再加右侧,因此在加锁顺序上避免了循环等待的情况,可以避免死锁。 + +## B-tree结构正确性证明 + +为确保树结构正确,需要关注会修改树结构的操作。只有写操作会修改树结构,在插入算法的伪代码中有3个地方会执行 put 操作。 + +- put\(A, current\) 用于向一个写safe的节点写入数据。 +- put\(B, u\) 用于向一个写unsafe的节点写入数据。在这个操作中,向分裂后的右侧节点写入数据。 +- put\(A, current\) 用于向一个写unsafe的节点写入数据。在这个操作中,向分裂后的左侧节点写入数据,同时修改节点的link ptr指向分裂后的右侧节点。 + + **图 2** 三种类型的插入 + ![](figures/三种类型的插入.png "三种类型的插入") + + +算法中,在 put\(B, u\) 之后紧接着执行 put\(A, current\),这种执行顺序将两个put减少为一次操作。下面证明 “put\(B, u\); put\(A, current\)” 对B-tree结构来说是一次修改。 + +## 证明: + +假设两次 put 操作 分别修改节点 b 和 a。执行 put\(B, u\)时,其他节点都没有指向节点 b 的指针,因此 put 操作不会对B-tree结构有影响。 + +执行 put\(A, current\)时,会修改节点 a 的结构,同时还会修改节点 a 的link ptr指向节点 b。此时节点 b 已经存在,且 b 的link ptr指向 a 节点分裂前link ptr指向的节点。这样同时实现了 修改节点 a,将节点 b 加入B-tree两个效果。 + +由于 put\(B, u\)不修改B-tree结构,put\(A, current\)只修改 a 节点(内容 和 link ptr),对B-tree来说是一次修改。 + +**图 3** 两次put操作对B-tree结构的修改 +![](figures/两次put操作对B-tree结构的修改.png "两次put操作对B-tree结构的修改") + +下面给出所有操作正确修改B-tree结构的证明: + +- Case 1: put\(A, current\) 修改一个写safe的节点,操作加锁修改B-tree节点,且不改变B-tree结构,正确性可以保证。 +- Case 2: put\(B, u\) 操作不修改B-tree结构,不会出错。 +- Case 3: put\(A, current\)修改写unsafe的节点。操作既修改当前节点 a 的内容,同时把节点 b 加入到B-tree结构中。和Case 1类似,节点 a 在执行 put\(A, current\)时已经加锁,根据之前的证明可以知道,这次操作可以保证树结构的正确性(加锁修改B-tree中的一个节点)。 + +## 交互正确性证明 + +上面证明了写操作能保证B-tree结构的正确性。剩下还需要证明,在读写并发执行时如果有插入操作导致B-tree结构发生变化,其他读写进程仍可以正确执行。因为读操作不会改变B-tree结构,因此只需要证明写操作不会影响其他进程操作结果的正确性。细分一下,需要证明\[1\]写操作不会影响其他读操作的正确性,\[2\]写操作不会影响其他写操作的正确性。 + +以下过程中我们提到的操作都是原子的。 + +假设 t0 时刻,一个写进程对节点 a 执行一次 put 操作,在 t’ 时刻其他进程从磁盘读取节点 a。 + +如果 t’ \> t0,则写操作不会影响读操作的正确性。 + +**证明:** + +假设节点 a 是查找路径上的一个节点,则在到达 a 节点之前的查找路径不会受节点 a 插入的影响;同时,之前已经证明插入操作可以保证B-tree结构的正确性,所以任意时刻 t’ \> t0,从节点 a 向下的查找路径也不会受插入的影响。综上,任意时刻 t’ \> t0,查找不会受插入的影响。 + +将写操作对节点的修改分为以下3种: + +- Type 1: 节点是写safe的,简单地将插入的数据及对应的指针插入节点。 +- Type 2: 节点是写unsafe的,插入数据导致节点分裂,插入的数据在分裂后左侧节点。 +- Type 3: 
节点是写unsafe的,插入数据导致节点分裂,插入的数据在分裂后右侧节点。 + +上面证明了任意时刻 t’ \> t0,查找不会受插入的影响。下面考虑 t’ < t0的情况。 + +Type 1: 节点 n 是写safe的。如果节点 n 是叶子节点,则插入操作不会修改任何已存在的指针,则查询的结果等价于插入进程执行之前串行执行查询。如果节点 n 是非叶子节点,则节点 n 的插入是由其子节点分裂导致。假设其子节点 I 分裂为 I’ 和 m’,其中唯一可能出现的交叉执行是查询进程获得了 n 指向 I 的指针,然后插入导致 I 分裂为 I’ 和 m’,原来指向 I 的指针,现在指向 I’。这种情况下,查询进程通过 I’ 的 link ptr 可以查询到 m’,因此查询结果是正确的。 + +Type 2、3: 插入导致节点 n 分裂为 n1’ 和 n2’。如果 n 是叶子节点,则查找 n 和查找 n1’ 及 n2’ 的结果是一致的,除了新插入的数据可能查不到。如果 n 是非叶子节点, 则 n 的分裂是由子节点的分裂导致。节点 n 分裂为 n1’ 和 n2’,分裂后的两个节点拥有和 n 一样的一组指针以及新插入的节点信息。从节点 n 向下查找能到达的节点,与通过节点 n1’ 及其 指向 n2’ 的link ptr是一致的。例外的情况是,查询读取节点 n 时,本来如果新指针已经插入,查询应该使用新指针;但由于当前新指针还未插入,查询实际使用的是新指针左侧的指针,如图-4所示。 + +如果 n 已经分裂,则查询数据应该跟随图中标红的路径,直接找到对应子节点。由于查询执行时,新指针还未插入,因此实际查找路径为图中标蓝的路径。在这种情况下,还需要通过link ptr才能找到正确的节点,最终结果都是正确的。 + +**图 4** 节点分裂前后查找路径的差异\(a\) +![](figures/节点分裂前后查找路径的差异(a).png "节点分裂前后查找路径的差异(a)") + +**图 5** 节点分裂前后查找路径的差异(b\) +![](figures/节点分裂前后查找路径的差异(b).png "节点分裂前后查找路径的差异(b)") + +上面证明了查询操作和写操作并发时,最终不会影响查询结果的正确性。还需要证明,两个写操作并发时,相互之间不会影响正确性。 + +假设插入进程 I 和 插入进程 I’ 并发执行,则 I’ 可能处于\[1\]查找插入节点的阶段,可能是\[2\]已经完成插入在向上回溯更新,也可能\[3\]正在节点上执行插入操作。 + +- \[1\]如果是正在查找插入节点,上面已经证明写操作不会影响查询结果的正确性,所以相互之间不影响正确性。 +- \[2\]对于节点 n 而言,回溯到节点 n 是由于其子节点发生分裂,需要在 n 中插入新生成的子节点信息。从 n 加入回溯栈,到回溯到 n 这段时间,节点 n 由于其他并发操作,例如由于 I 的插入发生分裂。由于节点 n 分裂后的节点都在原节点的右侧,通过 link ptr 可以到达,因此插入算法最终能找到正确的位置。 +- \[3\] 如果 I’ 打算在节点 n 上插入,需要先对 n 加锁。但此时 I 已经先对 n 加锁,最终 I’ 等到 I 释放锁,完成加锁动作,再加载节点 n。如果节点 n 没有发生分裂,则 I’ 直接在 n 中执行插入; 如果节点 n 发生了分裂,则跟随 link ptr能找到正确插入位置。 + +最终的完整算法可能存在LiveLock的问题,即一个进程无休止地运行,因为它必须跟随其他进程所创建的link ptr。在一个多核系统中,如果进程跑在一个相对运行非常慢的核上就可能出现这个问题。 + +但这个问题在实践中出现的可能性极小,因为: + +- 大多数多核系统中,每个核的性能差别不大。 +- 实际使用中B-tree节点的创建和删除所占的时间并不多,即使有一个核非常慢,需要跟随的 link ptr也并不多。 +- B-tree每一层能创建的节点数有限制,所以需要跟随的 link ptr并不是无上限的。 + +如何完全避免LiveLock超出本文的讨论范围,因此不过多展开了。 + +## 删除 + +通常如果B-tree节点中数据少于 k 个entry可能触发节点合并,一种简单的处理删除的方式是允许B-tree叶子节点少于 k 个entry。非叶子节点只是用于判断数据范围,因此没有必要执行删除动作。 + +删除操作的流程和插入非常类似,首先查找到对应的叶子节点,然后对叶子节点加锁,数据加载到内存,执行删除数据的动作,最后回写。 + +正确性的证明和插入类似,这里不赘述了。 + +## 锁效率 + +显然,在并发执行过程中,为了保证数据安全,加锁是必要的。之前讲插入流程时说到,一个进程同时最多会锁定三个节点。实际这种情况发生的概率并不大,因为每个节点都容量都很大,除非有非常的并发进程在执行。因此实际使用过程中,锁冲突出现的概率并不大。 + +以上是关于OpenGauss的B-tree索引并发的理论部分,下一篇我们结合OpenGauss的代码看一下实现。 + diff --git "a/content/zh/post/July/openGauss-B-tree\347\264\242\345\274\225\350\257\273\345\206\231\345\271\266\345\217\221\345\256\236\347\216\260.md" "b/content/zh/post/July/openGauss-B-tree\347\264\242\345\274\225\350\257\273\345\206\231\345\271\266\345\217\221\345\256\236\347\216\260.md" new file mode 100644 index 0000000000000000000000000000000000000000..bb5ed37676a23cdd2dbe68e01ccdd48a04d32a32 --- /dev/null +++ "b/content/zh/post/July/openGauss-B-tree\347\264\242\345\274\225\350\257\273\345\206\231\345\271\266\345\217\221\345\256\236\347\216\260.md" @@ -0,0 +1,346 @@ ++++ + +title = "openGauss B-tree索引读写并发实现" + +date = "2021-09-01" + +tags = [ "openGauss B-tree索引读写并发实现"] + +archives = "2021-09" + +author = "吴松" + +summary = "openGauss B-tree索引读写并发实现" + +img = "/zh/post/July/title/img2.png" + +times = "12:30" + ++++ + +# openGauss B-tree索引读写并发实现 + +openGauss B-tree 索引在实现时,没有完全遵循 L & Y 的理论,本文主要关注openGauss 中 B-tree 索引的实现。 + +## 实现与理论的差异 + +- 读锁 + + 介绍B-tree索引并发读写原理时讲到,L & Y 的理论中读操作完全不加锁,而是假设 B-tree 节点在内存中的拷贝是非共享的。但实际在 PG 和 openGauss 的实现中,内存中的 buffer 是共享的。所以在实现中,实际读操作需要对节点加读锁,来保证读期间没有修改操作。加锁动作降低了并发性,但保证了数据正确性。 + +- 写锁 + + L & Y 的理论总是将加锁和 moveright 耦合,最多同时会对三个节点加锁(分裂的子节点、父节点、父节点的右兄弟节点)。实现中,同一层的节点不需要都加锁。L & Y 的理论中通过记录从根节点开始查找路径上的节点及 key 的信息来记录父子节点关系,理论中假设所有的 key 都是不同的。实现中,父节点通过记录子节点的 Block Number 
来实现父节点指向子节点的指针。理论中如果发生分裂需要更新父节点时,父节点所在层的加锁的顺序是:先对父节点加锁,如果父节点发生了分裂,则对父节点的右兄弟节点加锁,然后释放父节点的锁。而实现中,在对父节点的右兄弟节点加锁前,可以释放父节点的锁,因为 link ptr 实现中是记录的右兄弟节点的 block number, 即使右兄弟节点又发生分裂,依然可以通过 block number 找到分裂后的左侧节点,然后继续向右查找。 + + +- link ptr + + L & Y 的理论中每一层节点都有指向右侧节点的 link ptr,当执行 scan 的操作时,通过 link ptr 只能支持自左向右的 scan。为支持逆向 scan,OpenGauss中每个节点都有指向左右两侧的两个指针,将指向同层右侧节点的称为 right link ptr,指向左侧节点的称为 left link ptr。这个修改,导致 L & Y 的理论中节点分裂需要额外的动作。在锁定分裂节点的同时,还需要锁定该节点分裂前的右兄弟,以修改其 left link ptr,如图-1所示。实际逆向 scan 比正向 scan 要复杂一些,因为如果逆向scan的过程中节点发生分裂,可能还需要通过 right link ptr 向右查找(实际比更复杂,我们会在后面讨论)。 + + **图 1** B-tree索引结构差异 + ![](figures/B-tree索引结构差异.png "B-tree索引结构差异") + + +在 scan 过程中,只有在需要读一个节点时才会对其加锁,为减少加锁的时长,读操作会一次把所有满足条件的项全部读出来,在处理这些项时就可以释放锁了。有时,为了防止并发的 delete 操作,需要 pin 住某个叶子节点\(后面会讲到\)。scan过程中,需要记住当前在扫描节点的 right link ptr,通过 right link ptr 向右scan不会遗漏数据;但如果是向左scan,需要获取最新的 left link ptr。 + +大多数情况下,在对下一个节点进行 pin 和加锁前,会释放前一个节点的pin 和锁;但有些情况下会先对下一个节点进行 pin 和加锁,再释放前一个节点的 pin 和锁。之前讲并发原理时证明过,流程中可以避免死锁的出现,但现在由于实现没有完全按照 L & Y 的理论,加了 left link ptr,似乎有死锁的可能。 + +L & Y 的理论中假设 key 的大小是固定的,实际需要处理变长的 key。因此对每一个 page 而言,key 的个数没有一个固定的最大值,实现中假设一个 page 内至少要有3个 item,因此 tuple 的大小不能超过一个 page 最大可用空间的1/3。节点发生分裂时,按照占用空间来平均分配分裂后的左右节点,而不是按照 item 的个数均分。同时分裂需要将已经在执行中的事务占用的空间考虑在内,否则事务提交时可能出现 page 内没有足够空间的问题。 + +L & Y 的理论中假设key没有重复的,实际是对于非 unique index 可能存在重复 key。 + +## 插入 + +在执行数据插入的流程中,需要更新 heap tuple 和 index tuple,更新index tuple 执行 ExecInsertIndexTuples 方法,处理的主要流程如下: + +``` +{ + 遍历表上的每一个索引 { + 如果索引的 indisready = false,表示索引不可写,则跳过此索引; + 如果是分区表的 local 索引,且 pg_partiton 中 indisusable = false,跳过此索引; + 调用索引插入函数 index_insert,完成 index tuple 插入 + 检查索引插入是否违反约束 unique / exclusion constraint + 返回结果 + } +} +``` + +在介绍索引相关的代码实现之前,先介绍一下OpenGauss中索引实现的目录结构。 + +索引的实现主要在 src/gausskernel/storage/access下,其中 + +- nbtree: 行存表btree索引的实现 +- cbtree: 列存表btree索引的实现 +- gist: gist索引实现 +- gin: 倒排索引实现 +- psort: 列存表psort索引实现 +- index: 索引对外接口 + +行存表支持的索引类型:btree\(缺省值\)、gin和gist。 + +列存表支持的索引类型:psort\(缺省值\)、btree和gin。 + +本文中主要介绍行存表Btree索引实现,对应的目录为nbtree + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| 文件 | 功能 |
| --- | --- |
| nbtree.cpp | Btree索引对外接口 |
| nbtinsert.cpp | Btree索引插入、分裂 |
| nbtsearch.cpp | Btree索引查找 |
| nbtpage.cpp | Btree索引page管理 |
| nbtcompare.cpp | Btree索引数据比较 |
| nbtsort.cpp | Btree索引index tuple排序及btree构建 |
| nbtxlog.cpp | Btree索引xlog相关操作及回放 |
| nbtutils.cpp | Btree索引其他相关函数 |
+ +- index\_insert + + ``` + // 实现在 indexam.cpp 所有索引对外接口 + { + 根据索引类型,找到对应的索引插入处理函数(存储在pg_am中),例如btree索引对应的插入函数为btinsert + 调用对应的插入处理函数 + 返回结果 + } + ``` + +- btinsert + + ``` + // 实现在 nbtree.cpp,行存表btree索引插入实现 + { + 调用 index_form_tuple 生成 index tuple; + 调用 _bt_doinsert 完成索引插入 + 返回结果 + } + ``` + +- \_bt\_doinsert + + // 实现在nbtinsert.cpp,行存表btree索引 index tuple 插入 + + ``` + { + 生成scankey用于在B-tree索引中查找插入位置。 + 调用 _bt_search 查找插入的叶子节点,返回一个BTStack类型的对象,该对象的实现是一个链表,链表头是叶子节点,后面的每个节点是前一个节点的父节点。由于插入数据可能导致节点分裂,子节点分裂需要更新父节点,可以通过此链表向上查找父节点。 + 调用 _bt_moveright,找到最终指向插入的叶子节点。因为流程不是全程加锁(先加读锁,然后释放读锁,再加写锁),因此在找到插入的叶子节点后,该节点可能由于新的插入导致分裂,则新的插入位置可能在原节点的右侧,需要向右查找(多次分裂可能需要多次向右查找);如果叶子节点没有发生变化,或者插入位置不需要向右查找,则_bt_moveright实际不做任何事情。这一点涉及Btree索引的并发控制,可以参考前一篇。 + 检查unique约束,包括进行中的事务,如果有执行中的事务可能导致违反约束,需要等待执行中的事务结束。 + page内查找插入位置_bt_findinsertloc + 执行插入操作 _bt_insertonpg + } + ``` + + +在查找插入节点的实现 \_bt\_search 中,在每次使用节点时都需要调用一次 \_bt\_moveright ,因为查找过程中加锁的顺序是先对父节点加 ReadLock,查找到子节点(父节点指向子节点的指针的实现是记录子节点的页号)后释放父节点的 ReadLock,再对子节点加 ReadLock。由于读写并发,查找到子节点到对子节点加 ReadLock 这段时间内子节点有可能已经发生了分裂,因此在使用子节点查找时需要调用一次\_bt\_moveright。这样实现可以减少加锁的粒度,有利于读写并发。 + +\_bt\_search查找到最终要执行插入的叶子节点后,在使用该节点时也要调用一次\_bt\_moveright,原因是查找到叶子节点后,对叶子节点加了 ReadLock。但是由于需要执行插入动作,会先释放叶子节点的 ReadLock 再对叶子节点加 WriteLock,放锁到再加锁的间隙叶子节点可能发生分裂。代码实现如下: + +``` + stack = _bt_search(rel, indnkeyatts, itup_scankey, false, &buf, BT_WRITE); + + offset = InvalidOffsetNumber; + + /* trade in our read lock for a write lock */ + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + LockBuffer(buf, BT_WRITE); + buf = _bt_moveright(rel, buf, indnkeyatts, itup_scankey, false, true, stack, BT_WRITE); +``` + +- \_bt\_moveright + + // 实现在nbtserach.cpp 在获取到将要执行插入的叶子节点后,由于并发操作,实际位置可能发生了变化,需要由当前节点向右查找才能找到实际位置 + + ``` + { + 如果是写入操作insert/update,则在流程中检查是否有未完成的分裂,插入流程不允许插入一个未完成分裂中的page(通过P_INCOMPLETE_SPLIT判断。未完成的分裂可能是程序crash或者其他failure导致,special区域有标记页面的状态),因此先在流程中完成分裂动作。_bt_finish_split 这里流程的实现需要先了解 _bt_split 流程。 + 如果当前节点的flag是(BTP_DELETED | BTP_HALF_DEAD),表明叶子节点被删除,需要向右移动;如果当前的scan key > 节点的HK,则节点在上次获取之后发生了分裂,需要向右移动。循环检查,直到当前节点是最右节点,或者scan key <= HighKey。 + } + ``` + + 对要执行插入操作的叶子节点加锁后,需要在叶子节点内找到具体的插入位置,调用方法 \_bt\_findinsertloc。 + +- \_bt\_findinsertloc + + // 实现在 nbtinsert.cpp 在page内为要插入的 index tuple 查找一个插入位置(page buffer + offset) + + ``` + { + 检查 tuple size 是否小于page内最大可用空间的1/3,一个page内最少要放3个tuple + 如果当前page可用空间不足以放下插入的tuple,可能需要执行节点分裂。在执行分裂之前检查一下是否可以将要插入的tuple放置到右侧的某个节点中(例如 scankey = HK 的场景,可以将tuple插入右侧节点,从而避免执行分裂动作) { + a. 如果是叶子节点且page内有垃圾可以清理,则调用 _bt_vacuum_one_page 做一次 page 的 vacuum,执行结束后检查 page 内的剩余空间是否足够插入tuple。 + b. 
如果 page-vaccum 后仍然不够或是不能执行 vacuum,检查是否满足以下两个条件: + - 当前节点是最右侧节点 //不能继续向右查找 + - Scan key != HK 或者 有0.99的概率向右移动 // 如果多个page内有相同的key,对查找而言可能需要不断向右查找。此时另一种选择是找一个靠左的节点将其分裂,然后有足够的空间插入要插入的tuple。这里向右继续查找的概率是0.99,有0.01的概率将当前节点分裂后进行插入。 + 如果当前节点处于分裂未完成的状态,先完成分裂动作 + + 不满足 1、2两个条件,则表明 scan key = HK,则向右查找到一个有足够空间可以插入tuple的节点。 + 如果查找过程中向右移动了,则预期将tuple插入到节点的第一个位置(非最右节点是偏移是2,因为有HK,最右节点偏移是1)返回 对应的节点 以及 offset(入参传递); + 如果没有向右移动,也没有执行vacuum,且当前的offset != INVALID,则 返回节点和offset; + 其他情况需要在page内重新查找位置, 调用_bt_binsrch,返回 + } + ``` + + 上述流程由于涉及节点分裂,所以看上去有点复杂,此流程主要关注读写并发的问题,分裂后面会详述。此流程是对page加 WriteLock,在 page 内找到插入的位置(offset)。 + + ``` + 找到位置后,调用 _bt_insertonpg 执行插入操作 + // 实现在 nbtinsert.cpp 向page内插入一个tuple,执行时需要对buffer pin + write lock,执行完成后释放pin 和 write lock; 如果插入的节点是非叶子节点,cbuf表示插入的tuple的左子节点,插入操作会清除左子节点上的BTP_INCOMPLETE_SPLIT标记(该标记表示page处于分裂为完成的状态,通常是标记在分裂后的左侧节点上,表明分裂后父节点缺少分裂后右侧节点的信息)。 + { + 检查不是 incomplete split page + 检查page内free space 是否足够插入 tuple,如果不够 执行分裂 + 调用 _bt_findsplitloc 找到分裂点,执行分裂 _bt_split,在 _bt_split 完成 tuple 的插入,完成分裂后向上更新父节点 + 如果空间足够,调用 _bt_pgaddtup 执行插入 + 执行插入的 page buffer 标脏 + 记Xlog + } + ``` + +- **写流程中加锁顺序:** + + 对节点加 ReadLock,获取节点信息 + + 释放 ReadLock + + 加 WriteLock,执行插入操作,(可能继续执行分裂) + + 执行完成 Unlock + + +## 查找 + +- \_bt\_search + + ``` + // 实现在 nbtsearch.cpp 中,用于从B-tree的根节点开始向下查找到符合条件的叶子节点 + { + 获取根page _bt_getroot + 调用 _bt_moveright; 如果是write-mode(插入流程查找插入位置),_bt_moveright会完成未完成的split + 如果page是叶子page,则返回 + 否则调用 _bt_binsrch,在当前page进行查找,返回对应的item的offset。如果查找的是叶子节点,则返回的是第一个key >= scan key; 非叶子节点返回的是最后一个 key < scanKey + 释放父节点的读锁,获取子节点的读锁,直到找到叶子节点。 + 返回一个BTStack对象(数据结构为链表,每个节点中有 block number 和 offset信息,同时还有一个指针指向父节点),如果发生分裂,可以通过此链表回溯父节点。(L&Y paper) + 返回BTStack对象 + } + ``` + + +- 查找流程的加锁顺序: + + 先对父节点加 ReadLock,然后获取子节点信息, + + 父节点 Unlock + + 子节点加 ReadLock + + 代码如下: + + ``` + { + /* drop the read lock on the parent page, acquire one on the child */ + *bufP = _bt_relandgetbuf(rel, *bufP, blkno, BT_READ); + ... 
+ } + Buffer _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access) + { + Buffer buf; + + Assert(blkno != P_NEW); + if (BufferIsValid(obuf)) + LockBuffer(obuf, BUFFER_LOCK_UNLOCK); + buf = ReleaseAndReadBuffer(obuf, rel, blkno); + LockBuffer(buf, access); + _bt_checkpage(rel, buf); + return buf; + } + ``` + + +## 分裂 + +插入和查找流程都涉及节点的分裂问题,因此在这里有必要介绍一下分裂的实现。 + +分裂的主要实现也在 nbtinsert.cpp中,主要逻辑在 + +- \_bt\_split + + ``` + // 分裂btree中的一个节点,只实现一个page分裂为两个page,不包括更新父节点信息部分,更新父节点的实现在_bt_insert_parent + { + // 分裂过程中有三个 page, 原始未分裂的 page 记为 origpage,新申请的分裂后右侧的 page 记为 rightpage,临时 page 用于记录分裂后左侧节点的数据记为 leftpage,最终需要把 leftpage 拷贝回 origpage。 + _bt_getbuf 申请一个新的 index page(rightpage),并对 page 进行初始化。FSM 返回一个page后,需要对这个page进行检查确保这个page没有被其他人使用。返回之前会对 page buffer 加锁以及引用计数(lock and pin)。 + 初始化 leftpage,拷贝 origpage 的 LSN 到 leftpage; 拷贝 origpage 中的 special pointer 中的 flags 到 leftpage ,然后设置 leftpage 中 special pointer 的 flag。将BTP_ROOT、BTP_SPLIT_END、BTP_HAS_GARBAGE三个标记位设置为0。rightpage 此标记位设置为和 leftpage 一致。 + 设置是 leftpage 的 special pointer 的 flag 中 BTP_INCOMPLETE_SPLIT 设置为1,表明父节点中缺少指向其右兄弟节点的指针。设置 leftpage 和 rightpage 的 link ptr。 + 如果分裂的节点不是最右节点,则rightpage中的第一个位置是 HighKey,否则是 user data。 + 确定分裂后左右两侧的数据,将数据插入左右两侧的page + 对 rightpage 加 BT_WRITE 锁,用 leftpage 的内容覆盖 origpage (除了data外,special pointer 也一起更新),将 origpage 和 rightpage 的 buffer 标脏。更新原 origpage 的右兄弟节点的 left link ptr,指向rightpage,将页面标脏。 + 如果分裂的是非叶子节点,节点分裂完成后清除子节点上的BTP_INCOMPLETE_SPLIT标记。非叶子节点分裂是由叶子节点分裂导致,父节点完成分裂是指向表明子节点的downlink插入完成,可以将子节点的BTP_INCOMPLETE_SPLIT标记清除// 叶子节点上BTP_INCOMPLETE_SPLIT标记清除是在父节点中插入分裂后的右侧节点的指针时完成,实现是在_bt_insert_parent -> _bt_insertonpg + 记XLog + } + ``` + + **图 2** B-tree索引分裂过程中结构变化 + ![](figures/B-tree索引分裂过程中结构变化.png "B-tree索引分裂过程中结构变化") + + +分裂过程中的加锁顺序: + +- origpage 加 WriteLock, 修改数据 和 right link ptr +- rightpage 加 WriteLock, 修改数据 和 link ptr +- origpage 分裂前的右兄弟节点加WriteLock, 修改 left link ptr +- origpage 分裂前的右兄弟节点 Unlock +- rightpage Unlock +- origpage Unlock + +- root分裂 + + L & Y 的理论中没有讨论根节点分裂的问题。OpenGauss实现时按照普通节点分裂的方式,处理根节点的分裂。根节点分裂时,需要新生成一个根节点,原来的根节点成为新根节点的子节点,根节点分裂需要额外更新meta-page中根节点的信息,最终结果也是正确的。 + + 在节点分裂时,需要通过回溯的方式来更新上层节点。但如果这个上层节点是meta-page,那么此次回溯无法正确更新,因为meta-page中只是元数据不能分裂。这种情况下B-tree产生了新的root,需要重新走一遍查找,直到找到刚分裂的节点的上一层,然后向右查找到正确的插入位置。这通常需要重新获取meta-page以及新的root-page,可以通过保存节点在B-tree中所处的层号(通常叶子节点是0层,其父节点是1层,以此类推),最终找到要更新的节点所处的层。 + + 这次主要介绍了 B\_tree 索引实现与理论中的一些差异点,重点介绍了读写及分裂的流程,及其中加解锁的顺序。索引数据的删除、vacuum、WAL等内容下次再介绍。 + + diff --git "a/content/zh/post/July/openGauss-Gin-\347\264\242\345\274\225.md" "b/content/zh/post/July/openGauss-Gin-\347\264\242\345\274\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..34833decaa1a176547b0b68c730be1a932450dbe --- /dev/null +++ "b/content/zh/post/July/openGauss-Gin-\347\264\242\345\274\225.md" @@ -0,0 +1,352 @@ ++++ + +title = "openGauss Gin 索引" + +date = "2021-09-21" + +tags = [ "openGauss Gin 索引"] + +archives = "2021-09" + +author = "吴松" + +summary = "openGauss Gin 索引" + +img = "/zh/post/July/title/img5.png" + +times = "12:30" + ++++ + +# openGauss Gin 索引 + + + +## 概述 + +GIN(Generalized Inverted Index)通用倒排索引,是首选的文本搜索索引类型。倒排索引对应的列上的数据类型通常是一个多值类型,索引中包含每个单词的索引条目,以及所匹配的位置的压缩列表。如果搜索条件是多个单词,可以先使用第一个单词进行匹配,再在找到的结果中使用其他单词删除不匹配的项。Gin 索引的 key 是多值类型中出现的单词,叶子节点中存储了每个单词出现的 TID 的列表。如果这个 TID 列表比较小,它可以和元素放在同一个页面中(称为 posting list)。如果列表比较大,就需要用到更高效的数据结构 B-tree,这样的 B-tree 位于单独的数据页上(称为 posting tree)。 + +## 索引结构 + +Gin 索引大的组织结构是一棵B-tree 如图-1 所示 + +其中也有 meta-page、root-page 等 page,如果一个 key 对应的 tids 比较少可以和 key 放在同一个 page 中作为叶子节点; 如果对应的 tids 比较多\(占用的空间的大小\),需要将这些 tids 放到单独的数据页上,并且以 
B-tree 的形式组织方便快速查找,叶子节点中记录对应的 B-tree 的 root-page 的信息。 + +**图 1** Gin 索引结构示意图 +![](figures/Gin-索引结构示意图.png "Gin-索引结构示意图") + +## 语法 + +``` +CREATE INDEX name ON table USING GIN (column); +``` + +openGauss 中创建 gin 索引时,索引列的类型必须是 tsvector 类型。 + +``` +Example: + +postgres=# create table ts(doc text, doc_tsv tsvector); + +postgres=# insert into ts(doc) values + ('Can a sheet slitter slit sheets?'), + ('How many sheets could a sheet slitter slit?'), + ('I slit a sheet, a sheet I slit.'), + ('Upon a slitted sheet I sit.'), + ('Whoever slit the sheets is a good sheet slitter.'), + ('I am a sheet slitter.'), + ('I slit sheets.'), + ('I am the sleekest sheet slitter that ever slit sheets.'), + ('She slits the sheet she sits on.'); + +postgres=# update ts set doc_tsv = to_tsvector(doc); + +postgres=# create index on ts using gin(doc_tsv); +``` + +![](figures/5.png) + +查询一个既包含 many 又包含 slitter 的 doc 如下: + +![](figures/4.png) + +## 实现 + +Gin 索引的实现主要在 src/gausskernel/storage/access/gin 下,主要文件及功能如下: + +**表 1** + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| 文件 | 功能 |
| --- | --- |
| ginbtree.cpp | 倒排索引page处理相关函数 |
| ginarrayproc.cpp | 支持倒排索引处理各种数组类型的函数 |
| gindatapage.cpp | 倒排索引处理 posting tree page 相关实现 |
| gininsert.cpp | 倒排索引插入相关实现 |
| ginpostinglist.cpp | 倒排索引处理 posting list 相关实现 |
| ginscan.cpp | 倒排索引扫描相关实现 |
| ginget.cpp | 倒排索引scan过程中获取tuple相关实现 |
| ginxlog.cpp | 倒排索引xlog回放相关实现 |
| ginvacuum.cpp | 倒排索引delete和vacuum相关实现 |
+ +查看 pg\_am 中 Gin 索引相关处理函数: + + + + + + + + + + + + + + + + + + + + + + + + +

| pg\_am 列 | gin 对应取值 |
| --- | --- |
| amname | gin |
| aminsert | gininsert |
| ambeginscan | ginbeginscan |
| amendscan | ginendscan |
| amgetbitmap | gingetbitmap |
| ambuild | ginbuild |
+ +## 构建 Gin 索引 + +``` +ginbuild +{ + ... + // 初始化工作,如 创建 gin 索引的 meta 和 root,即 XLOG 等 + buildInitialize(index, &buildstate); + // scan heap tuples 调用 ginBuildCallback 处理每个要加入索引的 tuple + // ginBuildCallback 会从 heap tuple 中提取 entries,如果有多个值 + // 会对这些值进行去重和排序。得到去重及排完序的 entries 后,调用 ginInsertBAEntries + // 将这些 entries 及 对应的 tids 插入一棵RB-tree + reltuples = tableam_index_build_scan(heap, index, indexInfo, false, ginBuildCallback, (void*)&buildstate); + ... + // 从RB-tree中把之前插入的 entries 和 tids scan 出来,插入到 gin index 中 + while ((list = ginGetBAEntry(&buildstate.accum, &attnum, &key, &category, &nlist)) != NULL) { + /* there could be many entries, so be willing to abort here */ + CHECK_FOR_INTERRUPTS(); + // 如果 key 不存在,则新增一个 key entry,如果已经存在则更新对应的 tids + // 首先在gin索引中查找到对应 key 的叶子节点,如果 key 已经存在,更新对应的 tids + // 不存在则插入一个新的叶子节点 + ginEntryInsert(&buildstate.ginstate, attnum, key, category, list, nlist, &buildstate.buildStats); + } + + ... + // 更新 meta-page 中的信息, 记 XLOG + ginUpdateStats(index, &buildstate.buildStats); + ... + 返回结果 +} +``` + +在向gin索引中插入数据时,首先和B-tree索引一样,首先需要查找对应的 key 是否存在; + +如果 key 已经存在,则查看现在叶子节点中 key 对应的 tids 是 posting tree 还是 posting list,更新 tids; + +posting list 如果由于更新导致 tids 比较多,可能变为 posting tree + +如果 key 不存在,则在叶子节点中插入这个新的 key 以及对应的 tids。 + +``` +void ginEntryInsert(GinState *ginstate, OffsetNumber attnum, Datum key, GinNullCategory category, + ItemPointerData *items, uint32 nitem, GinStatsData *buildStats) +{ + GinBtreeData btree; + GinBtreeEntryInsertData insertdata; + GinBtreeStack *stack = NULL; + IndexTuple itup; + Page page; + + insertdata.isDelete = FALSE; + + /* During index build, count the to-be-inserted entry */ + if (buildStats != NULL) + buildStats->nEntries++; + + ginPrepareEntryScan(&btree, attnum, key, category, ginstate); + + // 在 B-tree 中找到叶子节点 + stack = ginFindLeafPage(&btree, false); + page = BufferGetPage(stack->buffer); + + // 如果 key 已经存在 + if (btree.findItem(&btree, stack)) { + /* found pre-existing entry */ + itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, stack->off)); + // 如果是 posting tree 结构 + if (GinIsPostingTree(itup)) { + /* add entries to existing posting tree */ + BlockNumber rootPostingTree = GinGetPostingTree(itup); + + /* release all stack */ + LockBuffer(stack->buffer, GIN_UNLOCK); + freeGinBtreeStack(stack); + + /* insert into posting tree */ + ginInsertItemPointers(ginstate->index, rootPostingTree, items, nitem, buildStats); + return; + } + // 如果是 posting list + /* modify an existing leaf entry */ + itup = addItemPointersToLeafTuple(ginstate, itup, items, nitem, buildStats); + + insertdata.isDelete = TRUE; + } else { // 对应的 key 不存在, 需要新建一个叶子节点里的对象 + /* no match, so construct a new leaf entry */ + itup = buildFreshLeafTuple(ginstate, attnum, key, category, items, nitem, buildStats); + } + + /* Insert the new or modified leaf tuple */ + insertdata.entry = itup; + ginInsertValue(&btree, stack, &insertdata, buildStats); + pfree(itup); + itup = NULL; +} +``` + +gin 的 B-tree 也会涉及分裂等问题,和 B-tree 的分裂类似,因此在使用过程中也会有与 B-tree 索引使用过程中 moveright 类似的动作,本文不展开介绍分裂相关内容了。 + +相关数据结构: + +``` +// 用于表示一个 key 及 与其关联的 tids 的数据结构 +typedef struct GinEntryAccumulator { + RBNode rbnode; + Datum key; + GinNullCategory category; + OffsetNumber attnum; + bool shouldSort; + ItemPointerData *list; + uint32 maxcount; /* allocated size of list[] */ + uint32 count; /* current number of list[] entries */ +} GinEntryAccumulator; + +// Gin 索引整体结构为 B-tree 结构 +// B-tree 中的一个节点 +typedef struct GinBtreeStack { + BlockNumber blkno; + Buffer buffer; + OffsetNumber off; + 
    ItemPointerData iptr;
    /* predictNumber contains predicted number of pages on current level */
    uint32 predictNumber;
    struct GinBtreeStack *parent; // 父节点
} GinBtreeStack;

typedef struct GinBtreeData *GinBtree;
```

gin 索引的查找和插入的流程在构建 gin 索引的流程中都有涉及,和 B-tree 有些类似,本文不展开介绍了。

另外需要注意的一点是,gin 索引是行存表和列存表都支持的索引类型,但是在 pg\_am 中行存表的 gin 和列存表的 cgin 是两条记录,cgin 在 pg\_am 中注册的相关处理函数如下所示:

**表 2**

| pg\_am 列 | cgin 对应取值 |
| --- | --- |
| amname | cgin |
| aminsert | gininsert |
| ambeginscan | ginbeginscan |
| amendscan | ginendscan |
| amgetbitmap | cgingetbitmap |
| ambuild | cginbuild |
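上面两张表中的映射关系可以直接在数据库中查询验证。下面的 SQL 仅作示意(假设所用版本的 pg\_am 仍保留这些列名):

```
-- 对比行存 gin 与列存 cgin 在 pg_am 中注册的处理函数(示意)
select amname, aminsert, ambeginscan, amendscan, amgetbitmap, ambuild
from pg_am
where amname in ('gin', 'cgin');
```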
+ +可以看出列存表的 gin 索引大部分处理函数和行存表是共用的,但索引构建的实现和行存不同,主要差异点是行存表和列存表底层存储及访问方式的差异,gin 索引本身的实现并没有太大差别。 + +索引删除和vacuum相关的内容不在本文讨论,这块内容后面单独叙述。 + diff --git "a/content/zh/post/July/openGauss-MogDB\346\225\260\346\215\256\345\272\223\345\256\214\347\276\216\351\200\202\351\205\215Grafana\345\217\212Prometheus.md" "b/content/zh/post/July/openGauss-MogDB\346\225\260\346\215\256\345\272\223\345\256\214\347\276\216\351\200\202\351\205\215Grafana\345\217\212Prometheus.md" new file mode 100644 index 0000000000000000000000000000000000000000..93dfbc5218a995807e038f4ddb1717e355b167de --- /dev/null +++ "b/content/zh/post/July/openGauss-MogDB\346\225\260\346\215\256\345\272\223\345\256\214\347\276\216\351\200\202\351\205\215Grafana\345\217\212Prometheus.md" @@ -0,0 +1,387 @@ ++++ + +title = "openGauss/MogDB数据库完美适配Grafana及Prometheus" + +date = "2021-12-05" + +tags = [ "openGauss/MogDB数据库完美适配Grafana及Prometheus"] + +archives = "2021-12" + +author = "彭冲" + +summary = "openGauss/MogDB数据库完美适配Grafana及Prometheus" + +img = "/zh/post/July/title/img8.png" + +times = "12:30" + ++++ + +# openGauss/MogDB数据库完美适配Grafana及Prometheus + +Grafana是一个跨平台、开源的度量分析和可视化工具,可以通过对接各种数据源并作可视化展示。 + +Prometheus是著名开源监控项目,其监控任务由具体的exporter实现,exporter到目标端抓取监控数据,然后保存在TSDB时序数据库中。Prometheus也支持PromQL查询语言进行自定义的查询组合。 + +openGauss/MogDB数据库可以通过opengauss\_exporter完美适配Grafana及Prometheus。 + +下面是效果图: + +![](figures/Grafana及Prometheus.png) + +下面在麒麟arm测试环境下演示部署过程,x86环境需要替换相关下载安装包。 + +## 1.Prometheus部署 + +1.1. 安装包下载 + +软件下载地址:https://prometheus.io/download/ + +下载文件prometheus-2.31.1.linux-arm64.tar.gz + +1.2. 创建prometheus用户 + +``` +# useradd prometheus +# password prometheus +``` + +1.3. 创建prometheus相应目录并解压安装包 + +``` +# mkdir /appdata/prometheus +# mkdir /appdata/prometheus/etc +# mkdir /appdata/prometheus/log +# mkdir /appdata/prometheus/data + +# tar -zxvf prometheus-2.31.1.linux-arm64.tar.gz -C /appdata/prometheus +# chown -R prometheus: /appdata/prometheus +# chmod -R 755 /appdata/prometheus +``` + +1.4. 使用prometheus用户检查安装版本 + +``` +$ /appdata/prometheus/prometheus-2.31.1.linux-arm64/prometheus --version +``` + +1.5. 使用prometheus用户编辑配置文件 + +``` +$ vi /appdata/prometheus/etc/prometheus.yml +``` + +内容如下 + +``` +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['x.x.x.x:50090'] +``` + +1.6. 使用root用户配置prometheus开机启动 + +``` +# vi /usr/lib/systemd/system/prometheus.service +``` + +内容如下 + +``` +[Unit] +Description= Prometheus +After=network.target + +[Service] +Type=simple +User=prometheus +ExecStart=/appdata/prometheus/prometheus-2.31.1.linux-arm64/prometheus --web.listen-address="0.0.0.0:50090" --config.file=/appdata/prometheus/etc/prometheus.yml --storage.tsdb.path=/appdata/prometheus/data +ExecReload=/bin/kill -HUP $MAINPID +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + +启动服务 + +``` +# systemctl daemon-reload +# systemctl enable prometheus +# systemctl start prometheus +``` + +1.7. 
使用web浏览器测试prometheus服务 + +打开 http://x.x.x.x:50090/ 如下如所示,说明prometheus服务正常。 + +## 2. node\_exporter部署 + +2.1. 安装包下载 + +软件下载地址:https://github.com/prometheus/node\_exporter/releases + +下载文件node\_exporter-1.2.2.linux-arm64.tar.gz + +2.2. 使用root用户解压安装包 + +root用户解压安装包到prometheus用户,测试环境部署在prometheus用户下。 + +``` +# tar -zxvf node_exporter-1.2.2.linux-arm64.tar.gz -C /appdata/prometheus/ +# chown -R prometheus: /appdata/prometheus/node_exporter-1.2.2.linux-arm64 +``` + +2.3. 使用prometheus用户检查安装版本 + +``` +$ /appdata/prometheus/node_exporter-1.2.2.linux-arm64/node_exporter --version +``` + +2.4. 使用root用户配置node\_exporter开机启动 + +``` +# vi /usr/lib/systemd/system/node_exporter.service +``` + +内容如下: + +``` +[Unit] +Description= node exporter +After=network.target + +[Service] +Type=simple +User=prometheus +ExecStart=/appdata/prometheus/node_exporter-1.2.2.linux-arm64/node_exporter --web.listen-address=":9100" --no-collector.softnet +ExecReload=/bin/kill -HUP $MAINPID +Restart=on-failure + +[Install] +WantedBy=multi-user.target + +# systemctl daemon-reload +# systemctl enable node_exporter +# systemctl start node_exporter +``` + +2.5. 查看node\_exporter服务 + +``` +# systemctl status node_exporter +``` + +2.6. prometheus配置exporter + +prometheus配置文件prometheus.yml增加下面的内容 + +``` + - job_name: 'node_exporter' + static_configs: + - targets: ['110.128.131.16:9100'] +``` + +2.7. 使用root用户重启prometheus服务 + +``` +# systemctl restart prometheus +``` + +## 3. opengauss\_exporter部署 + +3.1. 安装包下载 + +下载地址:https://github.com/enmotech/opengauss\_exporter + +下载文件node\_exporter-1.2.2.linux-arm64.tar.gz + +3.2. 使用root用户解压安装包 + +root用户解压安装包到prometheus用户,测试环境部署在prometheus用户下。 + +``` +# unzip opengauss_exporter_0.0.9_linux_arm64.zip -d /appdata/prometheus/opengauss_exporter +# chown -R prometheus: /appdata/prometheus/opengauss_exporter +``` + +3.3. 使用prometheus用户检查安装版本 + +``` +$ /appdata/prometheus/opengauss_exporter/opengauss_exporter --version +``` + +3.4. 上传yaml文件 + +文件存放路径:/appdata/prometheus/opengauss\_exporter + +修改权限 + +``` +# chown prometheus: queries.yaml +``` + +3.5. MogDB数据库端配置 + +3.5.1. 创建监控用户 + +密码复杂度要符合数据库的要求,默认要求大小写+特殊字符,不少于8位 + +``` +$ gsql -Uomm postgres -r + +CREATE USER db_exporter WITH PASSWORD 'XXXXXXXX' MONADMIN; +grant usage on schema dbe_perf to db_exporter; +grant select on pg_stat_replication to db_exporter; +``` + +3.5.2. 配置pg\_hba.conf + +以md5的加密方式添加mogdbmonitor监控机白名单 + +``` +$ gs_guc set -I all -N all -h "host postgres db_exporter 110.128.131.16/32 md5" +``` + +3.6. 使用root用户配置服务开机启动 + +``` +# vi /usr/lib/systemd/system/mogdb_exporter.service +``` + +内容如下: + +``` +[Unit] +Description=Prometheus MogDB Exporter Server + +[Service] +User=prometheus +Environment="DATA_SOURCE_NAME=postgresql://db_exporter:password@ip:port/postgres?sslmode=disable" +ExecStart=/appdata/prometheus/opengauss_exporter/opengauss_exporter --web.listen-address=":51007" --config="/appdata/prometheus/opengauss_exporter/queries.yaml" + +[Install] +WantedBy=multi-user.target +``` + +启动服务 + +``` +# systemctl daemon-reload +# systemctl enable mogdb_exporter +# systemctl start mogdb_exporter +``` + +3.7. 查看exporter服务 + +``` +# systemctl status mogdb_exporter +``` + +3.8. prometheus配置exporter + +prometheus配置文件prometheus.yml增加下面的内容 + +``` + - job_name: ' mogdb_exporter ' + static_configs: + - targets: ['110.128.131.16: 51007'] +``` + +3.9. 使用root用户重启prometheus服务 + +``` +# systemctl restart prometheus +``` + +## 4. Grafana部署 + +4.1. 安装包下载 + +下载地址:https://grafana.com/grafana/download + +下载文件grafana-enterprise-8.2.4.linux-arm64.tar.gz + +4.2. 
使用root用户解压安装包 + +root用户解压安装包到prometheus用户,测试环境部署在prometheus用户下。 + +``` +# tar -zxvf grafana-enterprise-8.2.4.linux-arm64.tar.gz -C /appdata/prometheus/ +# chown -R prometheus: /appdata/prometheus/grafana-8.2.4 +``` + +4.3. 使用prometheus用户检查安装版本 + +``` +$ /appdata/prometheus/grafana-8.2.4/bin/grafana-server -v +``` + +4.4. 使用prometheus用户配置grafana + +``` +$ vi /appdata/prometheus/grafana-8.2.4/conf/defaults.ini +``` + +例如修改http\_port = 51009 + +修改内置数据库3306端口等 + +4.5. 使用root用户配置服务开机启动 + +``` +# vi /usr/lib/systemd/system/grafana.service +``` + +内容如下 + +``` +[Unit] +Description=Grafana Server + +[Service] +User=prometheusExecStart=/appdata/prometheus/grafana-8.2.4/bin/grafana-server -homepath /appdata/prometheus/grafana-8.2.4/ -config /appdata/prometheus/grafana-8.2.4/conf/defaults.ini + +[Install] +WantedBy=multi-user.target +``` + +启动服务 + +``` +# systemctl daemon-reload +# systemctl enable grafana.service +# systemctl start grafana.service +``` + +4.6. 查看grafana服务状态 + +``` +# systemctl status grafana.service +``` + +4.7. 使用web浏览器测试grafana + +打开 http://x.x.x.x:51009/ ,默认账号:admin 默认密码:admin + +然后配置prometheus,加载dashboard即可。 + diff --git "a/content/zh/post/July/openGauss-MogDB\346\225\260\346\215\256\345\272\223\350\247\246\345\217\221\345\231\250\345\210\206\344\270\244\346\255\245\346\213\206\350\247\243\345\210\233\345\273\272.md" "b/content/zh/post/July/openGauss-MogDB\346\225\260\346\215\256\345\272\223\350\247\246\345\217\221\345\231\250\345\210\206\344\270\244\346\255\245\346\213\206\350\247\243\345\210\233\345\273\272.md" new file mode 100644 index 0000000000000000000000000000000000000000..09a0baa7cb074b4251954d251a33e16dc3b0fdf6 --- /dev/null +++ "b/content/zh/post/July/openGauss-MogDB\346\225\260\346\215\256\345\272\223\350\247\246\345\217\221\345\231\250\345\210\206\344\270\244\346\255\245\346\213\206\350\247\243\345\210\233\345\273\272.md" @@ -0,0 +1,76 @@ ++++ + +title = "openGauss/MogDB数据库触发器分两步拆解创建" + +date = "2021-12-05" + +tags = [ "openGauss/MogDB数据库触发器分两步拆解创建"] + +archives = "2021-12" + +author = "彭冲" + +summary = "openGauss/MogDB数据库触发器分两步拆解创建" + +img = "/zh/post/July/title/img7.png" + +times = "12:30" + ++++ + +# openGauss/MogDB数据库触发器分两步拆解创建 + +## Oracle触发器参考例子 + +``` +CREATE OR REPLACE TRIGGER ora_trigger AFTER UPDATE OR INSERT OR DELETE ON tab_x FOR each row +declare + ... +begin + ... + insert into tab_x ...; +end; +/ +``` + +针对上面的例子,PostgreSQL里需要分两步来操作 + +## 1.先创建“触发器函数” + +“触发器函数”是返回类型为trigger的函数,把上面的业务逻辑提取到下面的函数里面。 + +``` +CREATE FUNCTION tigger_fun() RETURNS trigger AS $$ +declare + ... +begin + ... 
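  -- 行级触发器函数中可通过 NEW/OLD 引用变更前后的行,此处省略的业务逻辑仅为示意
  -- 注意:plpgsql 触发器函数需要以 RETURN 结束,AFTER 行级触发器通常 RETURN NULL 即可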
+ insert into tab_x ...; +end; +$$ language plpgsql; +``` + +## 2.创建触发器 + +下面procedure关键字后面指定上一步创建的触发器函数名。 + +``` +create trigger tab_x_trigger + AFTER UPDATE OR INSERT OR DELETE on tab_x + for each row execute procedure tigger_fun(); +``` + +## 3.触发器的查看 + +方式一:使用gsql工具连接后,通过表的定义上可以查看 + +``` +\d+ tab_x +``` + +方式二:通过sql语句查询pg\_trigger系统表 + +``` +select * from pg_trigger where tgname='tab_x'; +``` + diff --git "a/content/zh/post/July/openGauss-MogDB\351\205\215\347\275\256IPv6.md" "b/content/zh/post/July/openGauss-MogDB\351\205\215\347\275\256IPv6.md" new file mode 100644 index 0000000000000000000000000000000000000000..fc41559ace2e60cceaa15ca14ff29183ba8b0e1b --- /dev/null +++ "b/content/zh/post/July/openGauss-MogDB\351\205\215\347\275\256IPv6.md" @@ -0,0 +1,239 @@ ++++ + +title = "openGauss/MogDB配置IPv6" + +date = "2021-12-05" + +tags = [ "openGauss/MogDB配置IPv6"] + +archives = "2021-12" + +author = "彭冲" + +summary = "openGauss/MogDB配置IPv6" + +img = "/zh/post/July/title/img6.png" + +times = "12:30" + ++++ + +# openGauss/MogDB配置IPv6 + +openGauss/MogDB支持多种网络接口,假如我们想在支持IPv6的网络上部署使用,只需简单操作即可,本文将介绍在Centos上如何配置使用。 + +## 关于IPv6 + +IPv6\(Internet Protocol Version 6\),是Internet Engineering Task Force \(IETF\)设计用于替代IPv4的下一代IP协议,使用IPv6能解决网络地址资源数量的问题。 + +我们使用ipconfig /all命令查看windows网络接口,会看到IPv6地址。 + +以太网适配器 以太网 7: + +``` + 本地链接 IPv6 地址. . . . . . . . : fe80::828a:5e20:53cb:7719%6(首选) + IPv4 地址 . . . . . . . . . . . . : 192.168.137.68(首选) +``` + +Centos下使用ip addr命令查看linux网络接口,也会看到IPv6地址。 + +``` +# ip addr +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: enp0s3: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:b5:54:32 brd ff:ff:ff:ff:ff:ff + inet 192.168.137.101/24 brd 192.168.137.255 scope global enp0s3 + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:feb5:5432/64 scope link + valid_lft forever preferred_lft forever +``` + +## IPv6分类 + +1.本地关联IPv6 + +本地关联的IPv6,是以fe80开头,与网卡的物理地址\(MAC地址\)有关,不需要通过DHCP自动分配或者手工设置。 + +2.全局IPv6 + +如果需要跨网络或者跨路由器进行通信,则需要使用全局的IPv6。 + +## 创建全局IPv6 + +创建全局IPv6有多种的方式,例如DHCPv6、Stateless address autoconfiguration \(SLAAC\) 以及手工配置。 + +手工配置可以使用ip命令来配置: + +``` +# ip -6 addr add 2022:1:0:0::db1/64 dev enp0s3 +``` + +或者使用ifconfig命令来配置: + +``` +# ifconfig enp0s3 inet6 add 2022:1:0:0::db1/64 +``` + +通过上面任意一种方式配置后,可以看到enp0s3网络接口将增加一个inet6,并且是global属性的。 + +``` +# ip addr +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: enp0s3: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:b5:54:32 brd ff:ff:ff:ff:ff:ff + inet 192.168.137.101/24 brd 192.168.137.255 scope global enp0s3 + valid_lft forever preferred_lft forever + inet6 2022:1::db1/64 scope global + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:feb5:5432/64 scope link + valid_lft forever preferred_lft forever +``` + +注意:上面IPv6字符串配置中有db1,这符合HEX字符规则,非HEX字符则不允许设置,比如我们把db换成dx,则会提示下面的错误信息。 + +``` +# ifconfig enp0s3 inet6 add 2022:1:0:0::dx1/64 +2022:1:0:0::dx1: Resolver Error 0 (no error) +``` + +## IPv6连通性测试 + +在本地使用ping6进行连通性测试,先使用全局IPv6进行测试 + +``` +# ping6 2022:1::db1 -c3 +PING 2022:1::db1(2022:1::db1) 56 data 
bytes +64 bytes from 2022:1::db1: icmp_seq=1 ttl=64 time=0.027 ms +64 bytes from 2022:1::db1: icmp_seq=2 ttl=64 time=0.047 ms +64 bytes from 2022:1::db1: icmp_seq=3 ttl=64 time=0.028 ms + +--- 2022:1::db1 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 2000ms +rtt min/avg/max/mdev = 0.027/0.034/0.047/0.009 ms +``` + +再使用本地关联IPv6进行测试,此时需要带上网络接口名称 + +``` +# ping6 fe80::a00:27ff:feb5:5432%enp0s3 -c3 +PING fe80::a00:27ff:feb5:5432%enp0s3(fe80::a00:27ff:feb5:5432%enp0s3) 56 data bytes +64 bytes from fe80::a00:27ff:feb5:5432%enp0s3: icmp_seq=1 ttl=64 time=0.040 ms +64 bytes from fe80::a00:27ff:feb5:5432%enp0s3: icmp_seq=2 ttl=64 time=0.041 ms +64 bytes from fe80::a00:27ff:feb5:5432%enp0s3: icmp_seq=3 ttl=64 time=0.022 ms + +--- fe80::a00:27ff:feb5:5432%enp0s3 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 2000ms +rtt min/avg/max/mdev = 0.022/0.034/0.041/0.010 ms +``` + +## openGauss/MogDB配置IPv6 + +编辑postgresql.conf文件,修改监听参数 + +``` +listen_addresses = '*' +``` + +修改完后重启服务,数据库将监听本机所有的网络接口。 + +编辑pg\_hba.conf文件,添加数据库客户端连接的IPv6认证条目 + +``` +host all all fe80::a00:27ff:feb5:5432/128 md5 +host all all 2022:1::db1/128 md5 +``` + +## 使用gsql客户端进行测试 + +1.使用本地关联IPv6进行测试,此时需要带上网络接口名称 + +``` +$ gsql -h fe80::a00:27ff:feb5:5432%enp0s3 -Umoguser postgres -r -p6432 + +postgres=> \conninfo +You are connected to database "postgres" as user "moguser" on host "fe80::a00:27ff:feb5:5432%enp0s3" at port "6432". +postgres=> SELECT datname,usename, client_addr FROM pg_stat_activity where usename='moguser'; + datname | usename | client_addr +----------+---------+-------------------------- + postgres | moguser | fe80::a00:27ff:feb5:5432 +(1 row) +``` + +2.使用全局IPv6进行测试 + +``` +$ gsql -h 2022:1::db1 -Umoguser postgres -r -p6432 + +postgres=> \conninfo +You are connected to database "postgres" as user "moguser" on host "2022:1::db1" at port "6432". +postgres=> SELECT datname,usename, client_addr FROM pg_stat_activity where usename='moguser'; + datname | usename | client_addr +----------+---------+------------- + postgres | moguser | 2022:1::db1 +(1 row) +``` + +## 使用java jdbc进行测试 + +通过java程序test.jar包进行测试,test.jar需要三个入参,分别是jdbc url、jdbc username、jdbc password。 + +1.使用普通的IPv4进行测试 + +``` +$ java -jar test.jar jdbc:postgresql://192.168.137.101:6432/postgres moguser Admin@1234 +``` + +执行结果如下,可以看到数据库连接测试成功 + +``` +Input jdbc url:jdbc:postgresql://192.168.137.101:6432/postgres +Input jdbc username:moguser +Connection test successfully. +``` + +2.使用本地关联IPv6进行测试,进行测试 + +``` +$ java -jar test.jar jdbc:postgresql://fe80::a00:27ff:feb5:5432:6432/postgres moguser Admin@1234 +``` + +执行结果如下,可以看到数据库连接测试成功 + +``` +Input jdbc url:jdbc:postgresql://fe80::a00:27ff:feb5:5432:6432/postgres +Input jdbc username:moguser +Connection test successfully +``` + +3.使用全局IPv6进行测试 + +``` +$ java -jar test.jar jdbc:postgresql://2022:1::db1:6432/postgres moguser Admin@1234 +``` + +执行结果如下,可以看到数据库连接测试成功 + +``` +Input jdbc url:jdbc:postgresql://2022:1::db1:6432/postgres +Input jdbc username:moguser +Connection test successfully. 
+``` + +## 总结 + +1.openGauss/MogDB配置IPv6只需简单修改listen\_addresses = ‘\*’ 即可。 + +2.使用gsql客户端进行连接时,本地关联IPv6还需要使用网络接口名进行访问,全局IPv6不需要。 + +3.使用jdbc客户端进行连接时,无论是本地关联IPv6还是全局IPv6,直接使用地址即可。 + diff --git "a/content/zh/post/July/openGauss-MogDB\351\233\266\345\255\227\350\212\202\351\227\256\351\242\230\345\244\204\347\220\206.md" "b/content/zh/post/July/openGauss-MogDB\351\233\266\345\255\227\350\212\202\351\227\256\351\242\230\345\244\204\347\220\206.md" new file mode 100644 index 0000000000000000000000000000000000000000..926aacc03e7d5ac59a6ae97630bbaf6f329a5591 --- /dev/null +++ "b/content/zh/post/July/openGauss-MogDB\351\233\266\345\255\227\350\212\202\351\227\256\351\242\230\345\244\204\347\220\206.md" @@ -0,0 +1,206 @@ ++++ + +title = "openGauss/MogDB零字节问题处理" + +date = "2021-12-05" + +tags = [ "openGauss/MogDB零字节问题处理"] + +archives = "2021-12" + +author = "彭冲" + +summary = "openGauss/MogDB零字节问题处理" + +img = "/zh/post/July/title/img6.png" + +times = "12:30" + ++++ + +# openGauss/MogDB零字节问题处理 + +**问题描述**:java应用端程序调用GZIP压缩类对数据进行编码压缩后入库 ,然后从数据库取出进行解压,原来再mysql数据库中是正常的,但迁移到openGauss/mogdb之后,解压出来的数据是乱码,不正常。 + +mysql端表结构如下: + +``` +CREATE TABLE `test` ( + `id` bigint(20) NOT NULL, + `info` varchar(20) NOT NULL, + `info2` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC; +``` + +迁移到openGauss/MogDB后表结构如下: + +``` +create table test( +id int, +info text, +info2 text +); +``` + +java压缩接口方法如下: + +``` + public static String compress(String str) throws IOException { + if (null == str || str.length() <= 0) { + return str; + } + GZIPOutputStream gzip = null; + // 创建一个新的输出流 + ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + // 使用默认缓冲区大小创建新的输出流 + gzip = new GZIPOutputStream(out); + // 将字节写入此输出流 + gzip.write(str.getBytes("utf-8")); + // 因为后台默认字符集有可能是GBK字符集,所以此处需指定一个字符集 + gzip.close(); + // 使用指定的 charsetName,通过解码字节将缓冲区内容转换为字符串 + return out.toString("ISO-8859-1"); + } finally { + closeQuietly(gzip); + closeQuietly(out); + } + } +``` + +java解压接口方法如下: + +``` + public static String unCompress(String str) throws IOException { + GZIPInputStream gzip = null; + if (null == str || str.length() <= 0) { + return str; + } + // 创建一个新的输出流 + ByteArrayOutputStream out = new ByteArrayOutputStream(); + // 创建一个 ByteArrayInputStream,使用 buf 作为其缓冲 区数组 + ByteArrayInputStream in = new ByteArrayInputStream(str.getBytes("ISO-8859-1")); + try { + // 使用默认缓冲区大小创建新的输入流 + gzip = new GZIPInputStream(in); + byte[] buffer = new byte[256]; + int n = 0; + // 将未压缩数据读入字节数组 + while ((n = gzip.read(buffer)) >= 0) { + out.write(buffer, 0, n); + } + // 使用指定的 charsetName,通过解码字节将缓冲区内容转换为字符串 + return out.toString("utf-8"); + } finally { + closeQuietly(gzip); + closeQuietly(in); + closeQuietly(out); + } + } +``` + +测试用例部分关键代码参考如下: + +1.对UTF8编码的字符串数据进行压缩,然后存到数据库中 + +``` +String str = "{\"name\":\"jerome\",\"familyName\":\"peng\",\"company\":\"enmotech\"}"; + +System.out.println("input:"+str); + +String compress_java = GZipUtils.compress(str); + + try{ + ps = conn.prepareStatement(sql); + ps.setInt(1, 100); + ps.setString(2, str); + ps.setString(3, compress_java); + ps.execute(); + } catch (Exception e) { + e.printStackTrace(); + } +``` + +2.从数据库中取出字段进行解密 + +``` + sql = " select info,info2 from test where id=100"; + ResultSet rs = null; + try{ + ps = conn.prepareStatement(sql); + rs = ps.executeQuery(); + while (rs.next()) { + String compress_db = rs.getString(2); + String unCompress= GZipUtils2.unCompress(compress_db ); + 
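                // 期望此处的 output 与前面压缩前的原始 str 一致(对应下文的期望结果说明)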
System.out.println("output:"+unCompress); + } + } catch (Exception e) { + e.printStackTrace(); + } +``` + +期望结果是从数据库中取出来的字符串能够解压出原始数据。也就是上面的unCompress变量输出的结果应该要与上面的str变量输出结果一致,应为: + +``` +{"name":"jerome","familyName":"peng","company":"enmotech"} +``` + +如果我们在pg数据库里进行测试,上面测试第一步会报错提示无法对0字节进行存储 + +``` +org.postgresql.util.PSQLException: ERROR: invalid byte sequence for encoding "UTF8": 0x00 +``` + +但在openGauss/MogDB里面,数据可以正常存储,不会报错,但是压缩接口进行解码时数据显示乱码。 + +下面我们对比入库前和入库后的字节序列(以hex字符形式显示,两个字符表示一个字节): + +入库前的hex字符串 + +``` +1f8b0800000000000000ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d003efb28273a000000 +``` + +入库后的hex字符串 + +``` +1f8b0820202020202020ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d203efb28273a202020 +``` + +我们发现其实是00与20的差异,所有的hex 00被转义为了hex 20,也就是0字节被转义为了空格。 + +既然知道了这个差异,那我们对取出的数据做一次反向替换,应该可以解决这个问题。 + +我们可以按字节进行读取,如果数值是32(hex 20对应十进制32)的字节,那我们就替换为0字节。 + +``` +if(bytes_src[i]==32) { + bytes_dest[i]=(byte)0; +}else { + bytes_dest[i]=bytes_src[i]; +} +``` + +这样修改之后测试发现还是有问题,因为压缩后的字节数据里可能也包含hex 20,这样我们会把不该替换的字节也做了误处理。 + +进一步修正为只对首尾固定的部分进行处理,思路来源与GZIP公共类。 + +``` +//头部10个字节或者尾部8个字节还原0字节 +if((i<=10 || i>=len-1-8) && bytes_src[i]==32) { + bytes_dest[i]=(byte)0; +}else { + bytes_dest[i]=bytes_src[i]; +} +``` + +这样处理后,测试数据可以正常解压,测试结果如下: + +``` +input:{"name":"jerome","familyName":"peng","company":"enmotech"} +HEX_ja:1f8b0800000000000000ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d003efb28273a000000 +HEX_db:1f8b0820202020202020ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d203efb28273a202020 +HEX_cv:1f8b0800000000000000ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d003efb28273a000000 +output:{"name":"jerome","familyName":"peng","company":"enmotech"} +``` + diff --git "a/content/zh/post/July/openGauss-SQL\346\211\247\350\241\214\345\231\250.md" "b/content/zh/post/July/openGauss-SQL\346\211\247\350\241\214\345\231\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..5053a521780e953b4600e6de29a08da0119096d4 --- /dev/null +++ "b/content/zh/post/July/openGauss-SQL\346\211\247\350\241\214\345\231\250.md" @@ -0,0 +1,395 @@ ++++ + +title = "openGauss SQL执行器" + +date = "2021-12-05" + +tags = [ "openGauss SQL执行器"] + +archives = "2021-12" + +author = "匿名" + +summary = "openGauss SQL执行器" + +img = "/zh/post/July/title/img7.png" + +times = "12:30" + ++++ + +# openGauss SQL执行器 + +## 一、源码分析: + +### 1、ExecutorStart 代码调试过程以及分析 + +ExecutorStart 部分代码主要需要的数据结构有: + +QueryDesc:查询描述符,实际是需要执行的SQL语句的相关信息,包含由CreateQueryDesc函数设置的操作类型、规划器的输出计划树、元组输出的接收器、查询环境变量以及由ExecutorStart函数设置的结果元组tuples描述、执行器状态、和per-plan-node状态树。具体结构见后面的执行器的主要数据结构分析 + +CmdType:由 Query 或 PlannedStmt 表示的操作类型的枚举;包括CMD\_SELECT、CMD\_UPDATE, CMD\_INSERT, CMD\_DELETE, CMD\_MERGE,等 + +PlanState :PlanState是所有PlanState-type节点的虚父类,从未被实例化 + +另外一个传入 Executor 的参数是 eflag,它的定义在 executor.h 中 + +![](figures/zh-cn_image_0000001187214316.jpg) + +从 ExecutorStart 开始调试:这个例程必须在任何执行的开始被调用,这个函数先检查是否存在 hook 函数,hook 是官方留给第三方插件使用的。如果存在则执行 hook 函数,否则执行标准 Executor。 + +![](figures/zh-cn_image_0000001187055788.png) + +调试过程如下: + +![](figures/zh-cn_image_0000001186895808.png) + +1、执行命令 select \* from student; ,分析传入的数据结构queryDesc和eflags: + + +![](figures/zh-cn_image_0000001232575381.png) + +2、向下执行,发现此时 hook 上挂载了 explain\_ExecutorStart 函数,于是进入开始执行函数,该函数负责检查是否需要启动审计功能。 + 
+![](figures/zh-cn_image_0000001232453865.png) + +3、 explain\_ExecutorStart 函数首先调用prev\_ExecutorStart函数,而 prev\_ExecutorStart 又挂载了 gs\_audit\_executor\_start\_hook 函数,该函数负责启动审计功能 + +4、之后调用了标准 ExecutorStart\(\)函数。 + +分别进入如下几个函数,进行了一些操作。 + +(1)estate = CreateExecutorState\(\); + +![](figures/zh-cn_image_0000001232775435.png) + +创建并初始化一个EState节点,它是用于整个Executor调用的工作存储。 + +\*主要地,这将创建每查询内存上下文用于保存直到查询结束为止的所有工作数据。 + +注意,每查询上下文将成为调用者的子上下文 + +(2)ExecAssignExprContext。 + +![](figures/zh-cn_image_0000001232693943.png) + +/\* ---------------------------------------------------------------- + +\*其他node-init支持函数 + +\*注意:所有这些都被CurrentMemoryContext调用等于每个查询的内存上下文。 + +\*初始化ps\_ExprContext字段。这只是必要的为使用ExecQual或ExecProject的节点执行此操作,因为那些例程需要一个上下文。其他节点,不需要计算表达式不需要这样做。 + +\* ———————— + +(3)createexprcontext + +\*在EState中为表达式计算创建上下文。 + +\*一个executor运行可能需要多个exprcontext\(我们通常为每个Plan节点创建一个exprcontext,并为每个输出元组处理\(如约束检查\)单独创建一个exprcontext\)。每个ExprContext都有自己的“per-tuple”内存上下文。 + +注意,我们没有对调用者的内存上下文做任何假设。 + +![](figures/zh-cn_image_0000001187374254.png) + +(4) execopenscanrelation + +\*打开堆关系被扫描一个基本级别的扫描计划节点。 + +\*这应该在节点的ExecInit例程中调用。 + +\* + +\*默认情况下,获取关联的AccessShareLock。但是,如果关系已经被InitPlan锁定,我们就不需要获得任何额外的锁定。这节省了到共享锁管理器的访问。 + +![](figures/zh-cn_image_0000001187214342.png) + +/\* ---------------------------------------------------------------- + +### 2、ExecutorRun 代码调试过程以及分析 + +这是执行器运行的部分 + +这一部分传入的参数有 + +QueryDesc 这个和上部分传入的参数相同 + +ScanDirection 这个是一个枚举类型变量 + +-1 表示反方向扫描 + +0 表示无扫描方向 + +1 表示正方向扫描 count + +元组最大数目 + +ExecutorRun 函数是执行器模块的主要例程。它接受来自交通警察\(traffic cop\)的查询描述符并执行查询计划。 执行此部分时 ExecutorStart 必须已经被调用。 + +如果 direction 是 NoMovementScanDirection,即为 0,那么除了启动/关闭目的地之外什么都不做。 否则,我们将 在指定方向检索最多 ‘count’ 个元组。 + +注意:count = 0 被解释为没有入口限制,即运行直到完成。 另请注意,计数限制仅适用于检索到的元组,例如不适用于由 ModifyTable 计划节点插入/更新/删除的元组。 + +没有返回值,但输出元组\(如果有\)被发送到 QueryDesc 中指定的目标接收者;顶层处理的元组数量可以在 Estate- \>es\_processed 中找到。 + +openGauss 提供了一个函数 hook 变量,让可加载插件在调用 ExecutorRun 时获得控制。这样的插件通常会调用 standard\_ExecutorRun\(\)。 + +调试过程如下: + +- 1、同样以 select \* from student; 为例,此时参数取值如下 + + ![](figures/zh-cn_image_0000001187055790.png) + +- 2、之后函数调用 exec\_explain\_plan\(\)对 queryDesc 进行解析 + +- 3、解析成功后检查是否存在 ExecutorRun\_hook, 如果 不存在则执行标准 ExecutorRun。这里同样已经被挂载了 explain\_ExecutorRun\(\) + + 正如注释所说,该函数负责跟踪嵌套深度。一旦深度超出限制,则抛出一个异常。 + +- 4、执行完成这部分后,进入到standard\_ExecutorRun\(queryDesc, direction, count\);函数中 + + 调用了CreateExprContext函数,在EState中为表达式计算创建上下文。 + + ![](figures/zh-cn_image_0000001186895810.png) + + *一个executor运行可能需要多个exprcontext\(我们通常为每个Plan节点创建一个exprcontext,并为每个输出元组处理\(如约束检查\)单独创建一个exprcontext\)。每个ExprContext都有自己的“per-tuple”内存上下文。注意,我们没有对调用者的内存上下文做任何假设。每一个元组都在execProcnode中获得分发函数按节点调用函数,之后在三个execScan中一起进行组合扫描,扫描结束回到run函数 + +- 5、最后回到 ExecutorRun 函数,进行 SQL 自调,查询执行完毕时,基于运行时信息分析查询计划问题。本次调试中, 查询中并没有问题。之后顺利退出此函数。 + +### 3、ExecutorFinish & ExecutorEnd 代码调试过程以及分析 + +此例程必须在最后一次 ExecutorRun 调用之后调用。 + +它执行清理,例如触发 AFTER 触发器。它与 ExecutorEnd 是分开的,因为 EXPLAIN ANALYZE 需要在总运行时间中包含这些操作。 + +调试过程如下: + +- 1、ExecutorFinish这一部分代码比较简短,依旧是用select \* from student;来观察; + +- 2、hook 上同样被挂载了函数 explain\_ExecutorFinish , + + 该函数的任务依旧是在清理时跟踪嵌套深度,如果深度超出限制则抛出异常。该部分执行结束后退出。 + +- 3、进入standard\_ExecutorFinish\(queryDesc\),该部分标准执行代码比较短:主要是运行 ModifyTable 节点完成 以及执行队列后触发器,除非被告知不要; + +- 4、最后轮到 ExecutorEnd: + + ![](figures/zh-cn_image_0000001232575383.png) + +- 5、hook 上被挂载了函数 hypo\_executorEnd\_hook + + 它的作用是重置 isExplain 标志,此时查询已经执行完了,所以对标志进行重置。为下一轮查询做好工作。 + +- 6、进入standard\_ExecutorEnd; + + 相关函数有: + + (1)ExecFreeExprContext + + 计划节点的ExprContext应该在执行器关闭期间被显式释放,因为可能有需要调用的关闭回调。\(上述例程生成的其他资源,如投影信息,不需要显式释放,因为它们只是每查询内存上下文中的内存。\) 
+ + 然而……在ExecEndNode期间不需要这样做,因为FreeExecutorState会释放EState中的任何剩余的ExprContexts。让FreeExecutorState这样做,可以让ExprContexts以相反的创建顺序被释放,而不是像我们在这里删除它们时那样按照创建顺序被释放,这就节省了在FreeExprContext中清除列表的O\(N^2\)工作。 + + ![](figures/zh-cn_image_0000001232453869.png) + + (2)FreeExecutorState + + \*释放一个EState以及所有剩余的工作存储。 + + \*注意:这不是负责释放非内存资源,如打开关系或缓冲引脚。但是它将关闭EState内任何仍然活跃的exprcontext。对于EState仅用于表达式求值而不是运行完整的Plan的情况,这已经是足够的清理工作了。 + + \*这可以在任何内存上下文中调用…只要它不是被释放的那一种。 + + ![](figures/zh-cn_image_0000001232775437.png) + +运行机制: + +1、Postgres进程 + +opengauss与PostgreSQL类似,是多进程结构的数据库。在PostgreSQL中主要有postmaster, postgres, vacuum, bgwriter, pgarch, walwriter, pgstat等进程,postmaster负责在启动数据库的时候创建共享内存 并初始化各种内部数据结构,如锁表,数据库缓冲区等,该进程在数据库中只有一个。在数据库启动以后负责监听用户 请求,创建postgres进程来为用户服务。 + +Postgres进程负责执行客户端发出的所有的SQL语句及自定义函数。在opengauss中与Postgres进程相关的代码在src/gusskernel/process/tcop/postgres.cpp文件中,Postgres进程的入口函数是PostgresMain。 + +![](figures/zh-cn_image_0000001232693945.png) + +PostgresMain首先进行一些初始化工作,然后使用语句for \(; 进入一个无限循环状态,等待客户端发来命令请求,接受客户端命令,执行客户端命令,将执行结果返回给客户端。 + +for \(; 无限循环体首先调用ReadCommand从客户端读取一条命令,然后根据命令类型,调用相应的处理函数。 + +对于可以直接执行的SQL语句(simple query),命令类型的代码是“Q”,主要的处理代码如下: + +![](figures/zh-cn_image_0000001187374256.png) + +语句exec\_simple\_query\(query\_string\)负责解析SQL语句,生成查询计划,执行查询计划,将查询结果返回给客户端。我们组主要进行分析的是opengauss执行查询计划的过程。 + +2、执行器的整体执行流程 + +执行器(executor)采用优化器创建的计划,并对其进行递归处理以提取所需的行的集合。这本质上是一种需求驱动的流水线执行机制。即每次调用一个计划节点时,它都必须再传送一行,或者报告已完成传送所有行。 + +![](figures/zh-cn_image_0000001187214344.png) + +图1:执行计划树示例 + +如图所示的执行计划树示例,顶部节点是Merge Join节点。在进行任何合并操作之前,必须获取2个元组(MergeJoin节点的2个子计划各返回1个元组)。因此,执行器以递归方式调用自身以处理其子计划(如从左子树的子计划开始)。 + +Merge Join由于要做归并操作,因此它要子计划按序返回元组,从图中可以看出,它的子计划是一个Sort节点。Sort的子节点可能是Seq Scan节点,代表对表的实际读取。执行SeqScan节点会使执行程序从表中获取一行并将其返回到调用节点。Sort节点将反复调用其子节点以获得所有要排序的行。当输入完毕时(如子节点返回NULL而不是新行),Sort算子对获取的元组进行排序,它每次返回1个元组,即已排序的第1行。然后不断排序并向父节点传递剩余的排好序的元组。 + +Merge Join节点类似地需要获得其右侧子计划中的第1个元组,看是否可以合并。如果是,它将向其调用方返回1个连接行。在下1次调用时,或者如果它不能连接当前输入对,则立即前进到1个表或另1个表的下1行(取决于比较的结果),然后再次检查是否匹配。最终,1个或另1个子计划用尽,并且Merge Join节点返回NULL,以指示无法再形成更多的连接行。 + +复杂的查询可能涉及多个级别的计划节点,但是一般方法是相同的:每个节点都会在每次调用时计算并返回其下1个输出行。每个节点还负责执行优化器分配给它的任何选择或投影表达式。 + +执行器机制用于执行所有4种基本SQL查询类型:SELECT、INSERT、UPDATE和DELETE。 + +对于SELECT,顶级执行程序代码仅需要将查询计划树返回的每一行发送给客户端。 + +对于INSERT,每个返回的行都插入到为INSERT指定的目标表中。这是在称为ModifyTable的特殊顶层计划节点中完成的。(1个简单的“INSERT … VALUES”命令创建了1个简单的计划树,该树由单个Result节点组成,该节点仅计算一个结果行,并传递给ModifyTable树节点实现插入)。 + +对于UPDATE,优化器对每个计算的更新行附着所更新的列值,以及原始目标行的TID(元组ID或行ID);此数据被馈送到ModifyTable节点,并使用该信息来创建新的更新行并标记旧行已删除。 + +对于DELETE,计划实际返回的唯一列是TID,而ModifyTable节点仅使用TID访问每个目标行并将其标记为已删除。 + +执行器的整体执行流程如图2所示。 + +![](figures/zh-cn_image_0000001187055792.png) + +图2:执行器整体执行流程图 + +执行引擎的执行流程非常清晰,可以分成3个阶段。 + +初始化阶段。在这个阶段执行器会完成一些初始化工作,通常的做法是遍历整个执行树,根据每个算子的不同特征进行初始化执行。比如 HashJoin这个算子,在这个阶段会进行 Hash表的初始化,主要是内存的分配。入口函数为ExecutorStart \(\)。 + +执行阶段。这个阶段是执行器最重要的部分。在这个阶段,执行器完成对于执行树的迭代\(Pipeline\)遍历,通过从磁盘读取数据,根据执行树的具体逻辑完成查询语义。入口函数为 ExecutorRun \(\)。 + +清理阶段。因为执行器在初始化阶段向系统申请了资源,所以在这个阶段要完成对资源的清理。比如在 HashJoin初始化时对 Hash表内存申请的释放。入口函数为 ExecutorFinish \(\)、ExecutorEnd \(\)。 + +3、执行器的主要数据结构分析 + +QueryDescs: 查询描述符,实际是需要执行的SQL语句的相关信息,包含由CreateQueryDesc函数设置的操作类型、规划器的输出计划树、元组输出的接收器、查询环境变量以及由ExecutorStart函数设置的结果元组tuples描述、执行器状态、和per-plan-node状态树。 + +具体结构如下: + +![](figures/zh-cn_image_0000001186895812.png) + +EState:执行器在调用时的主要工作状态,由ExecutorStart函数设置 + +执行器全局状态estate中保存了査询涉及的范围表\(es\_range\_table\)、Estate所在的内存上下文\(es\_query\_cxt,也是执行过程中一直保持的内存上下文\)、用于在节点间传递元组的全局元组表\(es\_TupleTable\)和每获取一个元组就会回收的内存上下文\(es\_per\_tuple\_exprContext\) 。 + +PlanState:PlanState是所有PlanState-type节点的虚父类 + 
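PlanState 的核心字段大致如下(参照 PostgreSQL 同源代码整理的示意,省略了大部分字段,具体以 openGauss 源码为准):

```
typedef struct PlanState {
    NodeTag type;                       /* 节点类型标记 */
    Plan *plan;                         /* 指向对应的计划节点 */
    EState *state;                      /* 指向执行器全局状态 EState */
    struct PlanState *lefttree;         /* 左子节点的执行状态 */
    struct PlanState *righttree;        /* 右子节点的执行状态 */
    ExprContext *ps_ExprContext;        /* 节点的表达式计算上下文 */
    TupleTableSlot *ps_ResultTupleSlot; /* 存放结果元组的槽 */
} PlanState;
```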
+执行器初始化时,ExecutorStart会根据査询计划树构造执行器全局状态\(estate\)以及计划节点执行状态\(planstate\)。在査询计划树的执行过程中,执行器将使用planstate来记录计划节点执行状态和数据,并使用全局状态记录中的es\_tupleTable字段在节点间传递结果元组。执行器的清理函数ExecutorEnd将回收执行器全局状态和计划节点执行状态。 + +状态节点之间通过lefttree和righttree指针组织成和査询计划树结构类似的状态节点树,同时,每个状态节点都保存了指向其对应的计划节点的指针\(PlanState类型中的Plan字段\)。 + +### 4、执行器的主要函数分析 + +在opengauss中与执行器主要相关的代码在src/gusskernel/runtime/executor/execMain.cpp文件中,主要函数有 ExecutorStart \(\)、 ExecutorRun \(\)、ExecutorFinish \(\)、ExecutorEnd \(\)。 + +初始化阶段: + +在这个阶段执行器会完成一些初始化工作,通常的做法是遍历整个执行树,根据每个算子的不同特征进行初始化执行。入口函数为ExecutorStart \(\)。这个例程必须在任何查询计划的开始执行时调用。它接受一个以前由CreateQueryDesc创建的QueryDesc\(它是分开的,只是因为一些地方使用QueryDescs实用命令\)。填充QueryDesc的tupDesc字段来描述将返回的元组,并设置内部字段\(estate和planstate\)。 + +ExecutorStart \(\)函数代码如下所示: + +![](figures/zh-cn_image_0000001232575385.png) + +其中ExecutorStart\_hook为钩子函数,是PostgreSQL预留的接口,通过重新编写的钩子函数可以改变postgresql的默认功能,钩子函数通常以hook最为结尾,PostgreSQL预留了非常丰富的接口。因此如果没有特殊需求,一般进入standard\_ExecutorStart\(queryDesc, eflags\)函数。 + +ExecutorRun \(\)、ExecutorFinish \(\)、ExecutorEnd \(\)这三个函数类似。都提供了一个函数挂钩变量,让可加载插件在ExecutorStart被调用时获得控制。但是通常会调用standard\_ExecutorStart\(\)。 + +standard\_ExecutorStart\(queryDesc, eflags\)函数主要做的工作是: + +构建EState:调用CreateExecutorState\(\)函数,创建每个查询的上下文,切换到每个查询的内存上下文进行启动;如果是非只读查询,设置命令ID以标记输出元组 + +初始化计划状态树: 调用InitPlan\(queryDesc, eflags\)实现; + +执行器中对査询计划树的初始化都是从其根节点开始,并递归地对其子节点进行初始化。计划节点的初始化过程一般都会经历如下图所示的几个基本步骤,该过程在完成计划节点的初始化之后会输出与该计划节点对应的PlanState结构指针,计划节点的PlanState结构也会按照査询计划树的结构组织成计划节点执行状态树。对计划节点初始化的主要工作是根据计划节点中定义的相关信息,构造对应的PlanStale结构并对相关字段赋值。 + +![](figures/zh-cn_image_0000001232453871.png) + +计划节点的初始化由函数ExecInitNode完成,该函数以要初始化的计划节点为输入,并返回该计划节点所对应的PlanState结构指针。在ExecInitNode中,通过判断计划节点的类型来调用相应的处理过程,每一种计划节点都有专门的初始化函数,且都以“ExecInit节点类型”的形式命名。例如,NestLoop节点的初始化函数为ExecInitNestLoop。在计划节点的初始化过程中,如果该节点还有下层的子节点,则会递归地调用子节点的初始化函数来对子节点进行初始化。ExecInitNode函数会根据计划节点的类型\(T\_NestLoop\)调用该类型节点的初始化函数\(ExecInitNestLoop\)。由于NestLoop节点有两个子节点,因此ExecInitNestLoop会先调用ExecInitNode对其左子节点进行初始化,并将其返回的PlanState结构指针存放在为NestLoop构造的NestLoopState结构的lefttree字段中;然后以同样的方式初始化右子节点,将返回的PlanState结构指针存放于NestLoopState的righttree字段中。同样,如果左右子节点还有下层节点,初始化过程将以完全相同的方式递归下去,直到到达査询计划树的叶子节点。而在初始化过程中构造的树也会层层返回给上层节点,并被链接在上层节点的PlanState结构中,最终构造出完整的PlanState树。 + +执行阶段。 + +这个阶段是执行器最重要的部分。在这个阶段,执行器完成对于执行树的迭代\(Pipeline\)遍历,通过从磁盘读取数据,根据执行树的具体逻辑完成查询语义。入口函数为 ExecutorRun \(\)。 + +![](figures/zh-cn_image_0000001232775439.png) + +ExecutorRun \(\)首先调用standard\_ExecutorStart\(\)完成查询。然后ExecutorRun \(\)函数在之后进行SQL 自调优,即查询执行完毕时,基于运行时信息分析查询计划问题。 + +standard\_ExecutorRun\(\)函数主要做的工作就是运行计划,在执行过程中会调用ExecutePlan完成査询计划的执行;该函数的主体部分是一个大的循环,每一次循环都通过ExecProcNode函数从计划节点状态树中获取一个元组,然后对该元组进行相应的处理\(增删查改\),然后返回处理的结果。当ExecProcNode从计划节点状态树中再也取不到有效的元组时结束循环过程。 + +ExecProcNode的执行过程也和ExecInitNode类似:从计划节点状态树的根节点获取数据,上层节点为了能够完成自己的处理将会递归调用ExecProcNode从下层节点获取输入数据\(一般为元组\),然后根据输入数据进行上层节点对应的处理,最后进行选择条件的运算和投影运算,并向更上层的节点返回结果元组的指针。同ExecInitNode 一样,ExecProcNode 也是一个选择函数,它会根据要处理的节点的类型调用对应的处理函数。例如,对于NestLoop类型的节点,其处理函数为ExecNestLoop。ExecNestLoop函数同样会对NestLoop类型的两个子节点调用ExecProcNode以获取输入数据。如果其子节点还有下层节点,则以同样的方式递归调用ExecProcNode进行处理,直到到达叶子节点。每一个节点被ExecProcNode处理之后都会返回一个结果元组,这些结果元组作为上层节点的输入被处理形成上层节点的结果元组,最终根节点将返回结果元组。 + +每当通过ExecProcNode从计划节点状态树中获得一个结果元组后,ExecutePlan函数将根据整个语句的操作类型调用相应的函数进行最后的处理。对于不扫描表的简单查询\(例如select 1\),调用的是Result节点,通过ExecResult函数直接输出“査询”结果。对于需要扫描表的查询\(例如select xx from tablexx这种\),系统在扫描完节点后直接返回结果,而对于增删改查询,情况特殊,有一个专门的ModifyTable节点来处理它:主要调用了ExecInsert、ExecDelete、ExecUpdate这三个函数进行处理。对于插入语句,则首先需要调用ExecConstraints对即将插入的元组进行约束检査,如果满足要求,ExecInsert会调用函数heap\_insert将元组存储到存储系统。对于删除和更新,则分别由 ExecDelete 和 ExecUpdate 调用 heap\_delete 和 heap\_update 
Cleanup phase:

Because the executor acquired resources from the system during initialization, it must release them in this phase. The entry functions are ExecutorFinish() and ExecutorEnd().

ExecutorFinish: this routine must be called after the last ExecutorRun call. It performs cleanup such as firing AFTER triggers. It is kept separate from ExecutorEnd because EXPLAIN ANALYZE needs to include these actions in the measured runtime. standard_ExecutorFinish() mainly runs the ModifyTable nodes and executes queued AFTER triggers, unless told not to.

ExecutorEnd(): this routine must be called at the end of execution of any query plan. It then calls standard_ExecutorEnd(), which invokes ExecEndPlan on the root of the execution state tree to release the allocated resources, and finally frees the global executor state EState, completing the whole execution. The cleanup work mainly reclaims the resources allocated during initialization, the memory for projection and selection structures, the storage for result tuples, and so on. After the plan-node execution state tree has been cleaned up, ExecutorEnd calls FreeExecutorState to clean up the global executor state.

(4) Call relationships: the function call stack for a Sort node over the whole query execution cycle is as follows:

![](figures/zh-cn_image_0000001232693947.png)

## Lab summary

(1) Problems encountered and their solutions (unsolved problems are listed as such)

1. Failed to connect with MobaXterm_Personal_20.3.

Cause: the IP address or user did not match the VM, or the network timed out.

Fix: make sure the VM's network connection is stable; use the ifconfig command to look up the VM's IP address.

2. make failed with errors.

Cause: many possible factors, e.g., absolute vs. relative path problems, or insufficient permissions on the relevant files.

Fix: use absolute paths when configuring environment variables; grant higher permissions to the relevant files, and change the directory's owning group if necessary.

3. Failed to start the database: gs_ctl: command not found...

Cause: the environment variables were not configured under the omm user. Reconfiguring them before each startup made it succeed.

Or the database was already running: "another server might be running; Please use the restart command". Simply connect to it, or go into the installation directory, delete the pid file, and start it again.

4. Failed to connect to the database; port already in use.

Cause: another process was occupying the port.

Fix: use the netstat command to find the process occupying the port, kill it, and reconnect to the database.

5. Debugging did not stop at the breakpoints that were set.

Cause: backend breakpoints cannot be debugged from the frontend.

Fix: debug the backend gaussdb process directly.

6. Debugging sometimes terminated abruptly (when it should not have).

Cause: unknown.

Fix: restart the database.

7. make kept producing errors.

Cause: unknown.

Fix: limit the number of processes in the initialization script (usually set to 1024).

8. Writing the environment variables into .bash_profile to make them permanent caused the CentOS VM's screen to go black and the taskbar to become unusable.

Cause: unknown.

Fix: delete the file and, step by step, configure the environment variables at each database startup.

(2) Impressions, suggestions, and opinions about the lab

1. Impressions

This lab was difficult. We ran into many problems just downloading and compiling the code, and later, during debugging, unfamiliarity with the tools and bewilderment at the openGauss kernel made me doubt at times that we could finish. In the end, though, through hard work, I believe we turned in a good answer.

diff --git "a/content/zh/post/July/openGauss-gist-\347\264\242\345\274\225.md" "b/content/zh/post/July/openGauss-gist-\347\264\242\345\274\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..efed4ead9c2211b84e1500dd27a88d01dbd6d733
--- /dev/null
+++ "b/content/zh/post/July/openGauss-gist-\347\264\242\345\274\225.md"
@@ -0,0 +1,435 @@
+++

title = "The openGauss GiST Index"

date = "2021-10-26"

tags = [ "The openGauss GiST Index"]

archives = "2021-10"

author = "吴松"

summary = "The openGauss GiST Index"

img = "/zh/post/July/title/img2.png"

times = "12:30"

+++

# The openGauss GiST Index

## Overview

Since the B-tree was proposed, many different kinds of search trees have been derived from it. GiST (Generalized Search Tree) is a new index structure that can provide the functionality of many different tree structures within a single implementation. GiST is an extensible data structure that lets users develop indexes for different data types, supporting all kinds of lookups over the supported types. GiST can unify many popular search trees (such as the R-tree, B+-tree, hB-tree, TV-tree, CH-tree, and so on) without having to build multiple search trees. Strictly speaking, GiST is not a concrete index type but a template for tree-structured indexes; both PostgreSQL and openGauss have an R-tree index implemented on top of GiST.

Besides unifying these search trees, GiST has a property earlier trees lacked: extensibility of both data and queries.

## Query extensibility

Earlier search trees were extensible in the data they handle. For example, PostgreSQL supports extensible B+-trees and R-trees, which means you can build a B+-tree or R-tree over different data types, such as int or float. But a B+-tree supports only the predicates (>=, <=, >, <, =), and an R-tree only (contains, contained, equals). So in PostgreSQL it would be hard to make a B+-tree index support a query like "find all movies with cool explosions".

GiST, by contrast, can be programmed to support any query predicate. All it takes to run GiST is to implement four user-defined interfaces, which define the behavior of keys in the tree. Of course, supporting fancy-looking queries requires implementing these interfaces cleverly, but for standard queries (B-tree, R-tree, etc.) the implementations are very simple.

## The key to GiST

GiST itself is a balanced tree structure similar to a B-tree, containing (key, pointer) pairs. But a GiST key need not be like a B-tree key: it can be of a user-defined type, representing some property that holds for everything reachable through the associated pointer. For example, in a B+-tree-style GiST the key is a numeric range ("everything reachable through this pointer lies between 4 and 6"); in an R-tree-style GiST the key is a bounding box ("everything reachable through this pointer is in California"); in an RD-tree the key is a set ("everything reachable through this pointer is a subset of the set the key denotes")...

To make GiST work correctly, you need to work out what the key represents, and then write four interfaces implementing insertion, deletion, and search over the tree.

## The four interfaces

These are the four interfaces GiST needs in order to work.

- Consistent: lets the tree perform searches correctly. Given a key p on a tree page and a user query q, the Consistent method should return NO if p and q cannot both be true for any given data item, and MAYBE otherwise.

  For example: the 10 in the B-tree internal node in the figure below means that the data it ultimately points to lies in the range [10, 70), i.e.

  p: [10, 70)

  Suppose q: select xx from table where key < 80

  Then for

  item(key = 90, ...), the Consistent method returns NO

  item(key = 60, ...), the Consistent method returns MAYBE

![](figures/Consistent.png)

- Union: merges information within the tree. Given a set of entries S, this method returns a new key p that is true for all data items below S. A simple way to implement Union is to return a predicate equivalent to the disjunction of the keys in S, i.e., if S = {P1, P2, ... Pn}, return P1 or P2 or ... Pn.

- Penalty: returns a number indicating how costly it would be to insert a new data item into the subtree rooted at a given entry. The inserted item will follow the path of minimum penalty.

- PickSplit: as in a B-tree, GiST pages sometimes need to be split when new data is inserted; PickSplit decides which entries go to the new page and which stay on the old one.

For more details about GiST, see the original paper: Generalized Search Trees for Database Systems.
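To make the four interfaces concrete, here is a minimal C++ sketch for a hypothetical one-dimensional integer-range key. The names and types are illustrative only, not the openGauss GiST API:

```
#include <algorithm>
#include <climits>
#include <vector>

// Hypothetical 1-D range key: covers all values v with lo <= v < hi.
struct RangeKey { int lo, hi; };

// Query: "find items with value < bound" (cf. the key < 80 example above).
struct LessThanQuery { int bound; };

// Consistent: false (NO) only when no item under p can satisfy q.
bool consistent(const RangeKey &p, const LessThanQuery &q) {
    return p.lo < q.bound;          // some value in [lo, hi) may be < bound
}

// Union: the smallest range covering every key in the set.
RangeKey unionKeys(const std::vector<RangeKey> &keys) {
    RangeKey u{INT_MAX, INT_MIN};
    for (const auto &k : keys) { u.lo = std::min(u.lo, k.lo); u.hi = std::max(u.hi, k.hi); }
    return u;
}

// Penalty: how much the subtree key must grow to absorb the new key.
int penalty(const RangeKey &subtree, const RangeKey &newKey) {
    RangeKey u = unionKeys({subtree, newKey});
    return (u.hi - u.lo) - (subtree.hi - subtree.lo);
}

// PickSplit: sort by lower bound and split in the middle.
void pickSplit(std::vector<RangeKey> all, std::vector<RangeKey> &left, std::vector<RangeKey> &right) {
    std::sort(all.begin(), all.end(), [](const RangeKey &a, const RangeKey &b) { return a.lo < b.lo; });
    size_t mid = all.size() / 2;
    left.assign(all.begin(), all.begin() + mid);
    right.assign(all.begin() + mid, all.end());
}
```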
## GiST index implementation

- Building a GiST index

  ```
  gistbuild
  {
      ...
      // build the GISTBuildState object
      GISTBuildState buildstate;
      ...
      buildstate.giststate = initGISTstate(index);
      ...
      // initialize the GiST root node
      /* initialize the root page */
      buffer = gistNewBuffer(index);
      Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
      page = BufferGetPage(buffer);

      START_CRIT_SECTION();

      GISTInitBuffer(buffer, F_LEAF);

      MarkBufferDirty(buffer);

      if (RelationNeedsWAL(index)) {
          XLogRecPtr recptr;

          XLogBeginInsert();
          XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT);

          recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX);
          PageSetLSN(page, recptr);
      } else
          PageSetLSN(page, GetXLogRecPtrForTemp());

      UnlockReleaseBuffer(buffer);

      END_CRIT_SECTION();
      ...
      // scan the table, construct index tuples, and insert them into the GiST index
      reltuples = tableam_index_build_scan(heap, index, indexInfo, true, gistBuildCallback, (void *)&buildstate);
      ...
  }
  ```

  The points to focus on are the index tuple structure in a GiST index and how the index is constructed.

  The main implementation is in gistBuildCallback, which is called for every heap tuple that needs to be indexed.

  ```
  gistBuildCallback
  {
      // assemble the GiST index tuple
      itup = gistFormTuple(buildstate->giststate, index, values, isnull, true);
      // call gistdoinsert to insert the index tuple into the GiST index
      gistdoinsert(index, itup, buildstate->freespace, buildstate->giststate);
      ...
  }

  gistdoinsert
  {
      // descend from the root along the minimum-penalty path, updating the parents' downlinks with the inserted key
      for( ; ; ) {
          // if we crash midway the tree is still consistent; updating the parent is sometimes unnecessary
          // when locking the currently visited node, take ShareLock first; if the node must be updated,
          // release the lock and re-acquire it as ExclusiveLock

          // handle any unfinished split, or a split task being executed concurrently

          // if this is not a leaf node
          if (!GistPageIsLeaf(stack->page)) {
              // find the child with the smallest insertion penalty
              downlinkoffnum = gistchoose(state.r, stack->page, itup, giststate);
              // unlike a B-tree index, which first descends to a leaf, inserts there, and then walks back
              // up updating the parents, a GiST index updates the parents on the way down and then keeps
              // descending until it reaches the leaf; so after the final child update there is no need to
              // walk back up, because every parent was already updated during the descent
              // update the non-leaf node
              newtup = gistgetadjusted(state.r, idxtuple, itup, giststate);
              if (newtup) {
                  ...
              }
          }
          else { // leaf node
              // insert the new key
              ...
              (void)gistinserttuple(&state, stack, giststate, itup, InvalidOffsetNumber);
          }
      }
  }
  ```
  ```
  // find the child whose cost for the key contained in the inserted index tuple is smallest, and
  // return the offsetnumber, within the current node, of the item pointing to that child
  gistchoose
  {
      // the index tuple contains compressed attributes; decompress them first
      gistDeCompressAtt(giststate, r, it, NULL, (OffsetNumber)0, identry, isnull);
      // the index may cover several columns, each with its own penalty value;
      // columns appearing earlier in the index definition carry more weight than later ones
      // best_penalty[j] is the smallest penalty seen so far for column j, or -1 if column j has
      // not been examined yet; all penalty values to the right of the first -1 are undefined
      best_penalty[0] = -1;
      // for every tuple on the current page
      for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
          // fetch one index tuple
          IndexTuple itup = (IndexTuple)PageGetItem(p, PageGetItemId(p, i));
          // for each attribute of the index tuple
          for (j = 0; j < r->rd_att->natts; j++) {
              // compute the penalty of each column
              datum = index_getattr(itup, j + 1, giststate->tupdesc, &IsNull);
              gistdentryinit(giststate, j, &entry, datum, r, p, i, FALSE, IsNull);
              usize = gistpenalty(giststate, j, &entry, IsNull, &identry[j], isnull[j]);
              // because earlier columns weigh more, if an earlier column's computed penalty exceeds the
              // best so far there is no need to check the later columns: move straight to the next tuple;
              // if it is smaller than the best so far, or equal, keep checking the later columns
              if (best_penalty[j] < 0 || usize < best_penalty[j]) {
                  result = i;
                  best_penalty[j] = usize;

                  if (j < r->rd_att->natts - 1)
                      best_penalty[j + 1] = -1;
                  ...
              }
              // if every column of the current tuple has penalty 0, there is no need to look at
              // further tuples: break out of the loop
          }

      // return the offsetnumber, within the page, of the line pointer of the minimum-penalty index tuple
      return result;
  }
  ```

  Every type supported by a GiST index implements the index-related procs:

  ```
  gistpenalty
  {
      float penalty = 0.0;
      // locate the matching penalty function and compute the penalty value
      if (giststate->penaltyFn[attno].fn_strict == FALSE || (isNullOrig == FALSE && isNullAdd == FALSE)) {
          FunctionCall3Coll(&giststate->penaltyFn[attno], giststate->supportCollation[attno], PointerGetDatum(orig),
              PointerGetDatum(add), PointerGetDatum(&penalty));
          /* disallow negative or NaN penalty */
          if (isnan(penalty) || penalty < 0.0) {
              penalty = 0.0;
          }
      } else if (isNullOrig && isNullAdd) {
          penalty = 0.0;
      } else {
          /* try to prevent mixing null and non-null values */
          penalty = get_float4_infinity();
      }

      return penalty;
  }
  ```

  For example, inserting a new entry into an R-tree works as follows:

  On insertion, the algorithm walks down recursively from the root of the tree. It examines all the bounding rectangles of the current node and heuristically chooses which child to insert into (for example, the child whose bounding rectangle would expand the least after the insertion), then moves into the chosen child and repeats the check until it reaches a leaf. A full leaf should be split before the insertion, so the leaf an insertion reaches is guaranteed to have room for the data.

  Finding the corresponding handler functions:

  pg_opclass defines the index operator classes. Each operator class defines the semantics of a column for a particular data type and a particular index access method; an operator class essentially declares that it applies to a particular indexable column data type.

  ![](figures/查找对应的处理函数.png)

  Querying pg_opclass shows which operator classes the GiST index supports, along with the index type, the type of the indexed data, and so on.

  ![](figures/通过查询-pg_opclass-可以查到.png)

  The operator classes GiST supports include

  box_ops, point_ops, poly_ops, circle_ops, tsvector_ops, tsquery_ops, and range_ops.

  Taking box_ops as an example, the associated am_procs are

  gist_box_consistent, gist_box_union, gist_box_compress, gist_box_decompress, gist_box_penalty, gist_box_picksplit, and gist_box_same,

  which include the four interfaces to be implemented: consistent, union, penalty, and picksplit.

  Query pg_amproc to list the procs supported by the opfamily associated with the index:

  ```
  postgres=# select * from pg_amproc where amprocfamily = 2593;
   amprocfamily | amproclefttype | amprocrighttype | amprocnum |       amproc
  --------------+----------------+-----------------+-----------+---------------------
           2593 |            603 |             603 |         1 | gist_box_consistent
           2593 |            603 |             603 |         2 | gist_box_union
           2593 |            603 |             603 |         3 | gist_box_compress
           2593 |            603 |             603 |         4 | gist_box_decompress
           2593 |            603 |             603 |         5 | gist_box_penalty
           2593 |            603 |             603 |         6 | gist_box_picksplit
           2593 |            603 |             603 |         7 | gist_box_same
  (7 rows)
  ```

  This shows that the penalty function for the box type is gist_box_penalty.

  Its implementation:

  ```
  Datum
  gist_box_penalty(PG_FUNCTION_ARGS)
  {
      GISTENTRY *origentry = (GISTENTRY *)PG_GETARG_POINTER(0);
      GISTENTRY *newentry = (GISTENTRY *)PG_GETARG_POINTER(1);
      float *result = (float *)PG_GETARG_POINTER(2);
      BOX *origbox = DatumGetBoxP(origentry->key);
      BOX *newbox = DatumGetBoxP(newentry->key);
      BOX unionbox;

      rt_box_union(&unionbox, origbox, newbox);
      *result = (float)(size_box(&unionbox) - size_box(origbox));
      PG_RETURN_POINTER(result);
  }
  ```

  As you can see, the computed penalty is the area of the union of the two boxes minus the area of the original box.

  As shown below, under this cost rule, suppose boxes A and C already exist on two different pages, page1 and page2; the penalty of inserting box B into page2 is smaller, so B should be inserted into page2.

  ![](figures/根据代价函数计算规则.png)

  That is the implementation of the GiST insertion path; the penalty interface is used during insertion to choose the target page.

  In a GiST index, every leaf entry contains a predicate (a boolean expression) and a TID pointing into the base table; the indexed key must satisfy the predicate. Non-leaf entries likewise contain a predicate and a pointer to a child node, and all children of a non-leaf entry must satisfy its predicate.

  Take inserting a Point in two-dimensional space as an example. The parent's predicate might be "all rectangles and points contained in the children lie inside some large rectangle". If the newly inserted point falls entirely inside the rectangle the parent represents, the parent needs no update; otherwise the rectangular region the parent represents must be enlarged, and once that update completes the descent continues and the insertion is performed. In the figure below, the left side shows an insertion that requires enlarging the parent's rectangle (node B is the parent); the right side shows one that does not. If, after the parent's range has been enlarged, the insertion ultimately fails, the GiST index remains usable.

  ![](figures/最终插入失败.png)

- GiST index search

  Searching a GiST uses the consistent interface; before looking at it, we need to know which operators each data type supported by the GiST index provides.

  ```
  postgres=# select * from pg_amop where amopfamily = 2593;
   amopfamily | amoplefttype | amoprighttype | amopstrategy | amoppurpose | amopopr | amopmethod | amopsortfamily
  ------------+--------------+---------------+--------------+-------------+---------+------------+----------------
         2593 |          603 |           603 |            1 | s           |     493 |        783 |              0
         2593 |          603 |           603 |            2 | s           |     494 |        783 |              0
         2593 |          603 |           603 |            3 | s           |     500 |        783 |              0
         2593 |          603 |           603 |            4 | s           |     495 |        783 |              0
         2593 |          603 |           603 |            5 | s           |     496 |        783 |              0
         2593 |          603 |           603 |            6 | s           |     499 |        783 |              0
         2593 |          603 |           603 |            7 | s           |     498 |        783 |              0
         2593 |          603 |           603 |            8 | s           |     497 |        783 |              0
         2593 |          603 |           603 |            9 | s           |    2571 |        783 |              0
         2593 |          603 |           603 |           10 | s           |    2570 |        783 |              0
         2593 |          603 |           603 |           11 | s           |    2573 |        783 |              0
         2593 |          603 |           603 |           12 | s           |    2572 |        783 |              0
         2593 |          603 |           603 |           13 | s           |    2863 |        783 |              0
         2593 |          603 |           603 |           14 | s           |    2862 |        783 |              0
  (14 rows)
  ```

  box_ops, for example, maps to 14 strategies in total; querying pg_operator shows the operator corresponding to each of the 14 strategies:

  ```
  postgres=# select oid,oprname,oprleft,oprright,oprresult from pg_operator where oid >= 493 and oid <= 500 or oid >= 2570 and oid <= 2573 or oid = 2862 or oid = 2863;
   oid  | oprname | oprleft | oprright | oprresult
  ------+---------+---------+----------+-----------
    493 | <<      |     603 |      603 |        16
    494 | &<      |     603 |      603 |        16
    495 | &>      |     603 |      603 |        16
    496 | >>      |     603 |      603 |        16
    497 | <@      |     603 |      603 |        16
    498 | @>      |     603 |      603 |        16
    499 | ~=      |     603 |      603 |        16
    500 | &&      |     603 |      603 |        16
   2570 | <<|     |     603 |      603 |        16
   2571 | &<|     |     603 |      603 |        16
   2572 | |&>     |     603 |      603 |        16
   2573 | |>>     |     603 |      603 |        16
   2862 | @       |     603 |      603 |        16
   2863 | ~       |     603 |      603 |        16
  (14 rows)
  ```

  Consistent should return false only if the predicate (x op query) must be FALSE for every data item x under the index entry, where op is the operator corresponding to the strategy in pg_amop; compare the definition of consistent given earlier.

  ```
  gist_box_consistent
  {
      ...
      // fetch the strategy number
      StrategyNumber strategy = (StrategyNumber)PG_GETARG_UINT16(2);
      ...
      if (GIST_LEAF(entry)) {
          // leaf node: call gist_box_leaf_consistent
          PG_RETURN_BOOL(gist_box_leaf_consistent(DatumGetBoxP(entry->key), query, strategy));
      } else {
          // non-leaf node: call rtree_internal_consistent
          PG_RETURN_BOOL(rtree_internal_consistent(DatumGetBoxP(entry->key), query, strategy));
      }
  }
  ```

  Let's look at a concrete example. For the box type, take the query condition x << y (meaning x lies to the left of y), where x is the indexed column of the table and y is a concrete box, such as (Point(5,5), Point(7,6)).

  The implementation:

  ```
  // For an internal node this returns !box_overright, i.e. x's left edge is not to the right
  // of y's left edge (it may equal it); in other words x's left edge is strictly less than
  // y's left edge. An internal node cannot decide precisely; it only rules out the cases that
  // certainly fail, and any other case may still produce matching results.
  static bool rtree_internal_consistent(BOX *key, BOX *query, StrategyNumber strategy)
  {
      bool retval = false;

      switch (strategy) {
          case RTLeftStrategyNumber:
              retval = !DatumGetBool(DirectFunctionCall2(box_overright, PointerGetDatum(key), PointerGetDatum(query)));
              break;
          ...

      return retval;
  }

  // Leaf entries point to boxes exactly, so a leaf node can decide precisely.
  static bool gist_box_leaf_consistent(BOX *key, BOX *query, StrategyNumber strategy)
  {
      bool retval = false;

      switch (strategy) {
          case RTLeftStrategyNumber:
              retval = DatumGetBool(DirectFunctionCall2(box_left, PointerGetDatum(key), PointerGetDatum(query)));
              break;
          ...
      return retval;
  }
  ```

  As shown in the figure below, when searching for boxes that lie to the left of box D, boxes A and B qualify.

  The blue region is the parent of A, B, and C. At the parent we only need to rule out what certainly cannot match (a parent whose left edge lies to the right of D, the grey rectangle on the right); at the leaves, each box's right edge is compared against D's left edge.

  ![](figures/而到叶子节点判断时.png)

  The overall search flow is similar to that of other tree-structured indexes, so it is not detailed here.

## Summary

This post covered the principles of the GiST index and the implementation of its insertion and search paths; other related topics will be covered in the next post.

Reference: https://postgrespro.com/blog/pgsql/4175817

diff --git "a/content/zh/post/July/openGauss-\345\210\227\345\255\230\350\241\250PSort\347\264\242\345\274\225.md" "b/content/zh/post/July/openGauss-\345\210\227\345\255\230\350\241\250PSort\347\264\242\345\274\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..d334089beff4ecac2d2c491ba98614cb6643e231
--- /dev/null
+++ "b/content/zh/post/July/openGauss-\345\210\227\345\255\230\350\241\250PSort\347\264\242\345\274\225.md"
@@ -0,0 +1,181 @@
+++

title = "The openGauss Column-Store PSort Index"

date = "2021-09-24"

tags = [ "The openGauss Column-Store PSort Index"]

archives = "2021-09"

author = "吴松"

summary = "The openGauss Column-Store PSort Index"

img = "/zh/post/July/title/img10.png"

times = "12:30"

+++

# The openGauss Column-Store PSort Index

## Overview

A PSort (partial sort) index is a clustered index built on columns of a column-store table. CUDesc records the min and max values of every CU, but if the business data model is fairly scattered, filtering CUs by min/max at query time causes many CUs to be read in vain; for example, when every CU's min-max span is wide, query efficiency approaches a full-table scan. In the scenario in the figure below, query 2 hits essentially every CU, so the lookup is close to a full-table scan.

![](figures/11.png)

A PSort index sorts the data within a partial interval (usually covering the rows of several CUs) by the index key, so that the overlap between CUs is minimized and query efficiency improves.

## Using a PSort index

During a batch insert into a column-store table, if a PSort index is found, the batch of data is sorted first. The PSort index table is itself organized as a cstore table (CUDesc is an astore table); its columns are the columns of the index key plus a corresponding row-number (TID) column. If the insert path finds a PSort index, it sorts a certain amount of data by the PSort index key, assembles it together with the TID column into a vector array, and inserts that into the PSort index's cstore table. A PSort index therefore has one more column than the actual index key; the extra column stores each record's position in the data cstore.

```
// construct the index data while building the PSort index
inline void ProjectToIndexVector(VectorBatch *scanBatch, VectorBatch *outBatch, IndexInfo *indexInfo)
{
    Assert(scanBatch && outBatch && indexInfo);
    int numAttrs = indexInfo->ii_NumIndexAttrs;
    AttrNumber *attrNumbers = indexInfo->ii_KeyAttrNumbers;
    Assert(outBatch->m_cols == (numAttrs + 1));

    // index column
    for (int i = 0; i < numAttrs; i++) {
        AttrNumber attno = attrNumbers[i];
        Assert(attno > 0 && attno <= scanBatch->m_cols);

        // shallow copy
        outBatch->m_arr[i].copy(&scanBatch->m_arr[attno - 1]);
    }

    // ctid column
    // the last column is the tid
    outBatch->m_arr[numAttrs].copy(scanBatch->GetSysVector(-1));

    outBatch->m_rows = scanBatch->m_rows;
}
```

On the cstore insert path, if a PSort index exists the data is first put into the sort queue:

```
void CStoreInsert::BatchInsert(_in_ VectorBatch* pBatch, _in_ int options)
{
    Assert(pBatch || IsEnd());

    /* keep memory space from leaking during bulk-insert */
    MemoryContext oldCnxt = MemoryContextSwitchTo(m_tmpMemCnxt);

    // Step 1: relation has partial cluster key
    // We need put data into sorter contatiner, and then do
    // batchinsert data
    if (NeedPartialSort()) {
        Assert(m_tmpBatchRows);

        if (pBatch) {
            Assert(pBatch->m_cols == m_relation->rd_att->natts);
            m_sorter->PutVecBatch(m_relation, pBatch); // put the batch into the partial sort queue
        }

        if (m_sorter->IsFull() || IsEnd()) { // the sort queue is full, or the input has ended
            m_sorter->RunSort(); // sort by the index key

            /* reset and fetch next batch of values */
            DoBatchInsert(options);
            m_sorter->Reset(IsEnd());

            /* reset and free all memory blocks */
            m_tmpBatchRows->reset(false);
        }
    }

    // Step 2: relation doesn't have partial cluster key
    // We need cache data until batchrows is full
    else {
        Assert(m_bufferedBatchRows);

        // If batch row is full, we can do batchinsert now
        if (IsEnd()) {
            if (ENABLE_DELTA(m_bufferedBatchRows)) {
                InsertDeltaTable(m_bufferedBatchRows, options);
            } else {
                BatchInsertCommon(m_bufferedBatchRows, options);
            }
            m_bufferedBatchRows->reset(true);
        }

        // we need cache data until batchrows is full
        if (pBatch) {
            Assert(pBatch->m_rows <= BatchMaxSize);
            Assert(pBatch->m_cols && m_relation->rd_att->natts);
            Assert(m_bufferedBatchRows->m_rows_maxnum > 0);
            Assert(m_bufferedBatchRows->m_rows_maxnum % BatchMaxSize == 0);

            int startIdx = 0;
            while (m_bufferedBatchRows->append_one_vector(
                RelationGetDescr(m_relation), pBatch, &startIdx, m_cstorInsertMem)) {
                BatchInsertCommon(m_bufferedBatchRows, options);
                m_bufferedBatchRows->reset(true);
            }
            Assert(startIdx == pBatch->m_rows);
        }
    }

    // Step 3: We must update index data for this batch data
    // if end of batchInsert
    FlushIndexDataIfNeed();

    MemoryContextReset(m_tmpMemCnxt);
    (void)MemoryContextSwitchTo(oldCnxt);
}
```

![](figures/22.png)

Figure: cstore table insert flow

The code that updates the index data during the insert flow:

```
void CStoreInsert::InsertIdxTableIfNeed(bulkload_rows* batchRowPtr, uint32 cuId)
{
    Assert(batchRowPtr);

    if (relation_has_indexes(m_resultRelInfo)) {
        /* form all tids */
        bulkload_indexbatch_set_tids(m_idxBatchRow, cuId, batchRowPtr->m_rows_curnum);

        for (int indice = 0; indice < m_resultRelInfo->ri_NumIndices; ++indice) {
            /* form index-keys data for index relation */
            for (int key = 0; key < m_idxKeyNum[indice]; ++key) {
                bulkload_indexbatch_copy(m_idxBatchRow, key, batchRowPtr, m_idxKeyAttr[indice][key]);
            }

            /* form tid-keys data for index relation */
            bulkload_indexbatch_copy_tids(m_idxBatchRow, m_idxKeyNum[indice]);

            /* update the actual number of used attributes */
            m_idxBatchRow->m_attr_num = m_idxKeyNum[indice] + 1;

            if (m_idxInsert[indice] != NULL) {
                /* insert into the PSort index */
                m_idxInsert[indice]->BatchInsert(m_idxBatchRow, 0);
            } else {
                /* insert into a cbtree/cgin index */
                CStoreInsert::InsertNotPsortIdx(indice);
            }
        }
    }
}
```

Inserting into the index follows the same path as inserting ordinary cstore data.

When querying through a PSort index, because the data inside a PSort index CU is already sorted, a binary search can quickly find the row number of the matching data within the PSort index; that row's tid column is the row number of the data in the data cstore.

![](figures/33.png)

Figure 2: PSort index lookup
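A minimal C++ sketch of that lookup idea, assuming a simplified in-memory view of one sorted CU (the real code operates on cstore CUs, not std::vector):

```
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical flattened view of one sorted PSort CU: key column plus tid column.
struct PsortCu {
    std::vector<int> keys;        // sorted index-key values
    std::vector<uint64_t> tids;   // row positions in the data cstore, same order
};

// Binary-search the sorted CU and return the tids of all rows matching `key`.
std::vector<uint64_t> lookup(const PsortCu &cu, int key) {
    auto lo = std::lower_bound(cu.keys.begin(), cu.keys.end(), key);
    auto hi = std::upper_bound(cu.keys.begin(), cu.keys.end(), key);
    std::vector<uint64_t> out;
    for (auto it = lo; it != hi; ++it)
        out.push_back(cu.tids[it - cu.keys.begin()]);  // tid maps back to the data cstore
    return out;
}
```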
"b/content/zh/post/July/openGauss-\345\271\266\345\217\221\351\207\215\345\273\272\347\264\242\345\274\225\344\273\243\347\240\201\345\256\236\347\216\260.md" new file mode 100644 index 0000000000000000000000000000000000000000..02a52a9c1affe29c504ee585cac7d75f48068ea7 --- /dev/null +++ "b/content/zh/post/July/openGauss-\345\271\266\345\217\221\351\207\215\345\273\272\347\264\242\345\274\225\344\273\243\347\240\201\345\256\236\347\216\260.md" @@ -0,0 +1,134 @@ ++++ + +title = "openGauss 并发重建索引代码实现" + +date = "2021-09-22" + +tags = [ "openGauss 并发重建索引代码实现"] + +archives = "2021-09" + +author = "李宏达" + +summary = "openGauss 并发重建索引代码实现" + +img = "/zh/post/July/title/img9.png" + +times = "12:30" + ++++ + +# openGauss 并发重建索引代码实现 + +本文主要讲解并发创建索引过程中,索引数据追加部分的原理和代码实现。 + +先看一下代码中关于这部分功能实现的注释。 + +``` +/* + +validate_index - support code for concurrent index builds We do a concurrent index build by first inserting the catalog entry for the index via index_create(), marking it not indisready and not indisvalid. +Then we commit our transaction and start a new one, then we wait for all transactions that could have been modifying the table to terminate. Now we know that any subsequently-started transactions will see the index and honor its constraints on HOT updates; so while existing HOT-chains might be broken with respect to the index, no currently live tuple will have an incompatible HOT update done to it. We now build the index normally via index_build(), while holding a weak lock that allows concurrent insert/update/delete. Also, we index only tuples that are valid as of the start of the scan (see IndexBuildHeapScan), whereas a normal build takes care to include recently-dead tuples. This is OK because we won’t mark the index valid until all transactions that might be able to see those tuples are gone. The reason for doing that is to avoid bogus unique-index failures due to concurrent UPDATEs (we might see different versions of the same row as being valid when we pass over them, if we used HeapTupleSatisfiesVacuum). This leaves us with an index that does not contain any tuples added to the table while we built the index. +Next, we mark the index “indisready” (but still not “indisvalid”) and commit the second transaction and start a third. Again we wait for all transactions that could have been modifying the table to terminate. Now we know that any subsequently-started transactions will see the index and insert their new tuples into it. We then take a new reference snapshot which is passed to validate_index(). Any tuples that are valid according to this snap, but are not in the index, must be added to the index. +(Any tuples committed live after the snap will be inserted into the index by their originating transaction. Any tuples committed dead before the snap need not be indexed, because we will wait out all transactions that might care about them before we mark the index valid.) +validate_index() works by first gathering all the TIDs currently in the index, using a bulkdelete callback that just stores the TIDs and doesn’t ever say “delete it”. (This should be faster than a plain indexscan; also, not all index AMs support full-index indexscan.) Then we sort the TIDs, and finally scan the table doing a “merge join” against the TID list to see which tuples are missing from the index. Thus we will ensure that all tuples valid according to the reference snapshot are in the index. 
The second scan takes snapshot2 and appends to the index the data that was inserted between snapshot1 and snapshot2 and is not yet indexed. The approach: first gather all the TIDs currently indexed (using a bulkdelete callback rather than an index scan, because the former is faster and not every index supports a full-index indexscan); then scan the table for all heap tuples visible under snapshot2, obtaining tids'; finally, the difference between tids' and tids is the set of heap tuples whose index entries must be appended.

Unique indexes are trickier to handle: when several versions of one row are present, the build must not report a spurious uniqueness violation, which may require a recheck at the moment a violation is detected.

This part is implemented in validate_index; the key code is:

```
validate_index
{
    ...
    // scan index and gather all the tids into a tuplesort object
    // Collecting the index's tids reuses the index-scan path of the vacuum flow: the index
    // pages are scanned in physical order, but the callback only collects the tids and
    // never actually deletes anything.
    state.tuplesort = tuplesort_begin_datum(
        TIDOID, TIDLessOperator, InvalidOid, false, u_sess->attr.attr_memory.maintenance_work_mem, false);
    state.htups = state.itups = state.tups_inserted = 0;
    (void)index_bulk_delete(&ivinfo, NULL, validate_index_callback, (void*)&state);
    /* Execute the sort */
    // sort by tid
    tuplesort_performsort(state.tuplesort);
    /*
     * Now scan the heap and "merge" it with the index
     */
    // Second table scan: for every scanned tuple (the root tuple of its HOT chain, if it is
    // on one), search the tids scanned out of the index (already sorted by tid); if it is not
    // found, the tuple is missing from the index and must be appended.
    // index_insert is called to insert the heap tuple's index data into the index.
    tableam_index_validate_scan(heapRelation, indexRelation, indexInfo, snapshot, &state);
    ...
}
```
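The gather-sort-merge logic can be modeled in a few lines of C++. This sketch assumes both TID streams are plain sorted integers, which glosses over the real block/offset structure of an ItemPointer:

```
#include <cstdint>
#include <vector>

// Minimal model of validate_index's merge step: both inputs are sorted by TID.
// Returns the heap TIDs that are missing from the index and must be inserted.
std::vector<uint64_t> missingFromIndex(const std::vector<uint64_t> &heapTids,
                                       const std::vector<uint64_t> &indexTids) {
    std::vector<uint64_t> missing;
    size_t i = 0;
    for (uint64_t h : heapTids) {
        while (i < indexTids.size() && indexTids[i] < h)
            ++i;                                   // advance past index entries below h
        if (i == indexTids.size() || indexTids[i] > h)
            missing.push_back(h);                  // h is not present in the index
    }
    return missing;
}
```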
The main logic of validate_index_heapscan is as follows:

```
validate_index_heapscan
{
    ...
    // iterate over the heap tuples
    while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        ...
        // if the tuple is on a HOT chain, look it up in the index by the tid of the chain's root tuple
        if (HeapTupleIsHeapOnly(heapTuple)) {
            root_offnum = root_offsets[root_offnum - 1];
            Assert(OffsetNumberIsValid(root_offnum));
            ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
        }
        ...
        // search the index's tids; since they are sorted, keep advancing while the index tid
        // is smaller than the heap tuple's tid, after which either
        // 1. the tid is found in the index (tids equal): no insertion is needed, or
        // 2. it is not in the index and must be inserted
        while (!tuplesort_empty && (!indexcursor || ItemPointerCompare(indexcursor, &rootTuple) < 0)) {
            ...
        }
        // no matching tid was found: it must be inserted into the index
        if ((tuplesort_empty || ItemPointerCompare(indexcursor, &rootTuple) > 0) && !in_index[root_offnum - 1]) {
            ...
            // append to the index
            (void)index_insert(indexRelation,
                values,
                isnull,
                &rootTuple,
                heapRelation,
                indexInfo->ii_Unique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO);
        }
    }
}
```

This post walked through, code in hand, how the second table scan of a concurrent index build appends the missing index entries; hopefully it helps in understanding this part of the code.

diff --git "a/content/zh/post/July/openGauss-\351\224\201\346\234\272\345\210\266\345\256\236\347\216\260\346\265\205\346\236\220.md" "b/content/zh/post/July/openGauss-\351\224\201\346\234\272\345\210\266\345\256\236\347\216\260\346\265\205\346\236\220.md"
new file mode 100644
index 0000000000000000000000000000000000000000..ae3a607fd8461bb0410efe50c12fc2d446a6efe8
--- /dev/null
+++ "b/content/zh/post/July/openGauss-\351\224\201\346\234\272\345\210\266\345\256\236\347\216\260\346\265\205\346\236\220.md"
@@ -0,0 +1,100 @@
+++

title = "A Brief Look at the openGauss Lock Implementation"

date = "2021-07-21"

tags = [ "A Brief Look at the openGauss Lock Implementation"]

archives = "2021-07"

author = "Walrus"

summary = "A Brief Look at the openGauss Lock Implementation"

img = "/zh/post/July/title/img4.png"

times = "12:30"

+++

# A Brief Look at the openGauss Lock Implementation

Database locking is a big topic. To make it easier to understand, we start with some plain, accessible material and then demonstrate the locking that happens while the simplest possible SQL statement executes, hoping to give everyone a basic picture of the internal lock mechanism of openGauss/PostgreSQL.

Before reading the PostgreSQL/openGauss source, any programmer would have their own idea of how to implement such a lock mechanism; a hundred programmers implementing the same functionality could well produce a hundred versions. Likewise, the world's most popular relational databases probably all implement their locking internals differently, so only by dissecting the source can we know what is really inside.

Locking exists to solve the consistency problems brought by concurrent access. The lock objects we perceive most readily are rows, tables, and the like; let's look at which lock objects (LockTag) exist in PostgreSQL/openGauss.

The kinds of lock objects are shown in the figure below: tables, rows, pages, transaction-ID locks, and so on.

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35b53f3a-e9d8-11eb-a08b-00163e068ecd.png)

Each kind of lock object has different attributes, yet they are all represented by the same structure, with different attributes distinguishing the different lock objects. The lock-object structure is as follows:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35d0ba44-e9d8-11eb-a08b-00163e068ecd.png)

In the LOCKTAG structure, the field indicating the kind of lock object is locktag_type; the other attributes make the object concrete. For example, the macro definition for a table's lock object is:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_35eab66a-e9d8-11eb-a08b-00163e068ecd.png)

As the definition shows, a table's lock object actually uses only three of the LOCKTAG structure's attribute variables, locktag_field1, locktag_field2, and locktag_type; the other attribute variables are used by other kinds of lock objects. Here locktag_field1 is the database OID and locktag_field2 is the table OID. Together, the two attributes identify a unique table. Building a row lock requires more information to identify a unique row:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36030a76-e9d8-11eb-a08b-00163e068ecd.png)

There are more lock-object definitions, which we do not expand on here.

The lock object (locktag) is the thing protected by a lock. Now let's look at the lock structure itself, which must contain a lock-object attribute variable so that it can be associated with the object it protects. A lock can be thought of, loosely, as the administrator of a lock object: it must record which kinds of operations are already acting on the object, and which kinds are still waiting to be allowed. The lock structure is shown below:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_361a3890-e9d8-11eb-a08b-00163e068ecd.png)

The "kinds of operations" just mentioned, that is, the operation types, are usually understood as lock modes. There are eight of them in total, numbered 1 through 8.

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3632182a-e9d8-11eb-a08b-00163e068ecd.png)

When the same object (for example, the same table) is operated on concurrently, whether the operations conflict with each other depends on whether their lock modes conflict, which is determined by the conflict array. The conflict array's definition and values are:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_367bf634-e9d8-11eb-a08b-00163e068ecd.png)

As mentioned earlier, lock modes are numbered 1-8 (0 means no lock), and the element index in the conflict array corresponds one-to-one with the lock-mode number. Because 0 means no lock, element 0 of the conflict array is 0, meaning it conflicts with nothing; 1 is AccessShareLock, and element 1 of the conflict array has the value (1 << AccessExclusiveLock), where AccessExclusiveLock has the value 8, meaning AccessShareLock conflicts only with AccessExclusiveLock. The conflict relations of the other lock modes can be read off the corresponding array elements and are not listed one by one here.
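A stripped-down model of that bitmask test is shown below. The mode numbering follows the text (1 = AccessShareLock ... 8 = AccessExclusiveLock), and the table entries mirror the standard PostgreSQL-style conflict matrix; treat the values as illustrative rather than a copy of the openGauss source:

```
#include <cstdio>

// Lock modes as numbered in the text: 0 = NoLock ... 8 = AccessExclusiveLock.
enum LockMode {
    NoLock = 0, AccessShareLock, RowShareLock, RowExclusiveLock,
    ShareUpdateExclusiveLock, ShareLock, ShareRowExclusiveLock,
    ExclusiveLock, AccessExclusiveLock
};

#define BIT(m) (1 << (m))

// conflictTab[m] is a bitmask of the modes that conflict with mode m.
static const int conflictTab[] = {
    /* NoLock                   */ 0,
    /* AccessShareLock          */ BIT(AccessExclusiveLock),
    /* RowShareLock             */ BIT(ExclusiveLock) | BIT(AccessExclusiveLock),
    /* RowExclusiveLock         */ BIT(ShareLock) | BIT(ShareRowExclusiveLock) |
                                   BIT(ExclusiveLock) | BIT(AccessExclusiveLock),
    /* ShareUpdateExclusiveLock */ BIT(ShareUpdateExclusiveLock) | BIT(ShareLock) |
                                   BIT(ShareRowExclusiveLock) | BIT(ExclusiveLock) |
                                   BIT(AccessExclusiveLock),
    /* ShareLock                */ BIT(RowExclusiveLock) | BIT(ShareUpdateExclusiveLock) |
                                   BIT(ShareRowExclusiveLock) | BIT(ExclusiveLock) |
                                   BIT(AccessExclusiveLock),
    /* ShareRowExclusiveLock    */ BIT(RowExclusiveLock) | BIT(ShareUpdateExclusiveLock) |
                                   BIT(ShareLock) | BIT(ShareRowExclusiveLock) |
                                   BIT(ExclusiveLock) | BIT(AccessExclusiveLock),
    /* ExclusiveLock            */ BIT(RowShareLock) | BIT(RowExclusiveLock) |
                                   BIT(ShareUpdateExclusiveLock) | BIT(ShareLock) |
                                   BIT(ShareRowExclusiveLock) | BIT(ExclusiveLock) |
                                   BIT(AccessExclusiveLock),
    /* AccessExclusiveLock      */ BIT(AccessShareLock) | BIT(RowShareLock) |
                                   BIT(RowExclusiveLock) | BIT(ShareUpdateExclusiveLock) |
                                   BIT(ShareLock) | BIT(ShareRowExclusiveLock) |
                                   BIT(ExclusiveLock) | BIT(AccessExclusiveLock),
};

// Two requests on the same lock object conflict iff the bit test is non-zero.
bool modesConflict(LockMode held, LockMode requested) {
    return (conflictTab[requested] & BIT(held)) != 0;
}

int main() {
    // RowExclusiveLock (an UPDATE on a table) vs AccessShareLock (a SELECT): no conflict.
    std::printf("%d\n", modesConflict(AccessShareLock, RowExclusiveLock));     // prints 0
    // RowExclusiveLock vs AccessExclusiveLock: conflict.
    std::printf("%d\n", modesConflict(AccessExclusiveLock, RowExclusiveLock)); // prints 1
}
```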
The above is the simplest possible account of locking in openGauss/PostgreSQL. Next we take the simplest possible SQL statement as an example and show which objects get locked during its execution, and with which lock modes.

The example table has one primary key and one ordinary index. The OIDs and other details of the table, the primary key, and the ordinary index are shown below:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_3695b43e-e9d8-11eb-a08b-00163e068ecd.png)

The test SQL is:

```
update xcytest set name ='xcytesttest' where name='asdfa23'
```

We trace the locking process by setting a breakpoint on the LockAcquire function. Note that during execution, various system catalogs are queried to obtain the table structure and similar information, so many system tables are locked with mode 1 (AccessShareLock); for simplicity, those lock acquisitions are not mentioned again.

The locking order is as follows:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_36b659aa-e9d8-11eb-a08b-00163e068ecd.png)

The OIDs of the locked objects are, in order, 16385 -> 16388 -> 16427, corresponding to the table xcytest, the primary key xcytest_pkey, and the ordinary index xcy_test_name (see the table-structure figure shown earlier for the mapping). The lock mode is 3, i.e. RowExclusiveLock. The name is easily confused: although it carries the keyword "row", it is actually a table-level lock mode, and the modes conflicting with it are:

(ShareLock) | (ShareRowExclusiveLock) | (ExclusiveLock) | (AccessExclusiveLock). One point worth noting: whether two locks conflict is defined only on the same object; there is no conflict relation between a table lock and a row lock, because they are not on the same object.

Next, the table, the primary key, and the ordinary index are locked again, in exactly the same order and modes as the first time. Why lock again? Because executing a SQL statement is a multi-step process that accesses the table several times (at least twice) during execution, so locks are taken several times.

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_370122b4-e9d8-11eb-a08b-00163e068ecd.png)

After that, the following lock acquisition is captured:

the lockmode equals 7, i.e. ExclusiveLock, but locktag_type is 6. Referring to the lock-object kinds mentioned earlier, 6 corresponds to LOCKTAG_TRANSACTION, so this is not a row lock. Looking at the call stack shows the lock is taken while executing the assigntransactionid function; literally, it is the lock taken when a transaction ID is assigned.

And then the SQL finishes executing, yet no lock was ever taken on the modified tuple (row). Why was the modified row not locked?
Digging further into the code reveals the following logic:

![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210721_374c758e-e9d8-11eb-a08b-00163e068ecd.png)

Only when a row is found to be in the middle of being modified is the lockTuple function called to lock it; otherwise the modified row is not locked. To verify this conclusion, start two sessions that modify the same row: the first session modifies it without committing, and when the second session then modifies the same row, you can see the second session's thread execute lockTuple.

That is the locking process of the simplest UPDATE statement; the locking of other SQL statements will be demonstrated in later posts.

diff --git "a/content/zh/post/July/openGauss2-0-1\347\232\204\345\256\211\350\243\205\346\255\245\351\252\244.md" "b/content/zh/post/July/openGauss2-0-1\347\232\204\345\256\211\350\243\205\346\255\245\351\252\244.md"
new file mode 100644
index 0000000000000000000000000000000000000000..fec94ab9322a6e3d03927fb70c12a1195818ed74
--- /dev/null
+++ "b/content/zh/post/July/openGauss2-0-1\347\232\204\345\256\211\350\243\205\346\255\245\351\252\244.md"
@@ -0,0 +1,315 @@
+++

title = "openGauss 2.0.1 Installation Steps"

date = "2021-10-21"

tags = [ "openGauss 2.0.1 Installation Steps"]

archives = "2021-10"

author = "吴毅"

summary = "openGauss 2.0.1 Installation Steps"

img = "/zh/post/July/title/img5.png"

times = "12:30"

+++

# openGauss 2.0.1 Installation Steps

Installing openGauss. Test environment: OS version CentOS 7.6 x86_64; hardware: one machine with 2 CPUs and 4 GB of memory; server name kafka1.wuyi.com; IP address 172.16.32.5.

- 1 Disable the firewall and SELinux

  ```
  systemctl disable firewalld

  systemctl stop firewalld

  sed -i s/SELINUX=.*/SELINUX=disabled/ /etc/selinux/config

  cat /etc/selinux/config

  getenforce

  setenforce 0

  getenforce
  ```

- 2 Set the OS character encoding and time zone

  ```
  echo $LANG
  ```

- 3 Turn off the SWAP partition [for a machine with 2 GB of memory, it is recommended to re-enable SWAP after installation to indirectly "extend" the memory capacity]

- 4 Configure the SSH service: disable the banner and allow remote root login

  ```
  sed -i '/Banner/s/^/#/' /etc/ssh/sshd_config

  sed -i '/PermitRootLogin/s/^/#/' /etc/ssh/sshd_config

  echo -e "\n" >> /etc/ssh/sshd_config

  echo "Banner none " >> /etc/ssh/sshd_config

  echo "PermitRootLogin yes" >> /etc/ssh/sshd_config

  cat /etc/ssh/sshd_config |grep -v ^#|grep -E 'PermitRoot|Banner'
  ```

- 5 Configure the YUM repository, install dependencies, and change the default Python 3 version

  ```
  mkdir /etc/yum.repos.d/bak

  mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/

  wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo

  yum clean all

  yum install -y bzip2 python3

  yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel net-tools tar

  mv /usr/bin/python /usr/bin/python2_bak

  ln -s /usr/bin/python3 /usr/bin/python

  python -V
  ```

- 6 Configure sysctl.conf and performance.sh

  ```
  cat >> /etc/sysctl.conf << EOF

  net.ipv4.tcp_retries1 = 5

  net.ipv4.tcp_syn_retries = 5

  net.sctp.path_max_retrans = 10

  net.sctp.max_init_retransmits = 10

  EOF

  sysctl -p
  ```

- 7 Configure resource limits

  ```
  echo "* soft stack 3072" >> /etc/security/limits.conf

  echo "* hard stack 3072" >> /etc/security/limits.conf

  echo "* soft nofile 1000000" >> /etc/security/limits.conf

  echo "* hard nofile 1000000" >> /etc/security/limits.conf

  echo "* soft nproc unlimited" >> /etc/security/limits.d/90-nproc.conf

  tail -n 4 /etc/security/limits.conf

  tail -n 1 /etc/security/limits.d/90-nproc.conf
  ```

- 8 Disable transparent huge pages [Only for CentOS]

  ```
  cat >>/etc/rc.d/rc.local<<EOF
  if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
      echo never > /sys/kernel/mm/transparent_hugepage/enabled
  fi
  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
      echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi
  EOF

  chmod +x /etc/rc.d/rc.local

  /usr/bin/sh /etc/rc.d/rc.local

  cat /sys/kernel/mm/transparent_hugepage/enabled

  cat /sys/kernel/mm/transparent_hugepage/defrag
  ```

- 9 Download the openGauss software package

  ```
  mkdir -p
/soft/ + + cd /soft/ + + wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.0.1-CentOS-64bit-all.tar.gz + + # /soft/clusterconfig.xml + ``` + +- 10 配置XML文件 + + ``` + cat >> /soft/clusterconfig.xml < + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + EOF + + cat /soft/clusterconfig.xml + ``` + +- 11 解压安装包并修改目录权限 + + ``` + cd /soft + + tar -zxvf *all.tar.gz + + tar -zxvf *om.tar.gz + + chmod -R 777 * + ``` + +- 12 执行 gs\_preinstall + + ``` + python script/gs_preinstall -U omm -G dbgrp -X clusterconfig.xml + + ***???*** + ``` + +- 13 检查预安装环境 + + ``` + /soft/script/gs_checkos -i A -h kafka1.wuyi.com --detail + ``` + + +- 14 执行 gs\_install + + ``` + touch /home/omm/install_db + + cat >> /home/omm/install_db < + + + +## 一、环境描述 + +操作系统: + +openEuler 20.03 LTS + +openEuler 20.03 LTS SP2 + +数据库: + +openGauss2.1.0 + +软件包: + +openGauss-2.1.0-openEuler-64bit-all.tar.gz + +## 二、安装过程 + +安装过程自动创建初始用户omm。 + +## 三、问题 + +数据库安装使用正常,但是使用yum,会报错: + +``` +[root@node1 ~]# yum list +Traceback (most recent call last): + File "/usr/lib64/python3.7/site-packages/libdnf/common_types.py", line 14, in swig_import_helper + return importlib.import_module(mname) + File "/usr/lib64/python3.7/importlib/__init__.py", line 127, in import_module + return _bootstrap._gcd_import(name[level:], package, level) + File "", line 1006, in _gcd_import + File "", line 983, in _find_and_load + File "", line 967, in _find_and_load_unlocked + File "", line 670, in _load_unlocked + File "", line 583, in module_from_spec + File "", line 1043, in create_module + File "", line 219, in _call_with_frames_removed +ImportError: /lib64/libcurl.so.4: symbol SSLv3_client_method version OPENSSL_1_1_0 not defined in file libssl.so.1.1 with link time reference + +During handling of the above exception, another exception occurred: + +Traceback (most recent call last): + File "/usr/bin/yum", line 57, in + from dnf.cli import main + File "/usr/lib/python3.7/site-packages/dnf/__init__.py", line 30, in + import dnf.base + File "/usr/lib/python3.7/site-packages/dnf/base.py", line 29, in + import libdnf.transaction + File "/usr/lib64/python3.7/site-packages/libdnf/__init__.py", line 3, in + from . 
import common_types + File "/usr/lib64/python3.7/site-packages/libdnf/common_types.py", line 17, in + _common_types = swig_import_helper() + File "/usr/lib64/python3.7/site-packages/libdnf/common_types.py", line 16, in swig_import_helper + return importlib.import_module('_common_types') + File "/usr/lib64/python3.7/importlib/__init__.py", line 127, in import_module + return _bootstrap._gcd_import(name[level:], package, level) +ModuleNotFoundError: No module named '_common_types' +[root@node1 ~]# +``` + +## 四、问题分析 + +``` +[root@node1 lib64]# ldd /lib64/libcurl.so.4 + linux-vdso.so.1 (0x00007fff98577000) + libnghttp2.so.14 (0x00007f06c3221000) + libidn2.so.0 (0x00007f06c3201000) + libssh.so.4 (0x00007f06c3180000) + libpsl.so.5 (0x00007f06c316d000) + libssl.so.1.1 => /opt/software/openGauss/script/gspylib/clib/libssl.so.1.1 (0x00007f06c30db000) + libcrypto.so.1.1 => /opt/software/openGauss/script/gspylib/clib/libcrypto.so.1.1 (0x00007f06c2e14000) + libgssapi_krb5.so.2 (0x00007f06c2dc4000) + libkrb5.so.3 (0x00007f06c2ce4000) + libk5crypto.so.3 (0x00007f06c2cc9000) + libcom_err.so.2 (0x00007f06c2cc3000) + libldap-2.4.so.2 (0x00007f06c2c76000) + liblber-2.4.so.2 (0x00007f06c2c63000) + libbrotlidec.so.1 (0x00007f06c2c54000) + libz.so.1 (0x00007f06c2c3a000) + libpthread.so.0 (0x00007f06c2c19000) + libc.so.6 (0x00007f06c2a58000) + libunistring.so.2 (0x00007f06c28d4000) + librt.so.1 (0x00007f06c28c7000) + /lib64/ld-linux-x86-64.so.2 (0x00007f06c32d9000) + libdl.so.2 (0x00007f06c28c2000) + libkrb5support.so.0 (0x00007f06c28b2000) + libkeyutils.so.1 (0x00007f06c28ac000) + libresolv.so.2 (0x00007f06c2894000) + libsasl2.so.3 (0x00007f06c2873000) + libm.so.6 (0x00007f06c26f0000) + libbrotlicommon.so.1 (0x00007f06c26cd000) + libselinux.so.1 (0x00007f06c26a1000) + libcrypt.so.1 (0x00007f06c2666000) + libpcre2-8.so.0 (0x00007f06c25d4000) +[root@node1 lib64]# nm /opt/software/openGauss/script/gspylib/clib/libssl.so.1.1| grep SSLv3_client_method +``` + +确实没有SSLv3\_client\_method + +## 五、解决 + +1 、下载openssl-1.1.1l,编译: + +编译openssl增加enable-ssl3与enable-ssl3-method选项 + +``` +# +tar -zxvf openssl-1.1.1l.tar.gz + cd openssl-1.1.1l +./config shared enable-ssl3 enable-ssl3-method +make +make install +``` + +检查新编译的文件是否包含SSLv3\_client\_method: + +``` +[root@node1 lib64]# nm /usr/local/lib64/libssl.so.1.1 | grep SSLv3_client_method +00000000000214f0 T SSLv3_client_method +``` + +编译完成后是有SSLv3\_client\_method + +编译好的库拷贝替换原来的libssl.so.1.1: + +``` +[root@node1 lib64]# cp /usr/local/lib64/libssl.so.1.1 /opt/software/openGauss/script/gspylib/clib/ +cp: overwrite '/opt/software/openGauss/script/gspylib/clib/libssl.so.1.1'? y +[root@node1 lib64]# +``` + +检查: + +``` +[root@node1 lib64]# nm /opt/software/openGauss/script/gspylib/clib/libssl.so.1.1| grep SSLv3_client_method +00000000000214f0 T SSLv3_client_method +``` + +至此,该文件正常。 + +``` +yum测试: +[root@node1 lib64]# yum install -y tree +Last metadata expiration check: 0:29:30 ago on Tue 23 Nov 2021 11:15:02 AM CST. +Package tree-1.7.0-18.oe1.x86_64 is already installed. +Dependencies resolved. +Nothing to do. +Complete! 
+``` + +修复完成,yum正常使用 + +## 六、深入问题 + +``` +[root@node1 lib64]# ldd /lib64/libcurl.so.4 + linux-vdso.so.1 (0x00007fff98577000) + libnghttp2.so.14 (0x00007f06c3221000) + libidn2.so.0 (0x00007f06c3201000) + libssh.so.4 (0x00007f06c3180000) + libpsl.so.5 (0x00007f06c316d000) + **libssl.so.1.1 => /opt/software/openGauss/script/gspylib/clib/libssl.so.1.1 (0x00007f06c30db000)** +``` + +操作系统默认libssl.so.1.1链接到/lib64目录中,但是安装数据库后 + +发现libssl.so.1.1链接到/opt/software/openGauss/script/gspylib/clib/libssl.so.1.1 , + +怀疑安装脚本有问题。 + +这里参考曾庆峰老师的解决方案: + +“先创建omm用户dbgrp组,预安装时就不再创建omm”,安装后,yum可以使用。 + +## 七、最简单方法 + +按照官方文档正常安装,yum报错,只需要: + +``` +vim /etc/profile +#export LD_LIBRARY_PATH=/opt/software/openGauss/script/gspylib/clib: +#export PATH=/root/gauss_om/omm/script:$PATH +``` + +重启即可。 + +原因: + +估计是安装脚本有问题。 + diff --git "a/content/zh/post/July/openGauss2-1-0\346\226\260\347\211\271\346\200\247-\350\264\246\346\234\254\346\225\260\346\215\256\345\272\223\345\256\236\351\252\214.md" "b/content/zh/post/July/openGauss2-1-0\346\226\260\347\211\271\346\200\247-\350\264\246\346\234\254\346\225\260\346\215\256\345\272\223\345\256\236\351\252\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..52758e084b1219e5910e036de8409b8e16cf2ca6 --- /dev/null +++ "b/content/zh/post/July/openGauss2-1-0\346\226\260\347\211\271\346\200\247-\350\264\246\346\234\254\346\225\260\346\215\256\345\272\223\345\256\236\351\252\214.md" @@ -0,0 +1,190 @@ ++++ + +title = "openGauss2.1.0新特性-账本数据库实验" + +date = "2021-10-21" + +tags = [ "openGauss2.1.0新特性-账本数据库实验"] + +archives = "2021-10" + +author = "姜殿斌" + +summary = "openGauss2.1.0新特性-账本数据库实验" + +img = "/zh/post/July/title/img3.png" + +times = "12:30" + ++++ + +# openGauss2.1.0新特性-账本数据库实验 + + + +账本数据库融合了区块链思想,将用户操作记录至两种历史表中:用户历史表和全局区块表。当用户创建防篡改用户表时,系统将自动为该表添加一个hash列来保存每行数据的hash摘要信息,同时在blockchain模式下会创建一张用户历史表来记录对应用户表中每条数据的变更行为;而用户对防篡改用户表的一次修改行为将记录至全局区块表中。由于历史表具有只可追加不可修改的特点,因此历史表记录串联起来便形成了用户对防篡改用户表的修改历史。 + +下面,通过实验来理解账本数据库这一新特性: + +## 1、创建防篡改模式: ledgernsp + +登录数据库: + +``` +[omm@node1 ~]$ gsql -d postgres -p 26000 -r + +gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:04 commit 0 last mr ) + +openGauss=# create schema ledgernsp with blockchain; + +CREATE SCHEMA +``` + +查看新建的模式 ledgernsp: + +``` +openGauss=# \dn +List of schemas +Name | Owner +----------------+------- +blockchain | omm +cstore | omm +db4ai | omm +dbe_perf | omm +dbe_pldebugger | omm +ledgernsp | omm +pkg_service | omm +public | omm +snapshot | omm +sqladvisor | omm +(10 rows) +``` + +## 2、在防篡改模式下创建防篡改用户表: + +``` +openGauss=# CREATE TABLE ledgernsp.usertable(id int, name text); +CREATE TABLE +``` + +查看防篡改用户表结构及其对应的用户历史表结构: + +``` +openGauss=# \d+ ledgernsp.usertable; +Table "ledgernsp.usertable" +Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+-----------+----------+--------------+------------- +id | integer | | plain | | +name | text | | extended | | +hash | hash16 | | plain | | +Has OIDs: no +Options: orientation=row, compression=no + +openGauss=# \d+ blockchain.ledgernsp_usertable_hist; +Table "blockchain.ledgernsp_usertable_hist" +Column | Type | Modifiers | Storage | Stats target | Description +----------+--------+-----------+---------+--------------+------------- +rec_num | bigint | | plain | | +hash_ins | hash16 | | plain | | +hash_del | hash16 | | plain | | +pre_hash | hash32 | | plain | | +Indexes: +"gs_hist_24788_index" PRIMARY KEY, btree (rec_num int4_ops) TABLESPACE pg_default +Has OIDs: no +Options: internal_mask=263 +``` + +## 
3、修改防篡改用户表数据,并查看hash值的相应变化: + +1)插入数据: + +``` +openGauss=# INSERT INTO ledgernsp.usertable VALUES(1, 'alex'), (2, 'bob'), (3, 'peter'); +INSERT 0 3 +openGauss=# SELECT *, hash FROM ledgernsp.usertable ORDER BY id; +id | name | hash +----+-------+------------------ +1 | alex | 1f2e543c580cb8c5 +2 | bob | 8fcd74a8a6a4b484 +3 | peter | f51b4b1b12d0354b +(3 rows) +``` + +2)更新数据: + +``` +openGauss=# UPDATE ledgernsp.usertable SET name = 'bob2' WHERE id = 2; +UPDATE 1 +openGauss=# SELECT *, hash FROM ledgernsp.usertable ORDER BY id; +id | name | hash +----+-------+------------------ +1 | alex | 1f2e543c580cb8c5 +2 | bob2 | 437761affbb7c605 +3 | peter | f51b4b1b12d0354b +(3 rows) +``` + +3)删除数据: + +``` +openGauss=# DELETE FROM ledgernsp.usertable WHERE id = 3; +DELETE 1 +openGauss=# SELECT *, hash FROM ledgernsp.usertable ORDER BY id +openGauss-# ; +id | name | hash +----+------+------------------ +1 | alex | 1f2e543c580cb8c5 +2 | bob2 | 437761affbb7c605 +(2 rows) +``` + +## 4、查询历史表记录: + +``` +openGauss=# select * from blockchain.ledgernsp_usertable_hist; +rec_num | hash_ins | hash_del | pre_hash +---------+------------------+------------------+---------------------------------- +0 | 1f2e543c580cb8c5 | | e45acf22fe042b2373d148f52903d29a +1 | 8fcd74a8a6a4b484 | | af08f23d38ecfec2ad9c6f1c4685a837 +2 | f51b4b1b12d0354b | | 69e2885fb802fbb2b191211623115f9d +3 | 437761affbb7c605 | 8fcd74a8a6a4b484 | fd61cb772033da297d10c4e658e898d7 +4 | | f51b4b1b12d0354b | 6475a497b7a272a92bab012d7f3d615b +(5 rows) +``` + +也可以通过查询gs\_global\_chain,查询全局区块表记录。: + +``` +openGauss=# SELECT * FROM gs_global_chain; +blocknum | dbname | username | starttime | relid | relnsp | relname | relhash | +globalhash | txcommand +----------+----------+----------+-------------------------------+-------+-----------+-----------+------------------+-------- +0 | postgres | omm | 2021-10-29 16:52:29.929996+08 | 24788 | ledgernsp | usertable | a41714001181a294 | 84c0a24 +3ed2def4580f74cec812732fa | INSERT INTO ledgernsp.usertable VALUES(1, 'alex'), (2, 'bob'), (3, 'peter'); +1 | postgres | omm | 2021-10-29 16:53:09.009009+08 | 24788 | ledgernsp | usertable | b3a9ed0755131181 | 9571d5a +0595aaf528917a6fe23d6e80a | UPDATE ledgernsp.usertable SET name = 'bob2' WHERE id = 2; +2 | postgres | omm | 2021-10-29 16:54:17.525065+08 | 24788 | ledgernsp | usertable | 0ae4b4e4ed2fcab5 | aa016e1 +c8768857857815684f36aa4b2 | DELETE FROM ledgernsp.usertable WHERE id = 3; +(3 rows) +``` + +查询用户表数据及hash校验列: + +``` +openGauss=# SELECT *, hash FROM ledgernsp.usertable; +id | name | hash +----+------+------------------ +1 | alex | 1f2e543c580cb8c5 +2 | bob2 | 437761affbb7c605 + +(2 rows) +``` + +## 【实验结论】: + +查询结果显示,用户表中剩余2条数据,与全局区块表记录中的记录一致。 + +实验结束。 + +参考:《openGauss2.1.0开发者指南》 + diff --git "a/content/zh/post/July/openGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\205.md" "b/content/zh/post/July/openGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\205.md" new file mode 100644 index 0000000000000000000000000000000000000000..1a3cfe5dee3e7f942e28f0eec6f32694ac76f733 --- /dev/null +++ "b/content/zh/post/July/openGauss\344\270\200\344\270\273\344\270\200\345\244\207\344\270\200\347\272\247\345\256\211\350\243\205.md" @@ -0,0 +1,825 @@ ++++ + +title = "openGauss一主一备一级安装" + +date = "2021-07-21" + +tags = [ "openGauss一主一备一级安装"] + +archives = "2021-07" + +author = "Walrus" + +summary = "openGauss一主一备一级安装" + +img = "/zh/post/July/title/img5.png" + +times = "12:30" + ++++ 
+ +# openGauss一主一备一级安装 + +## 1. 准备三台CentOS7.6 + +![](figures/OpenGauss一主一备一级安装1.png) + +![](figures/OpenGauss一主一备一级安装2.png) + +![](figures/OpenGauss一主一备一级安装3.png) + +## 2. 上传操作系统配置修改脚本 + +脚本OSprepare\_node1.sh内容如下:此脚本参考贾军峰老师的《一键部署openGauss2.0.0》https://www.modb.pro/db/52552 + +``` +#!/bin/bash +mkdir -p opt/sofeware/openGauss +chmod 775 -R opt/sofeware/openGauss +export HOSTNAME=node1 +export HOSTIP=192.168.59.26 +export SOFTWARE_DIRECTORY=/opt/soft/openGauss +## 1. 设置主机名并配置hosts文件 +hostnamectl set-hostname $HOSTNAME +sed -i '/$HOSTIP/d' etc/hosts +echo "$HOSTIP $HOSTNAME #Gauss OM IP Hosts Mapping" >> etc/hosts +cat etc/hosts +echo "1.Configure etc/hosts completed." +echo -e "\n" +## 2. 关闭防火墙 +systemctl disable firewalld.service +systemctl stop firewalld.service +echo "Firewalld " `systemctl status firewalld|grep Active` +echo "2.Disable firewalld service completed." +echo -e "\n" +## 3. 关闭SELinux +sed -i '/^SELINUX=/d' etc/selinux/config +echo "SELINUX=disabled" >> etc/selinux/config +cat etc/selinux/config|grep "SELINUX=disabled" +echo "3.Disable SELINUX completed." +echo -e "\n" +## 4. 设置操作系统字符集编码 +echo "LANG=en_US.UTF-8" >> etc/profile +source etc/profile +echo $LANG +echo "4.Configure encoding completed." +echo -e "\n" +## 5. 设置操作系统时区 +rm -fr etc/localtime +ln -s usr/share/zoneinfo/Asia/Shanghai etc/localtime +date -R +hwclock +echo "5.Configure Timezone completed." +echo -e "\n" +## 6. 关闭SWAP分区 [对于2G内存的设备,建议待安装完毕后再打开SWAP以间接 “扩容内存容量”] +sed -i '/swap/s/^/#/' etc/fstab +swapoff -a +free -m +echo "6.Close swap partition completed." +echo -e "\n" +## 7. 配置SSH服务,关闭Banner,允许root远程登录 +sed -i '/Banner/s/^/#/' etc/ssh/sshd_config +sed -i '/PermitRootLogin/s/^/#/' etc/ssh/sshd_config +echo -e "\n" >> etc/ssh/sshd_config +echo "Banner none " >> etc/ssh/sshd_config +echo "PermitRootLogin yes" >> etc/ssh/sshd_config +cat etc/ssh/sshd_config |grep -v ^#|grep -E 'PermitRoot|Banner' +echo "7.Configure SSH Service completed." +echo -e "\n" +## 8. 配置YUM源、安装依赖包、修改默认Python3版本 +yum install -y bzip2 python3 +yum install -y libaio-devel libnsl flex bison ncurses-devel glibc-devel patch readline-devel net-tools tar +mv usr/bin/python usr/bin/python2_bak +ln -s usr/bin/python3 usr/bin/python +python -V +echo "8.Configure Install Packages and change default Python version completed." +echo -e "\n" +## 9. 配置 sysctl.conf 和 performance.sh +cat >> etc/sysctl.conf << EOF +net.ipv4.tcp_retries1 = 5 +net.ipv4.tcp_syn_retries = 5 +net.sctp.path_max_retrans = 10 +net.sctp.max_init_retransmits = 10 +EOF +sysctl -p +## 10. 配置资源限制 +echo "* soft stack 3072" >> etc/security/limits.conf +echo "* hard stack 3072" >> etc/security/limits.conf +echo "* soft nofile 1000000" >> etc/security/limits.conf +echo "* hard nofile 1000000" >> etc/security/limits.conf +echo "* soft nproc unlimited" >> etc/security/limits.d/90-nproc.conf +tail -n 4 etc/security/limits.conf +tail -n 1 etc/security/limits.d/90-nproc.conf +echo "10.Configure resource limits completed." +echo -e "\n" +## 11. 关闭透明大页[Only for CentOS] +cat >>/etc/rc.d/rc.local< sys/kernel/mm/transparent_hugepage/enabled +fi +if test -f sys/kernel/mm/transparent_hugepage/defrag; then + echo never > sys/kernel/mm/transparent_hugepage/defrag +fi +EOF +chmod +x etc/rc.d/rc.local usr/bin/sh etc/rc.d/rc.local +cat sys/kernel/mm/transparent_hugepage/enabled +cat sys/kernel/mm/transparent_hugepage/defrag +echo "11.Close transparent_hugepage completed." +echo -e "\n" +``` + +## 3. 
脚本执行过程: + +``` +[root@node1 ~]# ls +anaconda-ks.cfg OSprepare_node1.sh +[root@node1 ~]# ll +total 8 +-rw-------. 1 root root 1683 Jul 19 12:39 anaconda-ks.cfg +-rw-r--r--. 1 root root 3419 Jul 19 14:40 OSprepare_node1.sh +[root@node1 ~]# chmod +x OSprepare_node1.sh +[root@node1 ~]# ll +total 8 +-rw-------. 1 root root 1683 Jul 19 12:39 anaconda-ks.cfg +-rwxr-xr-x. 1 root root 3419 Jul 19 14:40 OSprepare_node1.sh +[root@node1 ~]# sh OSprepare_node1.sh +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +192.168.59.26 node1 #Gauss OM IP Hosts Mapping +1.Configure etc/hosts completed. +Removed symlink etc/systemd/system/multi-user.target.wants/firewalld.service. +Removed symlink etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service. +Firewalld Active: inactive (dead) +2.Disable firewalld service completed. +SELINUX=disabled +3.Disable SELINUX completed. +en_US.UTF-8 +4.Configure encoding completed. +Mon, 19 Jul 2021 14:41:00 +0800 +Mon 19 Jul 2021 01:48:58 PM CST -0.241247 seconds +5.Configure Timezone completed. + total used free shared buff/cache available +Mem: 4411 110 4042 8 259 4053 +Swap: 0 0 0 +6.Close swap partition completed. +Banner none +PermitRootLogin yes +7.Configure SSH Service completed. +Loaded plugins: fastestmirror +Loading mirror speeds from cached hostfile + * base: mirrors.aliyun.com + * extras: mirrors.aliyun.com + * updates: mirrors.aliyun.com +Resolving Dependencies +--> Running transaction check +---> Package bzip2.x86_64 0:1.0.6-13.el7 will be installed +---> Package python3.x86_64 0:3.6.8-18.el7 will be installed +--> Processing Dependency: python3-libs(x86-64) = 3.6.8-18.el7 for package: python3-3.6.8-18.el7.x86_64 +--> Processing Dependency: python3-setuptools for package: python3-3.6.8-18.el7.x86_64 +--> Processing Dependency: python3-pip for package: python3-3.6.8-18.el7.x86_64 +--> Processing Dependency: libpython3.6m.so.1.0()(64bit) for package: python3-3.6.8-18.el7.x86_64 +--> Running transaction check +---> Package python3-libs.x86_64 0:3.6.8-18.el7 will be installed +--> Processing Dependency: libtirpc.so.1()(64bit) for package: python3-libs-3.6.8-18.el7.x86_64 +---> Package python3-pip.noarch 0:9.0.3-8.el7 will be installed +---> Package python3-setuptools.noarch 0:39.2.0-10.el7 will be installed +--> Running transaction check +---> Package libtirpc.x86_64 0:0.2.4-0.16.el7 will be installed +--> Finished Dependency Resolution +Dependencies Resolved +========================================================================================================= + Package Arch Version Repository Size +========================================================================================================= +Installing: + bzip2 x86_64 1.0.6-13.el7 base 52 k + python3 x86_64 3.6.8-18.el7 updates 70 k +Installing for dependencies: + libtirpc x86_64 0.2.4-0.16.el7 base 89 k + python3-libs x86_64 3.6.8-18.el7 updates 6.9 M + python3-pip noarch 9.0.3-8.el7 base 1.6 M + python3-setuptools noarch 39.2.0-10.el7 base 629 k + +Transaction Summary +========================================================================================================== +Install 2 Packages (+4 Dependent packages) + +Total download size: 9.4 M +Installed size: 48 M +Downloading packages: +(1/6): bzip2-1.0.6-13.el7.x86_64.rpm | 52 kB 00:00:00 +(2/6): python3-3.6.8-18.el7.x86_64.rpm | 70 kB 00:00:00 +(3/6): libtirpc-0.2.4-0.16.el7.x86_64.rpm | 89 kB 00:00:00 +(4/6): 
python3-pip-9.0.3-8.el7.noarch.rpm | 1.6 MB 00:00:00 +(5/6): python3-setuptools-39.2.0-10.el7.noarch.rpm | 629 kB 00:00:00 +(6/6): python3-libs-3.6.8-18.el7.x86_64.rpm | 6.9 MB 00:00:01 +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +Total 7.3 MB/s | 9.4 MB 00:00:01 +Running transaction check +Running transaction test +Transaction test succeeded +Running transaction + Installing : libtirpc-0.2.4-0.16.el7.x86_64 1/6 + Installing : python3-setuptools-39.2.0-10.el7.noarch 2/6 + Installing : python3-pip-9.0.3-8.el7.noarch 3/6 + Installing : python3-3.6.8-18.el7.x86_64 4/6 + Installing : python3-libs-3.6.8-18.el7.x86_64 5/6 + Installing : bzip2-1.0.6-13.el7.x86_64 6/6 + Verifying : libtirpc-0.2.4-0.16.el7.x86_64 1/6 + Verifying : python3-3.6.8-18.el7.x86_64 2/6 + Verifying : python3-libs-3.6.8-18.el7.x86_64 3/6 + Verifying : bzip2-1.0.6-13.el7.x86_64 4/6 + Verifying : python3-setuptools-39.2.0-10.el7.noarch 5/6 + Verifying : python3-pip-9.0.3-8.el7.noarch 6/6 +Installed: + bzip2.x86_64 0:1.0.6-13.el7 python3.x86_64 0:3.6.8-18.el7 +Dependency Installed: + libtirpc.x86_64 0:0.2.4-0.16.el7 python3-libs.x86_64 0:3.6.8-18.el7 python3-pip.noarch 0:9.0.3-8.el7 python3-setuptools.noarch 0:39.2.0-10.el7 + +Complete! +Loaded plugins: fastestmirror +Loading mirror speeds from cached hostfile + * base: mirrors.aliyun.com + * extras: mirrors.aliyun.com + * updates: mirrors.aliyun.com +No package libnsl available. +Package net-tools-2.0-0.25.20131004git.el7.x86_64 already installed and latest version +Package 2:tar-1.26-35.el7.x86_64 already installed and latest version +Resolving Dependencies +--> Running transaction check +---> Package bison.x86_64 0:3.0.4-2.el7 will be installed +--> Processing Dependency: m4 >= 1.4 for package: bison-3.0.4-2.el7.x86_64 +---> Package flex.x86_64 0:2.5.37-6.el7 will be installed +---> Package glibc-devel.x86_64 0:2.17-324.el7_9 will be installed +--> Processing Dependency: glibc-headers = 2.17-324.el7_9 for package: glibc-devel-2.17-324.el7_9.x86_64 +--> Processing Dependency: glibc = 2.17-324.el7_9 for package: glibc-devel-2.17-324.el7_9.x86_64 +--> Processing Dependency: glibc-headers for package: glibc-devel-2.17-324.el7_9.x86_64 +---> Package libaio-devel.x86_64 0:0.3.109-13.el7 will be installed +---> Package ncurses-devel.x86_64 0:5.9-14.20130511.el7_4 will be installed +---> Package patch.x86_64 0:2.7.1-12.el7_7 will be installed +---> Package readline-devel.x86_64 0:6.2-11.el7 will be installed +--> Processing Dependency: readline = 6.2-11.el7 for package: readline-devel-6.2-11.el7.x86_64 +--> Running transaction check +---> Package glibc.x86_64 0:2.17-260.el7 will be updated +--> Processing Dependency: glibc = 2.17-260.el7 for package: glibc-common-2.17-260.el7.x86_64 +---> Package glibc.x86_64 0:2.17-324.el7_9 will be an update +---> Package glibc-headers.x86_64 0:2.17-324.el7_9 will be installed +--> Processing Dependency: kernel-headers >= 2.2.1 for package: glibc-headers-2.17-324.el7_9.x86_64 +--> Processing Dependency: kernel-headers for package: glibc-headers-2.17-324.el7_9.x86_64 +---> Package m4.x86_64 0:1.4.16-10.el7 will be installed +---> Package readline.x86_64 0:6.2-10.el7 will be updated +---> Package readline.x86_64 0:6.2-11.el7 will be an update +--> Running transaction check +---> Package glibc-common.x86_64 0:2.17-260.el7 will be updated +---> Package glibc-common.x86_64 0:2.17-324.el7_9 will be an update +---> Package 
kernel-headers.x86_64 0:3.10.0-1160.31.1.el7 will be installed +--> Finished Dependency Resolution + +Dependencies Resolved + +========================================================================================================== Package Arch Version Repository Size +========================================================================================================= +Installing: + bison x86_64 3.0.4-2.el7 base 674 k + flex x86_64 2.5.37-6.el7 base 293 k + glibc-devel x86_64 2.17-324.el7_9 updates 1.1 M + libaio-devel x86_64 0.3.109-13.el7 base 13 k + ncurses-devel x86_64 5.9-14.20130511.el7_4 base 712 k + patch x86_64 2.7.1-12.el7_7 base 111 k + readline-devel x86_64 6.2-11.el7 base 139 k +Installing for dependencies: + glibc-headers x86_64 2.17-324.el7_9 updates 691 k + kernel-headers x86_64 3.10.0-1160.31.1.el7 updates 9.0 M + m4 x86_64 1.4.16-10.el7 base 256 k +Updating for dependencies: + glibc x86_64 2.17-324.el7_9 updates 3.6 M + glibc-common x86_64 2.17-324.el7_9 updates 12 M + readline x86_64 6.2-11.el7 base 193 k + +Transaction Summary +========================================================================================================= +Install 7 Packages (+3 Dependent packages) +Upgrade ( 3 Dependent packages) + +Total download size: 28 M +Downloading packages: +Delta RPMs disabled because /usr/bin/applydeltarpm not installed. +(1/13): flex-2.5.37-6.el7.x86_64.rpm | 293 kB 00:00:00 +(2/13): bison-3.0.4-2.el7.x86_64.rpm | 674 kB 00:00:00 +(3/13): glibc-2.17-324.el7_9.x86_64.rpm | 3.6 MB 00:00:01 +(4/13): glibc-devel-2.17-324.el7_9.x86_64.rpm | 1.1 MB 00:00:00 +(5/13): glibc-headers-2.17-324.el7_9.x86_64.rpm | 691 kB 00:00:00 +(6/13): libaio-devel-0.3.109-13.el7.x86_64.rpm | 13 kB 00:00:00 +(7/13): m4-1.4.16-10.el7.x86_64.rpm | 256 kB 00:00:00 +(8/13): patch-2.7.1-12.el7_7.x86_64.rpm | 111 kB 00:00:00 +(9/13): glibc-common-2.17-324.el7_9.x86_64.rpm | 12 MB 00:00:02 +(10/13): ncurses-devel-5.9-14.20130511.el7_4.x86_64.rpm | 712 kB 00:00:00 +(11/13): readline-6.2-11.el7.x86_64.rpm | 193 kB 00:00:00 +(12/13): readline-devel-6.2-11.el7.x86_64.rpm | 139 kB 00:00:00 +(13/13): kernel-headers-3.10.0-1160.31.1.el7.x86_64.rpm | 9.0 MB 00:00:01 +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +Total 9.7 MB/s | 28 MB 00:00:02 +Running transaction check +Running transaction test +Transaction test succeeded +Running transaction + Updating : glibc-common-2.17-324.el7_9.x86_64 1/16 + Updating : glibc-2.17-324.el7_9.x86_64 2/16 +warning: /etc/nsswitch.conf created as /etc/nsswitch.conf.rpmnew + Installing : m4-1.4.16-10.el7.x86_64 3/16 + Updating : readline-6.2-11.el7.x86_64 4/16 + Installing : kernel-headers-3.10.0-1160.31.1.el7.x86_64 5/16 + Installing : glibc-headers-2.17-324.el7_9.x86_64 6/16 + Installing : ncurses-devel-5.9-14.20130511.el7_4.x86_64 7/16 + Installing : readline-devel-6.2-11.el7.x86_64 8/16 + Installing : glibc-devel-2.17-324.el7_9.x86_64 9/16 + Installing : bison-3.0.4-2.el7.x86_64 10/16 + Installing : flex-2.5.37-6.el7.x86_64 11/16 + Installing : patch-2.7.1-12.el7_7.x86_64 12/16 + Installing : libaio-devel-0.3.109-13.el7.x86_64 13/16 + Cleanup : readline-6.2-10.el7.x86_64 14/16 + Cleanup : glibc-common-2.17-260.el7.x86_64 15/16 + Cleanup : glibc-2.17-260.el7.x86_64 16/16 + Verifying : patch-2.7.1-12.el7_7.x86_64 1/16 + Verifying : bison-3.0.4-2.el7.x86_64 2/16 + Verifying : readline-6.2-11.el7.x86_64 3/16 + Verifying : 
glibc-2.17-324.el7_9.x86_64 4/16
+  Verifying  : glibc-common-2.17-324.el7_9.x86_64 5/16
+  Verifying  : ncurses-devel-5.9-14.20130511.el7_4.x86_64 6/16
+  Verifying  : glibc-headers-2.17-324.el7_9.x86_64 7/16
+  Verifying  : kernel-headers-3.10.0-1160.31.1.el7.x86_64 8/16
+  Verifying  : glibc-devel-2.17-324.el7_9.x86_64 9/16
+  Verifying  : flex-2.5.37-6.el7.x86_64 10/16
+  Verifying  : libaio-devel-0.3.109-13.el7.x86_64 11/16
+  Verifying  : m4-1.4.16-10.el7.x86_64 12/16
+  Verifying  : readline-devel-6.2-11.el7.x86_64 13/16
+  Verifying  : glibc-common-2.17-260.el7.x86_64 14/16
+  Verifying  : readline-6.2-10.el7.x86_64 15/16
+  Verifying  : glibc-2.17-260.el7.x86_64 16/16
+
+Installed:
+  bison.x86_64 0:3.0.4-2.el7 flex.x86_64 0:2.5.37-6.el7 glibc-devel.x86_64 0:2.17-324.el7_9 libaio-devel.x86_64 0:0.3.109-13.el7
+  ncurses-devel.x86_64 0:5.9-14.20130511.el7_4 patch.x86_64 0:2.7.1-12.el7_7 readline-devel.x86_64 0:6.2-11.el7
+
+Dependency Installed:
+  glibc-headers.x86_64 0:2.17-324.el7_9 kernel-headers.x86_64 0:3.10.0-1160.31.1.el7 m4.x86_64 0:1.4.16-10.el7
+
+Dependency Updated:
+  glibc.x86_64 0:2.17-324.el7_9 glibc-common.x86_64 0:2.17-324.el7_9 readline.x86_64 0:6.2-11.el7
+
+Complete!
+Python 3.6.8
+8.Configure Install Packages and change default Python version completed.
+
+net.ipv4.tcp_retries1 = 5
+net.ipv4.tcp_syn_retries = 5
+sysctl: cannot stat /proc/sys/net/sctp/path_max_retrans: No such file or directory
+sysctl: cannot stat /proc/sys/net/sctp/max_init_retransmits: No such file or directory
+* soft stack 3072
+* hard stack 3072
+* soft nofile 1000000
+* hard nofile 1000000
+* soft nproc unlimited
+10.Configure resource limits completed.
+[always] madvise never
+[always] madvise never
+11.Close transparent_hugepage completed.
+[root@node1 ~]#
+```
+
+## 4. Modify and run the script on the other nodes
+
+Adjust the HOSTNAME and HOSTIP parameters in OSprepare\_node1.sh to match each machine, then run the script as the root user on every remaining node.
+
+![](figures/OpenGauss一主一备一级安装4.png)
+
+## 5. Create the XML configuration file on the primary node
+
+Create the file as the root user. In practice, adapt the official configuration template to your own needs; the template is available at https://opengauss.org/zh/docs/2.0.1/docs/installation/%E5%88%9B%E5%BB%BAXML%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6.html
+
+```
+[root@node1 ~]# cd /opt/sofeware/openGauss/
+[root@node1 openGauss]# ls
+openGauss-2.0.1-CentOS-64bit-all.tar.gz
+[root@node1 openGauss]# vi cluster_config.xml
+<!-- cluster_config.xml contents omitted (cluster, device, and node1/node2/node3 definitions); see the official template above -->
+```
+
+## 6. Upload and extract the openGauss installation package
+
+```
+[root@node1 openGauss]# ll
+total 99152
+-rw-r--r--. 1 root root 2556 Jul 19 14:57 cluster_config.xml
+-rw-r--r--. 1 root root 101525905 Jul 19 14:52 openGauss-2.0.1-CentOS-64bit-all.tar.gz
+[root@node1 openGauss]# tar -zxvf openGauss-2.0.1-CentOS-64bit-all.tar.gz
+openGauss-2.0.1-CentOS-64bit-om.tar.gz
+openGauss-2.0.1-CentOS-64bit.tar.bz2
+openGauss-2.0.1-CentOS-64bit-om.sha256
+openGauss-2.0.1-CentOS-64bit.sha256
+upgrade_sql.tar.gz
+upgrade_sql.sha256
+[root@node1 openGauss]# ll
+total 199252
+-rw-r--r--. 1 root root 2556 Jul 19 14:57 cluster_config.xml
+-rw-r--r--. 1 root root 101525905 Jul 19 14:52 openGauss-2.0.1-CentOS-64bit-all.tar.gz
+-rw-r--r--. 1 root root 65 Jun 2 19:48 openGauss-2.0.1-CentOS-64bit-om.sha256
+-rw-r--r--. 1 root root 12647027 Jun 2 19:48 openGauss-2.0.1-CentOS-64bit-om.tar.gz
+-rw-r--r--. 1 root root 65 Jun 2 19:48 openGauss-2.0.1-CentOS-64bit.sha256
+-rw-r--r--. 1 root root 89705672 Jun 2 19:48 openGauss-2.0.1-CentOS-64bit.tar.bz2
+-rw-------. 1 root root 65 Jun 2 19:47 upgrade_sql.sha256
+-rw-------. 
1 root root 133700 Jun 2 19:47 upgrade_sql.tar.gz +[root@node1 openGauss]# tar -zxvf openGauss-2.0.1-CentOS-64bit-om.tar.gz +./lib/ +./lib/pyasn1/ +./lib/pyasn1/codec/ +./lib/pyasn1/codec/cer/ +./lib/pyasn1/codec/cer/__init__.py +./lib/pyasn1/codec/cer/decoder.py +./lib/pyasn1/codec/cer/encoder.py +./lib/pyasn1/codec/native/ +./lib/pyasn1/codec/native/__init__.py +./lib/pyasn1/codec/native/decoder.py +./lib/pyasn1/codec/native/encoder.py +./lib/pyasn1/codec/__init__.py +./lib/pyasn1/codec/der/ +./lib/pyasn1/codec/der/__init__.py +./lib/pyasn1/codec/der/decoder.py +./lib/pyasn1/codec/der/encoder.py +./lib/pyasn1/codec/ber/ +./lib/pyasn1/codec/ber/eoo.py +./lib/pyasn1/codec/ber/__init__.py +./lib/pyasn1/codec/ber/decoder.py +./lib/pyasn1/codec/ber/encoder.py +...... +...... +...... +...... +./simpleInstall/requirements_centos_x86_64 +./simpleInstall/one_master_one_slave.sh +./simpleInstall/requirements_openEuler_aarch64 +./simpleInstall/template.xml +./simpleInstall/school.sql +./simpleInstall/one_master_one_slave_template.xml +./simpleInstall/install.sh +./simpleInstall/requirements_openEuler_x86_64 +./simpleInstall/README.md +./simpleInstall/common.sh +./version.cfg +``` + +## 7. 执行下面命令准备安装环境 + +交互式过程需要输入三台root密码,安装前保证各个节点root密码相同。 + +``` +[root@node1 openGauss]# cd script/ +[root@node1 script]# ./gs_preinstall -U omm -G dbgrp -X /opt/sofeware/openGauss/cluster_config.xml +Parsing the configuration file. +Successfully parsed the configuration file. +Installing the tools on the local node. +Successfully installed the tools on the local node. +Are you sure you want to create trust for root (yes/no)? yes +Please enter password for root. +Password: +Creating SSH trust for the root permission user. +Checking network information. +All nodes in the network are Normal. +Successfully checked network information. +Creating SSH trust. +Creating the local key file. +Successfully created the local key files. +Appending local ID to authorized_keys. +Successfully appended local ID to authorized_keys. +Updating the known_hosts file. +Successfully updated the known_hosts file. +Appending authorized_key on the remote node. +Successfully appended authorized_key on all remote node. +Checking common authentication file content. +Successfully checked common authentication content. +Distributing SSH trust file to all node. +Successfully distributed SSH trust file to all node. +Verifying SSH trust on all hosts. +Successfully verified SSH trust on all hosts. +Successfully created SSH trust. +Successfully created SSH trust for the root permission user. +Setting pssh path +Successfully set core path. +Distributing package. +Begin to distribute package to tool path. +Successfully distribute package to tool path. +Begin to distribute package to package path. +Successfully distribute package to package path. +Successfully distributed package. +Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes +Please enter password for cluster user. +Password: +Please enter password for cluster user again. +Password: +Successfully created [omm] user on all nodes. +Preparing SSH service. +Successfully prepared SSH service. +Installing the tools in the cluster. +Successfully installed the tools in the cluster. +Checking hostname mapping. +Successfully checked hostname mapping. +Creating SSH trust for [omm] user. +Checking network information. +All nodes in the network are Normal. +Successfully checked network information. +Creating SSH trust. +Creating the local key file. 
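+# Note (annotation, not part of the gs_preinstall output): the SSH mutual-trust steps
+# in this transcript are ordinary key-based SSH trust. Conceptually they amount to the
+# following, sketched here for a peer node "node2" taken from cluster_config.xml:
+#   ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa   # create the local key file
+#   ssh-copy-id omm@node2                      # append the local ID to authorized_keys
+# gs_preinstall automates this for root and omm on every node in the cluster.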
+Successfully created the local key files. +Appending local ID to authorized_keys. +Successfully appended local ID to authorized_keys. +Updating the known_hosts file. +Successfully updated the known_hosts file. +Appending authorized_key on the remote node. +Successfully appended authorized_key on all remote node. +Checking common authentication file content. +Successfully checked common authentication content. +Distributing SSH trust file to all node. +Successfully distributed SSH trust file to all node. +Verifying SSH trust on all hosts. +Successfully verified SSH trust on all hosts. +Successfully created SSH trust. +Successfully created SSH trust for [omm] user. +Checking OS software. +Successfully check os software. +Checking OS version. +Successfully checked OS version. +Creating cluster's path. +Successfully created cluster's path. +Setting SCTP service. +Successfully set SCTP service. +Set and check OS parameter. +Setting OS parameters. +Successfully set OS parameters. +Warning: Installation environment contains some warning messages. +Please get more details by "/opt/sofeware/openGauss/script/gs_checkos -i A -h node1,node2,node3 --detail". +Set and check OS parameter completed. +Preparing CRON service. +Successfully prepared CRON service. +Setting user environmental variables. +Successfully set user environmental variables. +Setting the dynamic link library. +Successfully set the dynamic link library. +Setting Core file +Successfully set core path. +Setting pssh path +Successfully set pssh path. +Set ARM Optimization. +No need to set ARM Optimization. +Fixing server package owner. +Setting finish flag. +Successfully set finish flag. +Preinstallation succeeded. +``` + +## 8. 执行安装命令 + +安装命令需要在omm用户下执行。 + +``` +[root@node1 script]# su - omm +Last login: Mon Jul 19 15:05:04 CST 2021 +``` + +``` +[omm@node1 ~]$ gs_install -X /opt/sofeware/openGauss/cluster_config.xml --gsinit-parameter="--encoding=UTF8" --dn-guc="max_process_memory=2GB" --dn-guc="shared_buffers=128MB" --dn-guc="cstore_buffers=16MB" +Parsing the configuration file. +Check preinstall on every node. +Successfully checked preinstall on every node. +Creating the backup directory. +Successfully created the backup directory. +begin deploy.. +Installing the cluster. +begin prepare Install Cluster.. +Checking the installation environment on all nodes. +begin install Cluster.. +Installing applications on all nodes. +Successfully installed APP. +begin init Instance.. +encrypt cipher and rand files for database. +Please enter password for database: +Please repeat for database: +begin to create CA cert files +The sslcert will be generated in /opt/huawei/install/app/share/sslcert/om +Cluster installation is completed. +Configuring. +Deleting instances from all nodes. +Successfully deleted instances from all nodes. +Checking node configuration on all nodes. +Initializing instances on all nodes. +Updating instance configuration on all nodes. +Check consistence of memCheck and coresCheck on database nodes. +Successful check consistence of memCheck and coresCheck on all nodes. +Configuring pg_hba on all nodes. +Configuration is completed. +Successfully started cluster. +Successfully installed application. +end deploy.. +``` + +## 9. 
Check the database status and create a test database
+
+```
+[omm@node1 ~]$ gs_om -t status
+-----------------------------------------------------------------------
+
+cluster_name : Cluster_openGauss
+cluster_state : Normal
+redistributing : No
+
+-----------------------------------------------------------------------
+[omm@node1 ~]$ gs_om -t status --detail
+[ Cluster State ]
+
+cluster_state : Normal
+redistributing : No
+current_az : AZ_ALL
+
+[ Datanode State ]
+
+node node_ip instance state | node node_ip instance state | node node_ip instance state
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+1 node1 192.168.59.26 6001 /opt/huawei/install/data/d1 P Primary Normal | 2 node2 192.168.59.27 6002 /opt/huawei/install/data/d2 S Standby Normal | 3 node3 192.168.59.28 6003 /opt/huawei/install/data/d3 C Cascade Normal
+[omm@node1 ~]$ gsql -d postgres -p 15400 -r
+gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+postgres=# \l
+ List of databases
+ Name | Owner | Encoding | Collate | Ctype | Access privileges
+-----------+-------+----------+---------+-------+-------------------
+ postgres | omm | UTF8 | C | C |
+ template0 | omm | UTF8 | C | C | =c/omm +
+ | | | | | omm=CTc/omm
+ template1 | omm | UTF8 | C | C | =c/omm +
+ | | | | | omm=CTc/omm
+(3 rows)
+
+postgres=# create database mydb with encoding 'GBK' template = template0;
+CREATE DATABASE
+postgres=# \l
+ List of databases
+ Name | Owner | Encoding | Collate | Ctype | Access privileges
+-----------+-------+----------+---------+-------+-------------------
+ mydb | omm | GBK | C | C |
+ postgres | omm | UTF8 | C | C |
+ template0 | omm | UTF8 | C | C | =c/omm +
+ | | | | | omm=CTc/omm
+ template1 | omm | UTF8 | C | C | =c/omm +
+ | | | | | omm=CTc/omm
+(4 rows)
+
+postgres=# \q
+[omm@node1 ~]$
+```
+
+This completes the installation and deployment of one primary, one standby, and one cascaded standby.
+
+## Errors encountered and how to resolve them
+
+After the OSprepare\_node1.sh script has run, the system python command points to python3, so any later use of yum fails with the errors shown below.
+
+To fix this, change the interpreter line at the top of the two files /usr/bin/yum and /usr/libexec/urlgrabber-ext-down from python to python2.
+
+![](figures/OpenGauss一主一备一级安装5.png)
+
+```
+[root@node1 ~]# yum install mlocate
+  File "/usr/bin/yum", line 30
+    except KeyboardInterrupt, e:
+                             ^
+SyntaxError: invalid syntax
+[root@node1 ~]# vi vi /usr/bin/yum
+2 files to edit
+[root@node1 ~]# yum install mlocate
+Loaded plugins: fastestmirror
+Loading mirror speeds from cached hostfile
+ * base: mirrors.aliyun.com
+ * extras: mirrors.aliyun.com
+ * updates: mirrors.aliyun.com
+Resolving Dependencies
+--> Running transaction check
+---> Package mlocate.x86_64 0:0.26-8.el7 will be installed
+--> Finished Dependency Resolution
+
+Dependencies Resolved
+
+============================================================================================================================================================================
+ Package Arch Version Repository Size
+============================================================================================================================================================================
+Installing:
+ mlocate x86_64 0.26-8.el7 base 113 k
+
+Transaction Summary
+============================================================================================================================================================================
+Install 1 Package
+ 
+Total download size: 113 k +Installed size: 379 k +Is this ok [y/d/N]: y +Downloading packages: + File "/usr/libexec/urlgrabber-ext-down", line 28 + except OSError, e: + ^ +SyntaxError: invalid syntax + +Exiting on user cancel +[root@node1 ~]# vi /usr/libexec/urlgrabber-ext-down +[root@node1 ~]# yum install mlocate +Loaded plugins: fastestmirror +Loading mirror speeds from cached hostfile + * base: mirrors.aliyun.com + * extras: mirrors.aliyun.com + * updates: mirrors.aliyun.com +Resolving Dependencies +--> Running transaction check +---> Package mlocate.x86_64 0:0.26-8.el7 will be installed +--> Finished Dependency Resolution + +Dependencies Resolved +============================================================================================================================================================================ + Package Arch Version Repository Size +============================================================================================================================================================================ +Installing: + mlocate x86_64 0.26-8.el7 base 113 k + +Transaction Summary +============================================================================================================================================================================ +Install 1 Package + +Total download size: 113 k +Installed size: 379 k +Is this ok [y/d/N]: y +Downloading packages: +mlocate-0.26-8.el7.x86_64.rpm | 113 kB 00:00:00 +Running transaction check +Running transaction test +Transaction test succeeded +Running transaction + Installing : mlocate-0.26-8.el7.x86_64 1/1 + Verifying : mlocate-0.26-8.el7.x86_64 1/1 +Installed: + mlocate.x86_64 0:0.26-8.el7 +Complete! +``` + diff --git "a/content/zh/post/July/openGauss\345\220\257\345\212\250-\345\201\234\346\255\242-\346\237\245\347\234\213\347\212\266\346\200\201-\345\210\207\346\215\242\344\270\273\345\244\207.md" "b/content/zh/post/July/openGauss\345\220\257\345\212\250-\345\201\234\346\255\242-\346\237\245\347\234\213\347\212\266\346\200\201-\345\210\207\346\215\242\344\270\273\345\244\207.md" new file mode 100644 index 0000000000000000000000000000000000000000..abb6e6bcad3dd7162646bb2b3b95dc6e4afec749 --- /dev/null +++ "b/content/zh/post/July/openGauss\345\220\257\345\212\250-\345\201\234\346\255\242-\346\237\245\347\234\213\347\212\266\346\200\201-\345\210\207\346\215\242\344\270\273\345\244\207.md" @@ -0,0 +1,316 @@ ++++ + +title = "openGauss启动、停止、查看状态、切换主备" + +date = "2021-07-21" + +tags = [ "openGauss启动、停止、查看状态、切换主备"] + +archives = "2021-07" + +author = "Walrus" + +summary = "openGauss启动、停止、查看状态、切换主备" + +img = "/zh/post/July/title/img2.png" + +times = "12:30" + ++++ + +# openGauss启动、停止、查看状态、切换主备 + +## 1.查看各节点状态 + +``` +Last login: Mon Jul 19 17:27:53 CST 2021 on pts/0 +[omm@node1 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Normal +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + +node node_ip instance state | node node_ip instance state | node node_ip instance state +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +1 node1 192.168.59.26 6001 opt/huawei/install/data/d1 P Primary Normal | 2 node2 192.168.59.27 6002 opt/huawei/install/data/d2 S Standby Normal | 3 node3 192.168.59.28 6003 opt/huawei/install/data/d3 C Cascade Normal +[omm@node1 ~]$ gs_om -t status -h node2 +----------------------------------------------------------------------- + +cluster_state : 
Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 2 +node_name : node2 +instance_id : 6002 +node_ip : 192.168.59.27 +data_path : /opt/huawei/install/data/d2 +type : Datanode +instance_state : Normal +az_name : AZ1 +instance_role : Standby +HA_state : Streaming +sender_sent_location : 0/6845098 +sender_write_location : 0/6845098 +sender_flush_location : 0/6845098 +sender_replay_location : 0/6845098 +receiver_received_location: 0/6845098 +receiver_write_location : 0/6845098 +receiver_flush_location : 0/6845098 +receiver_replay_location : 0/6845098 +sync_percent : 100% +sync_state : Async + +----------------------------------------------------------------------- + +[omm@node1 ~]$ +[omm@node1 ~]$ +[omm@node1 ~]$ +[omm@node1 ~]$ gs_om -t status -h node3 +----------------------------------------------------------------------- + +cluster_state : Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 3 +node_name : node3 +instance_id : 6003 +node_ip : 192.168.59.28 +data_path : /opt/huawei/install/data/d3 +type : Datanode +instance_state : Normal +az_name : AZ1 +instance_role : Cascade Standby +HA_state : Normal +sender_sent_location : 0/68451B0 +sender_write_location : 0/68451B0 +sender_flush_location : 0/68451B0 +sender_replay_location : 0/68451B0 +receiver_received_location: 0/68451B0 +receiver_write_location : 0/68451B0 +receiver_flush_location : 0/68451B0 +receiver_replay_location : 0/68451B0 +sync_percent : 100% +sync_state : Async +upstream_nodeIp : 192.168.59.27:15401 + +----------------------------------------------------------------------- + +[omm@node1 ~]$ gs_om -t status -h node1 +----------------------------------------------------------------------- + +cluster_state : Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 1 +node_name : node1 +instance_id : 6001 +node_ip : 192.168.59.26 +data_path : /opt/huawei/install/data/d1 +type : Datanode +instance_state : Normal +az_name : AZ1 +static_connections : 2 +HA_state : Normal +instance_role : Primary + +----------------------------------------------------------------------- +``` + +## 2.关闭、启动、重启openGauss群集 + +``` +[omm@node1 ~]$ gs_om -t stop +Stopping cluster. +========================================= +Successfully stopped cluster. +========================================= +End stop cluster. +[omm@node1 ~]$ gs_om -t status +----------------------------------------------------------------------- + +cluster_name : Cluster_openGauss +cluster_state : Unavailable +redistributing : No + +----------------------------------------------------------------------- +[omm@node1 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Unavailable +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + +node node_ip instance state | node node_ip instance state | node node_ip instance state +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +1 node1 192.168.59.26 6001 opt/huawei/install/data/d1 P Down Manually stopped | 2 node2 192.168.59.27 6002 opt/huawei/install/data/d2 S Down Manually stopped | 3 node3 192.168.59.28 6003 opt/huawei/install/data/d3 C Down Manually stopped +[omm@node1 ~]$ gs_om -t start +Starting cluster. 
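+# Note (annotation, not part of the gs_om output): the memory-protect WARNINGs printed
+# below stem from the deliberately small memory GUCs chosen at install time on this
+# 4 GB VM (max_process_memory=2GB, cstore_buffers=16MB). On a host with more RAM they
+# can be raised afterwards, for example (sizing is an assumption, adjust to the host):
+#   gs_guc set -N all -I all -c "max_process_memory=4GB"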
+========================================= +[SUCCESS] node1 +2021-07-20 17:53:25.332 60f69d15.1 [unknown] 139907310331648 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (1496 Mbytes) is larger. +[SUCCESS] node2 +2021-07-20 17:53:28.046 60f69d17.1 [unknown] 140135379003136 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (1496 Mbytes) is larger. +[SUCCESS] node3 +2021-07-20 17:53:30.896 60f69d1a.1 [unknown] 139820708103936 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (1496 Mbytes) is larger. +========================================= +Successfully started. +[omm@node1 ~]$ gs_om -t status +----------------------------------------------------------------------- + +cluster_name : Cluster_openGauss +cluster_state : Normal +redistributing : No +----------------------------------------------------------------------- +[omm@node1 ~]$ +[omm@node1 ~]$ gs_om -t stop & gs_om -t start +[1] 7772 +Stopping cluster. +========================================= +Starting cluster. +========================================= +[SUCCESS] node1 +2021-07-20 17:54:48.867 60f69d68.1 [unknown] 139885120349952 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (1496 Mbytes) is larger. +Successfully stopped cluster. +========================================= +End stop cluster. +[SUCCESS] node2 +2021-07-20 17:54:51.576 60f69d6b.1 [unknown] 140327226377984 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (1496 Mbytes) is larger. +[SUCCESS] node3 +2021-07-20 17:54:53.604 60f69d6d.1 [unknown] 140109769361152 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (1496 Mbytes) is larger. +========================================= +Successfully started. +[1]+ Done gs_om -t stop +[omm@node1 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Normal +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + +node node_ip instance state | node node_ip instance state | node node_ip instance state +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +1 node1 192.168.59.26 6001 opt/huawei/install/data/d1 P Standby Normal | 2 node2 192.168.59.27 6002 opt/huawei/install/data/d2 S Primary Normal | 3 node3 192.168.59.28 6003 opt/huawei/install/data/d3 C Cascade Normal +``` + +## 3. 切换主备 + +登录备机,在omm用户下操作。 + +``` +[omm@node1 ~]$ gs_ctl switchover -D /opt/huawei/install/data/d1/ +[2021-07-20 17:59:51.465][9769][][gs_ctl]: gs_ctl switchover ,datadir is opt/huawei/install/data/d1 +[2021-07-20 17:59:51.465][9769][][gs_ctl]: switchover term (1) +[2021-07-20 17:59:51.474][9769][][gs_ctl]: waiting for server to switchover......... 
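+# Note (annotation, not part of the gs_ctl output): once the switchover completes, the
+# openGauss documentation recommends refreshing the static configuration file so that
+# gs_om consistently reports the new primary/standby roles:
+#   gs_om -t refreshconf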
+[2021-07-20 17:59:57.527][9769][][gs_ctl]: done +[2021-07-20 17:59:57.527][9769][][gs_ctl]: switchover completed (/opt/huawei/install/data/d1) +[omm@node1 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Normal +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + +node node_ip instance state | node node_ip instance state | node node_ip instance state +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +1 node1 192.168.59.26 6001 opt/huawei/install/data/d1 P Primary Normal | 2 node2 192.168.59.27 6002 opt/huawei/install/data/d2 S Standby Normal | 3 node3 192.168.59.28 6003 opt/huawei/install/data/d3 C Cascade Normal +[omm@node1 ~]$ gs_om -t status -h node1 +----------------------------------------------------------------------- + +cluster_state : Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 1 +node_name : node1 +instance_id : 6001 +node_ip : 192.168.59.26 +data_path : /opt/huawei/install/data/d1 +type : Datanode +instance_state : Normal +az_name : AZ1 +static_connections : 2 +HA_state : Normal +instance_role : Primary + +----------------------------------------------------------------------- + +[omm@node1 ~]$ gs_om -t status -h node2 +----------------------------------------------------------------------- + +cluster_state : Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 2 +node_name : node2 +instance_id : 6002 +node_ip : 192.168.59.27 +data_path : /opt/huawei/install/data/d2 +type : Datanode +instance_state : Normal +az_name : AZ1 +instance_role : Standby +HA_state : Streaming +sender_sent_location : 0/68475E0 +sender_write_location : 0/68475E0 +sender_flush_location : 0/68475E0 +sender_replay_location : 0/68475E0 +receiver_received_location: 0/68475E0 +receiver_write_location : 0/68475E0 +receiver_flush_location : 0/68475E0 +receiver_replay_location : 0/68475E0 +sync_percent : 100% +sync_state : Async + +----------------------------------------------------------------------- + +[omm@node1 ~]$ gs_om -t status -h node3 +----------------------------------------------------------------------- + +cluster_state : Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 3 +node_name : node3 +instance_id : 6003 +node_ip : 192.168.59.28 +data_path : /opt/huawei/install/data/d3 +type : Datanode +instance_state : Normal +az_name : AZ1 +instance_role : Cascade Standby +HA_state : Normal +sender_sent_location : 0/68475E0 +sender_write_location : 0/68475E0 +sender_flush_location : 0/68475E0 +sender_replay_location : 0/68475E0 +receiver_received_location: 0/68475E0 +receiver_write_location : 0/68475E0 +receiver_flush_location : 0/68475E0 +receiver_replay_location : 0/68475E0 +sync_percent : 100% +sync_state : Async +upstream_nodeIp : 192.168.59.27:15401 + +----------------------------------------------------------------------- +``` + diff --git "a/content/zh/post/July/openGauss\345\244\207\344\273\275\346\201\242\345\244\215.md" "b/content/zh/post/July/openGauss\345\244\207\344\273\275\346\201\242\345\244\215.md" new file mode 100644 index 0000000000000000000000000000000000000000..be19aa1a8889047949ccfd7a5488d172f463e18c --- /dev/null +++ "b/content/zh/post/July/openGauss\345\244\207\344\273\275\346\201\242\345\244\215.md" @@ -0,0 +1,558 @@ ++++ + +title = 
"openGauss备份恢复" + +date = "2021-09-20" + +tags = [ "openGauss备份恢复"] + +archives = "2021-09" + +author = "李宏达" + +summary = "openGauss备份恢复" + +img = "/zh/post/July/title/img7.png" + +times = "12:30" + ++++ + +# openGauss备份恢复 + +## gs\_probackup + +- docker + + - 1.0.1 + + ``` + docker run --name brm_opengauss \ + --privileged=true -d -e GS_PASSWORD=mtkOP@123 \ + -v `pwd`/conf/brm.yaml:/etc/brm.yaml \ + -v `pwd`/var/lib/brm:/var/lib/brm \ + -v `pwd`/var/log/brm:/var/log/brm \ + enmotech/opengauss:1.0.1 + ``` + + - 1.1.0 + + ``` + docker run --name brm_opengauss_1230 \ + --privileged=true -d -e GS_PASSWORD=mtkOP@123 \ + -v `pwd`/conf/brm.yaml:/etc/brm.yaml \ + -v `pwd`/var/lib/brm:/var/lib/brm \ + -v `pwd`/var/log/brm:/var/log/brm \ + enmotech/opengauss:1.1.0 + ``` + + + +## 初始化 + +``` +export BACKUP_PATH=/home/omm/backup +gs_probackup init +``` + +## 添加实例 + +``` +gs_probackup add-instance --instance testdb --pgdata +``` + +## 备份 + +``` +gs_probackup backup --instance testdb -b full +``` + +## 配置数据库归档 + +设置参数 + +目录为 /wal/ + +``` +archive_mode = on +archive_command = 'cp %p /wal//%f' +# cp %p /usr/local/pgsql/data/pg_archive/%f' +``` + +## 查看备份 + +``` +gs_probackup show --instance testdb +``` + +- 查看归档备份 + + ``` + gs_probackup show --instance testdb --archive + ``` + + +## 恢复 + +``` +gs_probackup restore -B backup-path --instance=instance_name + [-D pgdata-path] [-i backup-id] [-j threads_num] [--progress] + [--force] [--no-sync] [--no-validate] [--skip-block-validation] + [--external-mapping=OLDDIR=NEWDIR] [-T OLDDIR=NEWDIR] + [--skip-external-dirs] [-I incremental_mode] + [--recovery-target-time=time|--recovery-target-xid=xid + |--recovery-target-lsn=lsn|--recovery-target-name=target-name] + [--recovery-target-inclusive=boolean] + [--recovery-target-timeline=timeline] + [--recovery-target=immediate|latest] + [--recovery-target-action=pause|promote|shutdown] + [--restore-command=cmdline] + [--remote-proto=protocol] [--remote-host=destination] + [--remote-path=path] [--remote-user=username] + [--remote-port=port] [--ssh-options=ssh_options] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] + + -B, --backup-path=backup-path location of the backup storage area + --instance=instance_name name of the instance + -D, --pgdata=pgdata-path location of the database storage area + -i, --backup-id=backup-id backup to restore + -j, --threads=threads_num number of parallel threads + --progress show progress + --force ignore invalid status of the restored backup + --no-sync do not sync restored files to disk + --no-validate disable backup validation during restore + --skip-block-validation set to validate only file-level checksum + --external-mapping=OLDDIR=NEWDIR + relocate the external directory from OLDDIR to NEWDIR + -T, --tablespace-mapping=OLDDIR=NEWDIR + relocate the tablespace from directory OLDDIR to NEWDIR + --skip-external-dirs do not restore all external directories + -I, --incremental-mode=none|checksum|lsn + reuse valid pages available in PGDATA if they have not changed + (default: none) +``` + +## 场景 + +- 环境配置 + + ``` + gs_probackup + + export BACKUP_PATH=/var/lib/brm + gs_probackup init + gs_probackup add-instance --instance testdb01 -D /var/lib/opengauss/data + gs_probackup set-config --instance testdb01 --pgdatabase postgres + gs_probackup show-config --instance testdb01 + 
# Backup instance information + pgdata = /var/lib/opengauss/data + system-identifier = 6910097200378281726 + # Connection parameters + pgdatabase = postgres + # Archive parameters + archive-timeout = 5min + # Logging parameters + log-level-console = LOG + log-level-file = OFF + log-filename = pg_probackup.log + log-rotation-size = 0TB + log-rotation-age = 0d + # Retention parameters + retention-redundancy = 0 + retention-window = 0 + wal-depth = 0 + # Compression parameters + compress-algorithm = none + compress-level = 1 + # Remote access parameters + remote-proto = ssh + ``` + + +- 数据库设置 + + ``` + [omm@0150b32d2461 ~]$ gsql + gsql ((openGauss 1.0.1 build e9da9fb9) compiled at 2020-10-01 13:58:32 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + omm=# show archive_mode; + archive_mode + -------------- + on + (1 row) + + omm=# show archive_command; + archive_command + ------------------------------------ + cp %p /var/lib/brm/wal/testdb01/%f + (1 row) + + omm=# select pg_switch_xlog(); + pg_switch_xlog + ---------------- + 0/72000150 + (1 row) + ``` + + +- 查看归档 + + ``` + [omm@0150b32d2461 ~]$ ls -l /var/lib/brm/wal/testdb01/ + total 49152 + -rw------- 1 omm omm 16777216 Jan 11 03:30 00000001000000000000002F + -rw------- 1 omm omm 16777216 Jan 11 03:30 000000010000000000000071 + -rw------- 1 omm omm 16777216 Jan 11 03:30 000000010000000000000072 + ``` + +- 模拟基础环境 + + ``` + [omm@0150b32d2461 ~]$ gsql + gsql ((openGauss 1.0.1 build e9da9fb9) compiled at 2020-10-01 13:58:32 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + omm=# create table brm_test(t timestamp); + CREATE TABLE + + omm=# insert into brm_test values(now()); + INSERT 0 1 + omm=# select * from brm_test; + t + ---------------------------- + 2021-01-11 03:33:40.737837 + 2021-01-11 03:38:46.32794 + 2021-01-11 03:39:42.466014 + 2021-01-11 03:40:02.816579 + 2021-01-11 07:29:21.98839 + (5 rows) + ``` + + +- 全备份 + + ``` + [omm@0150b32d2461 ~]$ gs_probackup backup --instance testdb01 -b full + INFO: Backup start, pg_probackup version: 2.4.2, instance: testdb01, backup ID: QMRFD9, backup mode: FULL, wal mode: STREAM, remote: false, compress-algorithm: none, compress-level: 1 + LOG: Backup destination is initialized + WARNING: This PostgreSQL instance was initialized without data block checksums. pg_probackup have no way to detect data block corruption without them. Reinitialize PGDATA with option '--data-checksums'. 
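+ # Note (annotation, not part of the gs_probackup output): the checksum WARNING above
+ # concerns the instance, not this backup. Block checksums can only be enabled when the
+ # cluster is first initialized (the --data-checksums option of gs_initdb); without
+ # them gs_probackup cannot detect silently corrupted data blocks while copying files.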
+ LOG: Database backup start + LOG: started streaming WAL at 0/86000000 (timeline 1) + check identify system success + send START_REPLICATION 0/86000000 success + keepalive message is received + INFO: PGDATA size: 317MB + INFO: Start transferring data files + LOG: Creating page header map "/var/lib/brm/backups/testdb01/QMRFD9/page_header_map" + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + keepalive message is received + INFO: Data files are transferred, time elapsed: 31s + INFO: wait for pg_stop_backup() + keepalive message is received + INFO: pg_stop backup() successfully executed + LOG: stop_lsn: 0/860001D0 + LOG: Looking for LSN 0/860001D0 in segment: 000000010000000000000086 + LOG: Found WAL segment: /var/lib/brm/backups/testdb01/QMRFD9/database/pg_xlog/000000010000000000000086 + LOG: Thread [0]: Opening WAL segment "/var/lib/brm/backups/testdb01/QMRFD9/database/pg_xlog/000000010000000000000086" + LOG: Found LSN: 0/860001D0 + (null): not renaming 000000010000000000000087, segment is not complete. + LOG: finished streaming WAL at 0/87000130 (timeline 1) + LOG: Getting the Recovery Time from WAL + LOG: Thread [0]: Opening WAL segment "/var/lib/brm/backups/testdb01/QMRFD9/database/pg_xlog/000000010000000000000086" + INFO: Syncing backup files to disk + INFO: Backup files are synced, time elapsed: 2s + INFO: Validating backup QMRFD9 + INFO: Backup QMRFD9 data files are valid + INFO: Backup QMRFD9 resident size: 349MB + INFO: Backup QMRFD9 completed + ``` + +- 模拟增量数据 + + ``` + gsql ((openGauss 1.0.1 build e9da9fb9) compiled at 2020-10-01 13:58:32 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. 
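+ -- Note (annotation, not part of the session): pg_switch_xlog() closes the WAL segment
+ -- currently being written and makes it eligible for archiving, which is what pushes
+ -- the changes from this session into /var/lib/brm/wal/testdb01 for later PITR replay.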
+ omm=# select pg_current_xlog_location(), + pg_xlogfile_name(pg_current_xlog_location()), + pg_xlogfile_name(pg_current_xlog_location()), + txid_current(), + now();omm-# omm-# omm-# omm-# + pg_current_xlog_location | pg_xlogfile_name | pg_xlogfile_name | txid_current | now + --------------------------+--------------------------+--------------------------+--------------+------------------------------- + 0/87000130 | 000000010000000000000087 | 000000010000000000000087 | 11209 | 2021-01-11 07:57:25.414668+00 + (1 row) + + omm=# select pg_switch_xlog(); + pg_switch_xlog + ---------------- + 0/870001D0 + (1 row) + + omm=# insert into brm_test values(now()); + INSERT 0 1 + omm=# select pg_current_xlog_location(), + pg_xlogfile_name(pg_current_xlog_location()), + pg_xlogfile_name(pg_current_xlog_location()), + txid_current(), + now(); + omm-# omm-# omm-# omm-# pg_current_xlog_location | pg_xlogfile_name | pg_xlogfile_name | txid_current | now + --------------------------+--------------------------+--------------------------+--------------+------------------------------- + 0/88000208 | 000000010000000000000088 | 000000010000000000000088 | 11211 | 2021-01-11 07:57:40.428398+00 + (1 row) + + omm=# select pg_switch_xlog(); + pg_switch_xlog + ---------------- + 0/880002A8 + (1 row) + + omm=# select pg_switch_xlog(); + pg_switch_xlog + ---------------- + 0/89000150 + (1 row) + + omm=# insert into brm_test values(now()); + INSERT 0 1 + omm=# + omm=# select pg_current_xlog_location(), + pg_xlogfile_name(pg_current_xlog_location()), + pg_xlogfile_name(pg_current_xlog_location()), + txid_current(), + now();omm-# omm-# omm-# omm-# + pg_current_xlog_location | pg_xlogfile_name | pg_xlogfile_name | txid_current | now + --------------------------+--------------------------+--------------------------+--------------+------------------------------- + 0/8A000208 | 00000001000000000000008A | 00000001000000000000008A | 11213 | 2021-01-11 07:58:06.702327+00 + (1 row) + + omm=# select pg_switch_xlog(); + pg_switch_xlog + ---------------- + 0/8A0002A8 + (1 row) + + omm=# select pg_current_xlog_location(), + pg_xlogfile_name(pg_current_xlog_location()), + pg_xlogfile_name(pg_current_xlog_location()), + txid_current(), + now(); + omm-# omm-# omm-# omm-# pg_current_xlog_location | pg_xlogfile_name | pg_xlogfile_name | txid_current | now + --------------------------+--------------------------+--------------------------+--------------+------------------------------- + 0/8B000130 | 00000001000000000000008B | 00000001000000000000008B | 11214 | 2021-01-11 07:58:15.204024+00 + (1 row) + + omm=# + ``` + + +- 查看备份信息 + + ``` + [omm@0150b32d2461 ~]$ gs_probackup show --archive + + ARCHIVE INSTANCE 'testdb01' + =============================================================================================================================== + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status + =============================================================================================================================== + 1 0 0/0 000000010000000000000086 00000001000000000000008A 5 80MB 1.00 1 OK + [omm@0150b32d2461 ~]$ gs_probackup show + + BACKUP INSTANCE 'testdb01' + =================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status + 
=================================================================================================================================== + testdb01 9.2 QMRFD9 2021-01-11 07:56:30+00 FULL STREAM 1/0 41s 333MB 16MB 0.95 0/86000028 0/860001D0 OK + ``` + +- 基于时间点的恢复 + + 恢复全量备份 —\> 2 .用户指定了xid/time/lsn. brm进行遍历所有备份,找出最近的备份集通过gs\_probackup进行恢复 + + ``` + [omm@0150b32d2461 ~]$ gs_probackup restore --instance testdb01 -D /home/omm/a1/ -i QMRFD9 + LOG: Restore begin. + LOG: there is no file tablespace_map + LOG: check tablespace directories of backup QMRFD9 + LOG: check external directories of backup QMRFD9 + INFO: Validating backup QMRFD9 + INFO: Backup QMRFD9 data files are valid + LOG: Thread [1]: Opening WAL segment "/var/lib/brm/backups/testdb01/QMRFD9/database/pg_xlog/000000010000000000000086" + INFO: Backup QMRFD9 WAL segments are valid + INFO: Backup QMRFD9 is valid. + INFO: Restoring the database from backup at 2021-01-11 07:55:57+00 + LOG: there is no file tablespace_map + LOG: Restore directories and symlinks... + INFO: Start restoring backup files. PGDATA size: 333MB + LOG: Start thread 1 + INFO: Backup files are restored. Transfered bytes: 349MB, time elapsed: 2s + INFO: Restore incremental ratio (less is better): 105% (349MB/333MB) + INFO: Syncing restored files to disk + INFO: Restored backup files are synced, time elapsed: 0 + INFO: Restore of backup QMRFD9 completed. + ``` + +- 编辑recover.conf —\> 3. 如果没有指定time/lsn/xid不生成recover.conf文件.如果指定了生成recover.conf. + + ``` + vi a1/recover.conf + # recovery_target_time = '2021-01-11 03:40:02+00' + recovery_target_lsn = '0/880002A8' + #recovery_target_action = 'pause' + %p --> pg_xlog/000000010000000000000001 + %f --> 000000010000000000000001 + restore_command = 'cp /var/lib/brm/wal/testdb01/%f %p' + # restore_command = 'brm get-wal -f %f -p %p' + pause_at_recovery_target = true + ``` + +- 编辑配置文件(同一台防止端口冲突关闭归档 —\> 4. 是否需要配置postgres.conf文件 + + ``` + echo "port=6433" >> a1/postgresql.conf + echo "archive_mode=off" >> a1/postgresql.conf + ``` + +- 启动实例 + + gs\_ctl start -D /home/omm/a1 —\> 5. 恢复成功进行gs\_ctl start -D 恢复目录 + + ``` + [2021-01-11 08:14:56.533][313][][gs_ctl]: gs_ctl started,datadir is -D "/home/omm/a1" + [2021-01-11 08:14:56.576][313][][gs_ctl]: port:5432 already in use. /proc/net/tcp: + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 00000000:1538 00000000:0000 0A 00000000:00000000 00:00000000 00000000 70 0 2236132 1 0000000000000000 100 0 0 10 0 + [2021-01-11 08:14:56.576][313][][gs_ctl]: CheckPort: popen(command:lsof -i:5432 | grep -E 'COMMAND|LISTEN'). + COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME + + GaussMast 1 omm 7u IPv4 2236132 0t0 TCP *:postgres (LISTEN) + + GaussMast 1 omm 8u IPv6 2236133 0t0 TCP *:postgres (LISTEN) + + [2021-01-11 08:14:56.673][313][][gs_ctl]: port conflict when start server + [2021-01-11 08:14:56.674][313][][gs_ctl]: waiting for server to start... + .0 LOG: The core dump path in core_pattern is an invalid directory. + 0 [BACKEND] LOG: Begin to start openGauss Database. + 2021-01-11 08:14:56.761 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: Transparent encryption disabled. + 2021-01-11 08:14:56.763 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] WARNING: could not create any HA TCP/IP sockets + 2021-01-11 08:14:56.765 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] WARNING: No explicit IP is configured for listen_addresses GUC. 
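+ # Note (annotation, not part of the log): the "port:5432 already in use" check above
+ # refers to the container's original instance, which still listens on 5432; the
+ # restored copy was given port=6433 in its postgresql.conf earlier, so for this copy
+ # the port messages are informational.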
+ 2021-01-11 08:14:56.765 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. + 2021-01-11 08:14:56.765 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: shared memory 321 Mbytes, memory context 11454 Mbytes, max process memory 12288 Mbytes + 2021-01-11 08:14:56.765 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: Initilize the memory protect with Process Chunks number 11454, change bits 20 + 2021-01-11 08:14:56.785 [unknown] [unknown] localhost 139701065868352 0 0 [CACHE] LOG: set data cache size(402653184) + 2021-01-11 08:14:56.796 [unknown] [unknown] localhost 139701065868352 0 0 [CACHE] LOG: set metadata cache size(134217728) + 2021-01-11 08:14:56.848 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: gaussdb: fsync file "/home/omm/a1/gaussdb.state.temp" success + 2021-01-11 08:14:56.849 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Normal) + 2021-01-11 08:14:56.908 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: max_safe_fds = 976, usable_fds = 1000, already_open = 14 + 2021-01-11 08:14:56.909 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: The core dump path in core_pattern is an invalid directory. + 2021-01-11 08:14:56.910 [unknown] [unknown] localhost 139701065868352 0 0 [BACKEND] LOG: Success to start openGauss Database. If you specify "&", please press any key to exit... + [2021-01-11 08:14:57.675][313][][gs_ctl]: waitpid 319 failed, exitstatus is 256, ret is 2 + + [2021-01-11 08:14:57.675][313][][gs_ctl]: stopped waiting + [2021-01-11 08:14:57.675][313][][gs_ctl]: could not start server + [2021-01-11 08:14:57.675][313][][gs_ctl]: Examine the log output. + [omm@0150b32d2461 ~]$ vi a1/recovery.conf + [omm@0150b32d2461 ~]$ gs_ctl start -D /home/omm/a1/ + [2021-01-11 08:15:29.342][352][][gs_ctl]: gs_ctl started,datadir is -D "/home/omm/a1" + [2021-01-11 08:15:29.401][352][][gs_ctl]: port:5432 already in use. /proc/net/tcp: + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 00000000:1538 00000000:0000 0A 00000000:00000000 00:00000000 00000000 70 0 2236132 1 0000000000000000 100 0 0 10 0 + [2021-01-11 08:15:29.401][352][][gs_ctl]: CheckPort: popen(command:lsof -i:5432 | grep -E 'COMMAND|LISTEN'). + COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME + + GaussMast 1 omm 7u IPv4 2236132 0t0 TCP *:postgres (LISTEN) + + GaussMast 1 omm 8u IPv6 2236133 0t0 TCP *:postgres (LISTEN) + + [2021-01-11 08:15:29.500][352][][gs_ctl]: port conflict when start server + [2021-01-11 08:15:29.500][352][][gs_ctl]: waiting for server to start... + .0 LOG: The core dump path in core_pattern is an invalid directory. + 0 [BACKEND] LOG: Begin to start openGauss Database. + 2021-01-11 08:15:29.627 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: Transparent encryption disabled. + 2021-01-11 08:15:29.628 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] WARNING: could not create any HA TCP/IP sockets + 2021-01-11 08:15:29.631 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] WARNING: No explicit IP is configured for listen_addresses GUC. + 2021-01-11 08:15:29.631 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. 
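+ # Note (annotation, not part of the log): the first gs_ctl start above was reported as
+ # failed; a plausible cause, given the validate error at the end of this post, is the
+ # recovery_target_lsn line in recovery.conf, which this 9.2-based kernel rejects.
+ # After a1/recovery.conf is edited, the second start comes up cleanly.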
+ 2021-01-11 08:15:29.631 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: shared memory 321 Mbytes, memory context 11454 Mbytes, max process memory 12288 Mbytes + 2021-01-11 08:15:29.631 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: Initilize the memory protect with Process Chunks number 11454, change bits 20 + 2021-01-11 08:15:29.659 [unknown] [unknown] localhost 140439454434368 0 0 [CACHE] LOG: set data cache size(402653184) + 2021-01-11 08:15:29.674 [unknown] [unknown] localhost 140439454434368 0 0 [CACHE] LOG: set metadata cache size(134217728) + 2021-01-11 08:15:29.741 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: gaussdb: fsync file "/home/omm/a1/gaussdb.state.temp" success + 2021-01-11 08:15:29.741 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Normal) + 2021-01-11 08:15:29.775 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: max_safe_fds = 976, usable_fds = 1000, already_open = 14 + 2021-01-11 08:15:29.775 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: The core dump path in core_pattern is an invalid directory. + 2021-01-11 08:15:29.777 [unknown] [unknown] localhost 140439454434368 0 0 [BACKEND] LOG: Success to start openGauss Database. If you specify "&", please press any key to exit... + + [2021-01-11 08:15:30.517][352][][gs_ctl]: done + [2021-01-11 08:15:30.517][352][][gs_ctl]: server started (/home/omm/a1) + ``` + +- 验证 + + ``` + [omm@0150b32d2461 ~]$ gsql -p6433 + gsql ((openGauss 1.0.1 build e9da9fb9) compiled at 2020-10-01 13:58:32 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + omm=# select * from brm_test; + t + ---------------------------- + 2021-01-11 03:33:40.737837 + 2021-01-11 03:38:46.32794 + 2021-01-11 03:39:42.466014 + 2021-01-11 03:40:02.816579 + 2021-01-11 07:29:21.98839 + 2021-01-11 07:57:36.799356 + (6 rows) + ``` + + +## lsn + +``` +[omm@7ec0d4302ea3 ~]$ gs_probackup validate --instance testdb01 -D /home/omm/a1/ --recovery-target-lsn=0/79000228 +LOG: Validate begin. +INFO: Validating backup QMR39R +INFO: Backup QMR39R data files are valid +LOG: Thread [1]: Opening WAL segment "/var/lib/brm/backups/testdb01/QMR39R/database/pg_xlog/000000010000000000000076" +LOG: Extracting pagemap from tli 1 on range from 0/760001D0 to 0/0 +LOG: Thread [1]: Opening WAL segment "/var/lib/brm/wal/testdb01/000000010000000000000076" +WARNING: Thread [1]: Could not read WAL record at 0/77000000: read xlog page failed at 0/77000028 +ERROR: Thread [1]: WAL segment "/var/lib/brm/wal/testdb01/000000010000000000000076" is absent +WARNING: Recovery can be done up to time 2021-01-11 03:35:03+00, xid 9930 and LSN 0/760001D0 +ERROR: Not enough WAL records to lsn 0/79000228 +[omm@7ec0d4302ea3 ~]$ ls -l /var/lib/brm/wal/testdb01/000000010000000000000076 +-rw------- 1 omm omm 16777216 Jan 11 03:35 /var/lib/brm/wal/testdb01/000000010000000000000076 +[omm@0150b32d2461 ~]$ gs_probackup restore --instance testdb01 --recovery-target-lsn='0/880002A8' --restore-command='cp /var/lib/brm/wal/testdb01/%f %p' --no-validate -D /home/omm/a1/ +LOG: Restore begin. +LOG: there is no file tablespace_map +LOG: check tablespace directories of backup QMRFD9 +# recovery.conf generated by pg_probackup 2.4.2 +LOG: check external directories of backup QMRFD9 +WARNING: Backup QMRFD9 is used without validation. 
+ERROR: Backup QMRFD9 was created for version 9.2 which doesn't support recovery_target_lsn ---> +``` + diff --git "a/content/zh/post/July/openGauss\345\256\211\350\243\205\344\270\216JDBC\350\277\236\346\216\245\357\274\210\344\274\201\344\270\232\347\211\210\357\274\211.md" "b/content/zh/post/July/openGauss\345\256\211\350\243\205\344\270\216JDBC\350\277\236\346\216\245\357\274\210\344\274\201\344\270\232\347\211\210\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..1bae0bf9c3387afcc32c17dfe0ee8301bf402700 --- /dev/null +++ "b/content/zh/post/July/openGauss\345\256\211\350\243\205\344\270\216JDBC\350\277\236\346\216\245\357\274\210\344\274\201\344\270\232\347\211\210\357\274\211.md" @@ -0,0 +1,378 @@ ++++ + +title = "openGauss安装与JDBC连接(企业版)" + +date = "2021-12-09" + +tags = [ "openGauss安装与JDBC连接(企业版)"] + +archives = "2021-12" + +author = "awei" + +summary = "openGauss安装与JDBC连接(企业版)" + +img = "/zh/post/July/title/img8.png" + +times = "12:30" + ++++ + +# openGauss安装与JDBC连接(企业版) + +## 一、 openGauss安装(企业版) + +**1. 获取安装包** + +- **1.1. 从openGauss开源社区下载对应平台的安装包。** + + 通过[https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html) 登录openGauss开源社区,选择 2.0.0版本对应平台企业版安装包(openGauss-2.0.0-CentOS-64bit-all.tar.gz)。 单击“下载”。 + + +- **1.2. 检查安装包。** + + 解压安装包,检查安装目录及文件是否齐全。在安装包所在目录执行以下命令: + + ``` + tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz ls -1b + ``` + + 执行ls命令,显示类似如下信息: + + ![](figures/zh-cn_image_0000001232608885.png) + + ----结束 + + +**2. 修改操作系统配置** + +- **2.1. 修改文件** + + 如果安装的不是Centos7.6,则要修改/etc/rehat-release文件,将 CentOS Linux release 7.9.2003 \(Core\)修改为CentOS Linux release 7.6 \(Core\) + + ![](figures/zh-cn_image_0000001232808939.png) + +- **2.2. 关闭操作系统防火墙** + + **步骤1 **修改/etc/selinux/config文件中的“SELINUX”值为“disabled”。 使用VIM打开config文件。 vim /etc/selinux/config 修改“SELINUX”的值“disabled”,执行:wq保存并退出修改。 SELINUX=disabled![](figures/zh-cn_image_0000001186929314.png) + + **步骤2** 重新启动操作系统。 + + ``` + reboot + ``` + + **步骤3** 检查防火墙是否关闭。 + + ``` + systemctl status firewalld + ``` + + 若防火墙状态显示为active \(running\),则表示防火墙未关闭,请执行步骤4; 若防火墙状态显示为inactive \(dead\),则无需再关闭防火墙。 + + **步骤4 **关闭防火墙。 + + systemctl disable firewalld.service systemctl stop firewalld.service + + ![](figures/zh-cn_image_0000001232727451.png) + + **步骤5** 在其他主机上重复步骤1到步骤4。 + + ----结束 + +- **2.3. 设置字符集参数** + + 将各数据库节点的字符集设置为相同的字符集,可以在/etc/profile文件中添加"export LANG=XXX"(XXX为Unicode编码)。 vim /etc/profile + + +- **2.4. 设置时区和时间** + + 在各数据库节点上,确保时区和时间一致。 + + **步骤1** 执行如下命令检查各数据库节点时间和时区是否一致。如果不一致,请执行步骤2\~步骤3。 + + date + + **步骤2** 使用如下命令将各数据库节点/usr/share/zoneinfo/目录下的时区文件拷贝为/etc/localtime文件。 + + cp /usr/share/zoneinfo/$地区/$时区/etc/localtime 说明: $地区/$时区为需要设置时区的信息,例如:Asia/Shanghai。 + + **步骤3** 使用date -s命令将各数据库节点的时间设置为统一时间,举例如下。 + + ``` + date -s "Sat Sep 27 16:00:07 CST 2020" + ``` + + ----结束 + +- **2.5. 设置网卡MTU 值** + + 将各数据库节点的网卡MTU值设置为相同大小。 + + **步骤1** 执行如下命令查询服务器的网卡名称 + + ifconfig 如下图所示: + + ![](figures/zh-cn_image_0000001187247856.png) + + **步骤2 **使用如下命令将各数据库节点的网卡MTU值设置为相同大小。 对于X86,MTU值推荐1500;对于ARM,MTU值推荐8192。 + + ifconfig 网卡名称 mtu mtu值 + + ----结束 + + +**3. 安装openGauss** + +- **3.1. 创建XML配置文件** + + 安装openGauss前需要创建cluster\_config.xml文件。cluster\_config.xml文件包含部署 openGauss的服务器信息、安装路径、IP地址以及端口号等。用于告知openGauss如何 部署。用户需根据不同场景配置对应的XML文件。 + + 配置数据库节点名称时,请通过hostname命令获取数据库节点的主机名称。 + + ![](figures/zh-cn_image_0000001187407766.png) + + 单节点配置文件如下: + + ![](figures/zh-cn_image_0000001232487371.png) + +- **3.2.初始化安装环境** +- **3.2.1. 
准备安装用户及环境** + + **步骤1** 以root用户登录待安装openGauss的任意主机,并按规划创建存放安装包的目录。 + + ![](figures/zh-cn_image_0000001187089296.png) + + **步骤2** 将安装包“openGauss-2.0.0-CentOS-64bit-all.tar.gz”和配置文件“cluster\_config.xml”都上传至上一步所创建的目录中。 + + **步骤3** 在安装包所在的目录下,解压安装包openGauss-2.0.0-CentOS-64bit-all.tar.gz。安装包解压后,会有OM安装包和Server安装包。继续解压OM安装包,会在/opt/software/openGauss路径下自动生成script子目录,并且在script目录下生成gs\_preinstall等各种OM工具脚本。 + + **步骤4** 进入到工具脚本存放目录下。 cd /opt/software/openGauss/script + + **步骤5** 为确保成功安装,执行命令检查 hostname 与 /etc/hostname 是否一致。 hostname cat /etc/hostname + + ![](figures/zh-cn_image_0000001232608887.png) + + **步骤6** 使用gs\_preinstall需要python3.6的环境,一般自带的是python2.7。 + + 安装CentOS开发工具 【用于允许您从源代码构建和编译软件】 sudo yum -y “groupinstall development” + + 下载epel + + sudo yum install epel-release + + 安装python3 + + sudo yum install python36 + + ![](figures/zh-cn_image_0000001232808941.png) + + 更改默认python + + ![](figures/zh-cn_image_0000001186929316.png) + + **步骤7** 使用gs\_preinstall准备好安装环境 + + 采用交互模式执行前置,并在执行过程中自动创建root用户互信和openGauss用 户互信: ./gs\_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster\_config.xml + + ![](figures/zh-cn_image_0000001232727453.png) + + ![](figures/zh-cn_image_0000001187247858.png) + + +- **3.2.2. 建立互信(使用脚本建立互信)** + + **步骤1** 创建一个执行互信脚本所需要的输入文本,并在此文件中添加openGauss中所有主机 IP。 vim hostfile + + **步骤2** 以需要创建互信的用户执行下面脚本建立互信。 ./gs\_sshexkey -f /opt/software/hostfile -W wangjingwei1 + + 运行成功截图如下 + + ![](figures/zh-cn_image_0000001187407768.png) + +- **3.3.执行安装** + + **步骤1 **登录到openGauss的主机,并切换到omm用户。 + + ``` + su - omm + ``` + + **步骤2** 使用gs\_install安装openGauss。 + + gs\_install -X /opt/software/openGauss/cluster\_config.xml 在执行过程中,用户需根据提示输入数据库用户的密码,密码应具有一定的复杂度.![](figures/zh-cn_image_0000001232487373.png)![](figures/zh-cn_image_0000001187089298.png) + + **步骤3** 安装执行成功之后,需要手动删除主机root用户的互信,即删除openGauss数据库各 节点上的互信文件。 rm -rf \~/.ssh + + 安装完成 + +- 4. 安装验证 + + **步骤1** 以omm用户身份登录服务器。 + + **步骤2** 执行如下命令检查数据库状态是否正常,“cluster\_state ”显示“Normal”表示数据 库可正常使用。 gs\_om -t status + + **步骤3** 数据库安装完成后,默认生成名称为postgres的数据库。第一次连接数据库时可以连接到此数据库。其中postgres为需要连接的数据库名称,26000为数据库主节点的端口号,即XML配置.文件中的dataPortBase的值。请根据实际情况替换。 + + gsql -d postgres -p 26000 连接成功后,系统显示类似如下信息表示数据库连接成功。 gsql \(\(openGauss x.x.x build 290d125f\) compiled at 2021-03-08 02:59:43 commit 2143 last mr 131 Non-SSL connection \(SSL connection is recommended when requiring high-security\) Type "help" for help. + + ![](figures/zh-cn_image_0000001232608889.png) + + **步骤4** 建立表,并插入内容进行查询 + + ![](figures/zh-cn_image_0000001232808943.png) + + ![](figures/zh-cn_image_0000001186929318.png) + + +## 二、使用jdbc连接数据库 + +- **1. 确认连接信息** + + **步骤1** 以操作系统用户omm登录数据库主节点。 + + **步骤2** 使用“gs\_om -t status --detail”命令查询openGauss各实例情况。 + + ![](figures/zh-cn_image_0000001232727455.png) + +- **2. 配置服务端远程连接** + + **步骤1** 以操作系统用户omm登录数据库主节点。 + + **步骤2** 配置客户端认证方式 + + 需先本地连接数据库,并在数据库中使用如下语句建立“jack”用户: + + ``` + postgres=# CREATE USER jack PASSWORD 'Test@123'; + ``` + + 允许客户端以“jack”用户连接到本机,此处远程连接禁止使用 + + “omm”用户(即数据库初始化用户)。下面示例中配置允许IP地址为10.27.1.209的客户端访问本机。 + + ![](figures/zh-cn_image_0000001187247860.png) + + **步骤3** 配置listen\_addresses,listen\_addresses即远程客户端连接使用的数据库主节点ip或者主机名。 + + 使用如下命令查看数据库主节点目前的listen\_addresses配置。 + + ``` + gs_guc check -I all -c "listen_addresses" + ``` + + 使用如下命令把要添加的IP追加到listen\_addresses后面,多个配置项之间用英文逗号分隔。例如,追加IP地址10.11.12.13。 + + ``` + gs_guc set -I all -c"listen_addresses='localhost,192.168.0.100,10.11.12.13'" + ``` + + **步骤4** 执行如下命令重启openGauss。 + + ``` + gs_om -t stop && gs_om -t start + ``` + + ![](figures/zh-cn_image_0000001187407770.png) + + **3. 
+## 三、 遇到的问题
+
+刚开始安装的是极简版,发现有一个命令使用不了,后来改安装企业版就没有遇到这个问题。
+
+CentOS版本问题:在网上没有找到CentOS 7.6,只找到7.9,运行时会报错,要修改/etc/redhat-release文件,将 CentOS Linux release 7.9.2003 \(Core\)修改为CentOS Linux release 7.6 \(Core\)。
+
+安装python3:
+
+安装CentOS开发工具(用于允许您从源代码构建和编译软件):sudo yum -y groupinstall "Development Tools"
+
+下载epel:sudo yum install epel-release
+
+安装python3:sudo yum install python36
+
+更改默认python
+
+![](figures/zh-cn_image_0000001187089302.png)
+
+连接问题
+
+连接Windows主机时,远程连接也配置了,互信也建立了,总是报连接错误问题。后来选择连接虚拟机,连接虚拟机要先配JDK,直接使用yum安装比较方便:
+
+搜索jdk安装包
+
+\# yum search java|grep jdk
+
+下载jdk1.8,下载后默认目录为:/usr/lib/jvm/
+
+\# yum install java-1.8.0-openjdk
+
+验证安装
+
+后面要进行驱动加载,不加载的话使用不了下载的文件。要将postgresql.jar驱动包设置到java的classpath环境变量中。
+
+将postgresql.jar类库文件拷贝到...\\Java\\jdk1.7.0\\jre\\lib\\ext目录下。(这个路径根据JDK的版本和安装路径确定,下同)
+
+将postgresql.jar类库文件拷贝到...\\Java\\jre7\\lib\\ext目录下( 最好是,只要是jre文件夹,都复制一个postgresql.jar到jre7\\lib\\
+
diff --git "a/content/zh/post/July/openGauss\345\273\272\347\253\213\347\224\250\346\210\267\345\217\212\345\257\274\345\205\245sql\346\226\207\346\234\254.md" "b/content/zh/post/July/openGauss\345\273\272\347\253\213\347\224\250\346\210\267\345\217\212\345\257\274\345\205\245sql\346\226\207\346\234\254.md"
new file mode 100644
index 0000000000000000000000000000000000000000..cbb3e32f2086a489fff03d83ffaff0d757d5667f
--- /dev/null
+++ "b/content/zh/post/July/openGauss\345\273\272\347\253\213\347\224\250\346\210\267\345\217\212\345\257\274\345\205\245sql\346\226\207\346\234\254.md"
@@ -0,0 +1,276 @@
++++
+
+title = "openGauss建立用户及导入sql文本"
+
+date = "2021-08-07"
+
+tags = [ "openGauss建立用户及导入sql文本"]
+
+archives = "2021-08"
+
+author = "Walrus"
+
+summary = "openGauss建立用户及导入sql文本"
+
+img = "/zh/post/July/title/img2.png"
+
+times = "12:30"
+
++++
+
+# openGauss建立用户及导入sql文本
+
+## 建立用户同时赋予用户创建database权限:
+
+```
+create user deity with createdb identified by 'Deityle---';
+[omm@node1 tmp]$ gsql -p 15400 -d postgres -r
+gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+postgres=# select * from pg_user; + usename | usesysid | usecreatedb | usesuper | usecatupd | userepl | passwd | valbegin | valuntil | respool | parent | spacelimit | useconfig | nodegroup | tempspacelimit | spillspacelimit | usemonitora +dmin | useoperatoradmin | usepolicyadmin +---------+----------+-------------+----------+-----------+---------+----------+----------+----------+--------------+--------+------------+-----------+-----------+----------------+-----------------+------------ +-----+------------------+---------------- + omm | 10 | t | t | t | t | ******** | | | default_pool | 0 | | | | | | t + | t | t +(1 row) + + +postgres=# create user deity with createdb identified by 'Deity----'; +CREATE ROLE +postgres=# select * from pg_user; + usename | usesysid | usecreatedb | usesuper | usecatupd | userepl | passwd | valbegin | valuntil | respool | parent | spacelimit | useconfig | nodegroup | tempspacelimit | spillspacelimit | usemonitora +dmin | useoperatoradmin | usepolicyadmin +---------+----------+-------------+----------+-----------+---------+----------+----------+----------+--------------+--------+------------+-----------+-----------+----------------+-----------------+------------ +-----+------------------+---------------- + omm | 10 | t | t | t | t | ******** | | | default_pool | 0 | | | | | | t + | t | t + deity | 24941 | t | f | f | f | ******** | | | default_pool | 0 | | | | | | f + | f | f +(2 rows) +``` + +## 使用新建用户创建数据库及schema: + +``` +create database chnbs encoding 'UTF8'; +create schema jack; + +create schema salene; + +[omm@node1 tmp]$ gsql -p 15400 -d postgres -U deity -r +Password for user deity: +gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + + +postgres=> \dn+ + List of schemas + Name | Owner | Access privileges | Description +-------+-------+-------------------+------------- + deity | deity | | +(1 row) + + +postgres=> \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+---------+-------+------------------- + mydb | | GBK | C | C | + postgres | | UTF8 | C | C | + template0 | | UTF8 | C | C | =c/omm + + | | | | | omm=CTc/omm + template1 | | UTF8 | C | C | =c/omm + + | | | | | omm=CTc/omm +(4 rows) + + +postgres=> create database chnbs encoding 'UTF8'; +CREATE DATABASE +postgres=> \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+---------+-------+------------------- + chnbs | deity | UTF8 | C | C | + mydb | | GBK | C | C | + postgres | | UTF8 | C | C | + template0 | | UTF8 | C | C | =c/omm + + | | | | | omm=CTc/omm + template1 | | UTF8 | C | C | =c/omm + + | | | | | omm=CTc/omm +(5 rows) + + +postgres=> \c chnbs +Password for user deity: +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "chnbs" as user "deity". 
+chnbs=> \dn +List of schemas + Name | Owner +------+------- +(0 rows) +chnbs=> create schema jack; +CREATE SCHEMA +chnbs=> \dn +List of schemas + Name | Owner +------+------- + jack | deity +(1 row) + + +chnbs=> \dn+ + List of schemas + Name | Owner | Access privileges | Description +------+-------+-------------------+------------- + jack | deity | | +(1 row) + + +chnbs=> create schema salene; +CREATE SCHEMA +chnbs=> \dn+ + List of schemas + Name | Owner | Access privileges | Description +--------+-------+-------------------+------------- + jack | deity | | + salene | deity | | +(2 rows) +``` + +## gsql下执行SQL文件,导入表及数据 + +set search\_path=jack; \#\#设置当前schema + +\\i home/omm/mydb.sql \#\#gsql下使用\\i 导入sql文本 + +一般导入前需要对sql文本做响应的修改,其他数据库导出的文本,schema、用户名称都需要替换。 + +``` +chnbs=> show search_path; + search_path +---------------- + "$user",public +(1 row) + +chnbs=> set search_path=jack; +SET +chnbs=> show search_path; + search_path +------------- + jack +(1 row) +chnbs=> \i home/omm/mydb.sql +SET +SET +SET +SET +SET +SET +SET +CREATE PROCEDURE +ALTER FUNCTION +SET +SET +CREATE TABLE +ALTER TABLE +...... +...... +...... + + +COMMENT +COMMENT + setval +-------- + 1 +(1 row) + + + + +ALTER TABLE +ALTER TABLE +ALTER TABLE +ALTER TABLE +ALTER TABLE +CREATE INDEX +CREATE INDEX +CREATE INDEX +REVOKE +REVOKE +GRANT +GRANT + + +chnbs=> \dn +List of schemas + Name | Owner +--------+------- + jack | deity + salene | deity +(2 rows) + + +chnbs=> \d + List of relations + Schema | Name | Type | Owner | Storage +--------+--------------------------------+----------+-------+---------------------------------- + jack | bank_balance_loan | table | deity | {orientation=row,compression=no} + jack | bi_authentication | table | deity | {orientation=row,compression=no} + jack | bi_bank_product | table | deity | {orientation=row,compression=no} + jack | bi_bank_rate | table | deity | {orientation=row,compression=no} + jack | bi_compatible | table | deity | {orientation=row,compression=no} + jack | bi_credit_feedback | table | deity | {orientation=row,compression=no} + jack | bi_customer | table | deity | {orientation=row,compression=no} + jack | bi_disburse_detail | table | deity | {orientation=row,compression=no} + jack | bi_finance_transaction | table | deity | {orientation=row,compression=no} + jack | bi_flow | table | deity | {orientation=row,compression=no} + jack | bi_flow_define | table | deity | {orientation=row,compression=no} + jack | bi_flow_node | table | deity | {orientation=row,compression=no} + jack | bi_monitor_history_record | table | deity | {orientation=row,compression=no} + jack | bi_monitor_record | table | deity | {orientation=row,compression=no} + jack | bi_mutual_excls | table | deity | {orientation=row,compression=no} + jack | bi_order | table | deity | {orientation=row,compression=no} + jack | bi_order_audit | table | deity | {orientation=row,compression=no} + jack | bi_order_collateral | table | deity | {orientation=row,compression=no} + jack | bi_order_collateral_owner | table | deity | {orientation=row,compression=no} + jack | bi_order_push | table | deity | {orientation=row,compression=no} + jack | bi_order_veritify | table | deity | {orientation=row,compression=no} + jack | bi_orglist | table | deity | {orientation=row,compression=no} + jack | bi_pay_repay | table | deity | {orientation=row,compression=no} + jack | bi_pre_credit | table | deity | {orientation=row,compression=no} + jack | bi_reason_rule | table | deity | {orientation=row,compression=no} + jack | bi_reconl_record | table | 
deity | {orientation=row,compression=no} + jack | bi_schedule_job | table | deity | {orientation=row,compression=no} + jack | bi_schedule_record | table | deity | {orientation=row,compression=no} + jack | bi_tax_organization | table | deity | {orientation=row,compression=no} + jack | bi_transaction_flow | table | deity | {orientation=row,compression=no} + jack | bi_transaction_node_detail | table | deity | {orientation=row,compression=no} + jack | bi_warm_result | table | deity | {orientation=row,compression=no} + jack | bi_whitelist | table | deity | {orientation=row,compression=no} + jack | credit_parse_record | table | deity | {orientation=row,compression=no} + jack | data_anti_fraud | table | deity | {orientation=row,compression=no} + jack | data_blacklist | table | deity | {orientation=row,compression=no} + jack | data_msg_record | table | deity | {orientation=row,compression=no} + jack | data_nosuitable_loan | table | deity | {orientation=row,compression=no} + jack | data_record | table | deity | {orientation=row,compression=no} + jack | data_rule_decisions | table | deity | {orientation=row,compression=no} + jack | data_sample_credit | table | deity | {orientation=row,compression=no} + jack | dm_hydl | table | deity | {orientation=row,compression=no} + jack | dm_hyml | table | deity | {orientation=row,compression=no} + jack | dm_hymx | table | deity | {orientation=row,compression=no} + jack | dm_hyzl | table | deity | {orientation=row,compression=no} + jack | dm_xzqh_wen | table | deity | {orientation=row,compression=no} + jack | er_basic | table | deity | {orientation=row,compression=no} +chnbs=> select count(*) from bi_order; + count +------- + 407 +(1 row) +``` + diff --git "a/content/zh/post/July/openGauss\346\225\260\346\215\256\344\270\216PostgreSQL\347\232\204\345\267\256\345\274\202\345\257\271\346\257\224.md" "b/content/zh/post/July/openGauss\346\225\260\346\215\256\344\270\216PostgreSQL\347\232\204\345\267\256\345\274\202\345\257\271\346\257\224.md" new file mode 100644 index 0000000000000000000000000000000000000000..cc44fc0e711a19f76d7a39ab916383d3f01b686a --- /dev/null +++ "b/content/zh/post/July/openGauss\346\225\260\346\215\256\344\270\216PostgreSQL\347\232\204\345\267\256\345\274\202\345\257\271\346\257\224.md" @@ -0,0 +1,121 @@ ++++ + +title = "openGauss数据与PostgreSQL的差异对比" + +date = "2021-08-21" + +tags = [ "openGauss数据与PostgreSQL的差异对比"] + +archives = "2021-08" + +author = "Walrus" + +summary = "openGauss数据与PostgreSQL的差异对比" + +img = "/zh/post/July/title/img3.png" + +times = "12:30" + ++++ + +# **openGauss数据与PostgreSQL的差异对比** + +## 1. 前言 + +openGauss数据库已经发布2.0.1版本了,中启乘数科技是一家专业的专注于极致性能的数据库服务提供商,所以也关注openGauss数据库的特性。因为openGauss是从PostgreSQL发展出来的,所以我们详细讲解对比一下openGauss与原生PostgreSQL数据库的对比。 + +## 2. 
openGauss大功能方面的变化 + +openGauss是基于PostgreSQL9.2版本开发的,基本包括了PostgreSQL9.4的功能。目前PostgreSQL正式版本已经到13了, 14的beta版本也发布了。openGauss只把PostgreSQL9.4之后的新版本的极少数功能移植进来了,绝大多数功能都没有纳入。 + +openGauss最大的变化就是把PostgreSQL的进程模式改成了线程模式,当然这两个模式其实各有优缺点。线程模式对短连接有优势,比进程模式的数据库可以承担更大的并发短请求,但线程模式也有明显的缺点,所有的线程共享内存,如果一个线程的的野指针把别人的内存改了,不会报错,一时半会可能还发现不了,极端情况下会导致数据损坏而不被发现。所以说这个改变不能说有什么明显的好处,某些情况下可能还是一个退步。为了改成线程模式,openGauss的把C语言的源代码改成了C++。C++的好处是容易封装,坏处是移植性降低了。 + +当然openGauss增加了线程池的功能,目前还不清楚这个功能是否稳定可靠。如果稳定可靠可以不使用第三方的连接池工具了。 + +openGauss另一个变化是把事务ID\(XID\)从32bit改成了64bit,64bit的xid的好处是永远不可能耗尽,好处是我们永远不用担心会发生xid回卷宕机的风险。注意,虽然xid改为了64bit,但是过期的事务ID依旧需要清理。实际上PostgreSQL数据库默认达到2亿事务就强制整理,而32bit的xid可以达到20亿,所以我们实际上可以修改autovacuum\_freeze\_max\_age为10亿来推迟对xid的整理。 + +我们知道磁盘扇区大小是512字节,一些SSD可以是4k大小,而数据库一般是8k/16k/32k,一个数据库数据块刷到操作系统的过程中可能发生宕机造成这样有块断裂问题,即块中一半是新数据,另一半还是旧数据,这就是块的逻辑损坏,这可能导致数据库无法启动。 + +MySQL通过双写double write来解决这个问题,PostgreSQL是通过full\_page\_write来解决这个问题,就是在数据页第一次发生变更的时候将整个页面记录到xlog日志中,这样出了问题就有了完整的数据页加xlog日志进行恢复,这样做的缺点是大大增加了xlog的日志量,也对性能有一定影响。当然我们可以通过延长checkpoint的间隔时间来缓解这个问题。而openGauss实现了类似MySQL的双写,写数据块的同时将脏页也写到一个共享的双写空间里,如果发生问题会从双写空间里找到完整的数据页进行恢复。双写特性参数enable\_double\_write需要配合增量检查点一起使用。openGauss这个功能有一定的实际价值。 + +openGauss主备库的模式与PostgreSQL有比较大的不同,PostgreSQL的备库模式是拉的模式,即备库主动到主库上拉WAL日志,而openGauss改成了推的模式,推的模式是主库主动把WAL模式推到备库。而实际上改成这样,导致搭建备库更不方便了,因为搭建备库时必须到主库上修改参数replconninfo1或replconninfo2,即replconninfoN, N=1\~8,而可以配置的参数只有8个,所以感觉openGauss后面最多只能挂8个备库。当年从Oracle转到PostgreSQL上时,还比较庆幸不用动主库了,一用openGauss感觉又回到了解放前。 + +openGauss内置的了主备库切换功能,让使用者用起来更方便。但这个功能是和数据库本身紧耦合的,同时不太稳定。笔者在测试中,备库就报从主库中断开了,报大量的日志把空间给撑爆了: + +``` +2021-06-24 08:38:43.824 [unknown] [unknown] localhost 47427058550848 0 0 [BACKEND] LOG: configuration file "/opt/software/openGauss +/data/slave/postgresql.conf" contains errors; unaffected changes were applied +2021-06-24 08:38:43.832 [unknown] [unknown] localhost 47428485064448 0 0 [BACKEND] LOG: Connect failed. +2021-06-24 08:38:43.833 [unknown] [unknown] localhost 47428485064448 0 0 [BACKEND] LOG: Connect failed. +2021-06-24 08:38:43.833 [unknown] [unknown] localhost 47428485064448 0 0 [BACKEND] LOG: Connect failed. +2021-06-24 08:38:43.833 [unknown] [unknown] localhost 47428485064448 0 0 [BACKEND] LOG: Connect failed. +2021-06-24 08:38:43.833 [unknown] [unknown] localhost 47428485064448 0 0 [BACKEND] LOG: Connect failed. +2021-06-24 08:38:43.833 [unknown] [unknown] localhost 47428485064448 0 0 [BACKEND] LOG: Connect failed. 
+``` + +从上面的日志可以看出,打印日志时,没有任何间隔,不断的打印,很快就会把空间给撑满。这是一个很糟糕的设计,在生产系统中这也是一个很危险的情况,虽然有空间告警,但有可能还没有等工程师来处理,空间就给撑满了。 + +openGauss摒除recovery.conf文件。当然PostgreSQL12的版本也是摒除recovery.conf文件。openGauss是启动数据库是指定是备库还是主库: + +``` +gs_ctl start -D $GAUSSHOME/data/master -M standby +``` + +这个改变实际上是一个非常糟糕的改变,如果DBA忘加了“-M standby”,这个备库就废掉了,需要重新搭建。而原生PostgreSQL是建立了一个文件来指示这个数据库是主库还是备库,不会有这种误操作的风险。好在openGauss提供了gs\_ctl build命令重新搭建备库,部分缓解了这个问题。 + +openGauss有一个最大可用模式most\_available\_sync,openGauss认为原生PostgreSQL的流复制有一个痛点就是在一主一备的同步模式下,如果备库宕机,主库会hang,同步模式不会自动降级。所以openGauss设计了最大可用模式,即开启该参数后在主从连接正常的情况下处于同步模式,如果备机断连会立刻切为异步模式,如果备机再次启动会自动连接并切为同步模式。但实际上这种设计是一种奇怪的设计,如果出现问题立即降级,那么与异步模式有什么区别?同步模式本身就是要保证故障切换后不丢失数据,当故障时主库立即降级了,这时再切换了,直接就丢失数据了。如果允许丢数据,直接使用异步复制就可以了,如果需要不丢数据,使用同步模式,如果一个备库坏了主库也不hang,那么就做两个备库的同步模式,这个most\_available\_sync模式感觉不太实用。 + +openGauss支持了列存表,列存表支持压缩。列存表使用中需要注意膨胀的一些问题,如果了解不深,建议不要使用。 + +openGauss在每个库下面会默认有个叫dbe\_perf的schema,这个schema下有有几百个性能视图,这些视图大部分pg里面都有,但是放在单独的schema中方便查看和管理,这个设计还不错。 + +openGauss中实现了xlog预分配,在xlog未写满时就分配下面一个或者几个xlog。网上有人说PostgreSQL不能预分配WAL,这是错误的认识,实际上PostgreSQL是可以把原先使用的WAL日志改名成预分配的WAL日志的,参数min\_wal\_size就是指定了需要预先预留的WAL文件数,这个参数默认是80MB,这个值对于一些需要灌大量数据的数据库来说,有点小了,可以把此值改大。 + +openGauss实现了增量checkpoint,官方称让数据库更平滑。 + +openGauss实现了并行恢复,默认是关闭的。 + +由于openGauss的物理备库也会建复制槽,为了防止备库把主库的空间撑爆,openGauss又增加了两个参数:enable\_xlog\_prune和 max\_size\_for\_xlog\_prune,允许删除掉过多的WAL日志防止把主库撑爆: + +``` +postgres=# show max_size_for_xlog_prune; + max_size_for_xlog_prune +------------------------- + 2147483647kB +(1 row) +``` + +但默认max\_size\_for\_xlog\_prune设置的比较大,起不到保护作用。 + +openGauss支持与oracle使用方法基本相同的定时任务dbms\_job。 + +openGauss有初步的逻辑解码功能,但不如PostgreSQL完善。没有完整的PostgreSQL的逻辑复制功能。 + +openGauss的索引支持比新版本的PostgreSQL弱一些,如不支持brin索引,PostgreSQL新版本对Btree索引有比较大的优化,这一块openGauss也有一些缺失,也没有布隆过滤器的功能。 + +## 3. 
openGauss一些硬伤 + +首先是不支持并行。这也很好理解,PostgreSQL是从9.6开始支持并行了,而openGauss是基于PostgreSQL9.4的。目前PostgreSQL有强大的并行功能。目前不清楚openGauss什么时候可以支持并行。 + +编译过于复杂,依赖过多:编译需要很多依赖,而且版本固定,造成跨平台编译的难度非常大,同时改成C++,通用性差,你可能发现编译华为的第三方编译工具比编译数据库还麻烦。当然编译数据库方便是因为数据库是从PostgreSQL中继承过来的。 + +openGauss把原生的psql命令改名为gsql,gsql需要加参数“-r”才能支持上下翻命令和自动补全。原先使用oracle时,oracle的sqlplus就不支持这些功能被一堆人吐槽,后来用rlwrap勉强好一些了。当转到PostgreSQL后,psql的命令自动补全功能让DBA幸福满满的。当初学者不知道“-r”参数时,一用openGauss又回到了Oracle的sqlplus时代。 + +openGauss目前对插件的支持不好,原生的PostgreSQL可以使用很多的插件,也吸引了很多开发者开发插件。而openGauss的“CREATE EXTENSION”还处于内部支持的阶段。目前可以勉强支持PostGIS。当然openGauss把一些常用的插件内置在数据库内部了,缓解了此问题。 + +openGauss不支持表继承,同时把原生PostgreSQL中的一些非常有用的工具都给去掉了,如pg\_waldump(或pg\_xlogdump)、pg\_receivewal。 + +openGauss相对于PostgreSQL数据库来说臃肿一些,在openGauss2.0版本之前内存至少要8GB,小了根本启动不了,2.0版本之后这一块有比较大的改进,小内存也可以启动了。原生PostgreSQL主程序小于10MB,而openGauss则为100MB: + +``` +[root@pg01 ~]# ls -l /usr/pgsql-12/bin/postgres +-rwxr-xr-x 1 root root 7731856 Aug 12 2020 /usr/pgsql-12/bin/postgres +[root@pg01 ~]# +[gauss@pgtrain bin]$ ls -l gaussdb +-rwxr-xr-x 1 gauss gauss 102432784 Jun 2 19:45 gaussdb +``` + +openGauss比较大的问题是很多地方对PostgreSQL做了改动,感觉有些为了改动而改动,导致与很多PostgreSQL生态的软件不能兼容,这对于使用者是一个很大的问题。 + +当然最大的硬伤是文档不足。openGauss对PostgreSQL做了很多的一些改变,却没有提供文档或提供的文档不全,openGauss的官方文档基本是一个残次品,如官方文档中居然没有搭建备库的说明,安装手册中提供的卸载方法是用gs\_uninstall命令,但极简版根本没有gs\_uninstall命令,实际上极简版很多命令都没有,文档中对此无任何提示,这一点很让人无语。所以openGauss的文档比较原生的PostgreSQL基本是一个天上一个地上,比一些其它的著名开源软件如VUE、element-ui的文档也根本没法比,与tidb的文档相比也是差的非常远。希望openGauss社区重视文档,让文档的质量上一个台阶。 + diff --git "a/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223SQL\346\250\241\345\235\227\346\272\220\347\240\201\345\210\206\346\236\220.md" "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223SQL\346\250\241\345\235\227\346\272\220\347\240\201\345\210\206\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..d7989f5b96da586b2640106e95bd6b7b8e1aee8c --- /dev/null +++ "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223SQL\346\250\241\345\235\227\346\272\220\347\240\201\345\210\206\346\236\220.md" @@ -0,0 +1,237 @@ ++++ + +title = "openGauss数据库SQL模块源码分析" + +date = "2021-12-09" + +tags = [ "openGauss数据库SQL模块源码分析"] + +archives = "2021-12" + +author = "…" + +summary = "openGauss数据库SQL模块源码分析" + +img = "/zh/post/July/title/img9.png" + +times = "12:30" + ++++ + +# openGauss数据库SQL模块源码分析 + +## 一、词法分析: + +文件位置: + +src/common/backend/parser/scan.l 定义词法结构,采用Lex编译后生成scan.cpp文件 + +原理:根据SQL语言标准对SQL语言中的关键字、标识符、操作符、常量、终结符进行了定义和识别。并且能够进行更精确的检查和操作。词法分析将一个SQL划分成多个不同的token,每个token会有自己的词性 + +代码如下: + +- **1. 定义的形式如下:** + + ![](figures/zh-cn_image_0000001232729629.jpg) + +- **2. 
检查的形式如下:** + + ![](figures/zh-cn_image_0000001186931494.jpg) + + 可以看到 当遇到identifier类型的时候,会进行更进一步的检查和操作。首先调用函数确定它是否是从关键字表中查找关键字,如果是则返回关键字的类型。否则调用函数将大写转换成小写。 + + 用到的函数有: + + 1、char\* downcase\_truncate\_identifier\(const char\* ident, int len, bool warn\) + + 将字符都转化成小写,利用大写字母和小写字母之间的差值 + + ![](figures/zh-cn_image_0000001232611073.jpg) + + 2、bool scanner\_isspace\(char ch\) + + 如果找到的是空格,则返回true + + ![](figures/zh-cn_image_0000001232489561.jpg) + + 3、void truncate\_identifier\(char\* ident, int len, bool warn\) + + 截断标识符 + + ![](figures/zh-cn_image_0000001187091472.jpg) + + +## 二 、语法分析 + +- 文件位置: + + src/common/backend/parser/scan.l 定义语法结构,采用Yacc编译后生成gram.cpp文件 + + 原理:根据SQL语言的不同定义了一系列表达Statement的结构体(这些结构体通常以Stmt作为命名后缀),用来保存语法分析结果。 + + +- 结构体如下: + + ![](figures/zh-cn_image_0000001232811117.jpg) + + 结构体中的每一项都对应一个子结构,程序根据不同的情况对其赋值: + + 情况有: + + ![](figures/zh-cn_image_0000001187250036.jpg) + + ![](figures/zh-cn_image_0000001187409942.jpg) + + 这些形式会进一步的递归处理,最终转换为基本的simple\_select形式。代码如下:simple\_select语法分析结构可以看出,一条简单的查询语句由以下子句组成:去除行重复的distinctClause、目标属性targetList、SELECT INTO子句intoClause、FROM子句fromClause、WHERE子句whereClause、GROUP BY子句groupClause、HAVING子句havingClause、窗口子句windowClause和plan\_hint子句。在成功匹配simple\_select语法结构后,将会创建一个Statement结构体,将各个子句进行相应的赋值。 + + simple\_select的其他子句,如distinctClause、groupClause、havingClause等,语法分析方式类似。而其他SQL命令,如CREATE、INSERT、UPDATE、DELETE等,处理方式与SELECT命令类似 + +- 使用的函数: + + ![](figures/zh-cn_image_0000001232729631.jpg) + + +逻辑:创建SelectStmt结构体后,向结构体中填充参数。语法分析树 + +它产生的函数在在文件src/common/backend/parser/parser.cpp文件中的row\_parser中被调用: + +![](figures/zh-cn_image_0000001186931496.jpg) + +最后返回,用于后面的语义分析、查询重写等步骤,该List中的每个ListCell包含一个语法树。 + +![](figures/zh-cn_image_0000001232611075.jpg) + +## 三、语义分析 + +- 文件位置 + +主入口文件src/common/backend/parser/analyze.cpp,入口函数是parse\_analyze + +- 原理:语义分析模块在词法分析和语法分析之后执行,用于检查SQL命令是否符合语义规定,能否正确执行。负责语义分析的是parse\_analyze函数,位于analyze.cpp下。parse\_analyze会根据词法分析和语法分析得到的语法树,生成一个ParseState结构体用于记录语义分析的状态,再调用transformStmt函数,根据不同的命令类型进行相应的处理,最后生成查询树。 +- ParseState保存了许多语义分析的中间信息,如原始SQL命令、范围表、连接表达式、原始WINDOW子句、FOR UPDATE/FOR SHARE子句等。该结构体在语义分析入口函数parse\_analyze下被初始化,在transformStmt函数下根据不同的Stmt存储不同的中间信息,完成语义分析后再被释放。ParseState结构如下。 + +![](figures/zh-cn_image_0000001232489563.jpg) + +在语义分析过程中,语法树parseTree使用Node节点进行包装。Node结构只有一个类型为NodeTag枚举变量的字段,用于识别不同的处理情况。比如SelectStmt 对应的NodeTag值为T\_SelectStmt。Node结构如下。 + +![](figures/zh-cn_image_0000001187091474.jpg) + +transformStmt函数会根据NodeTag的值,将语法树转化为不同的Stmt结构体,调用对应的语义分析函数进行处理。 + +![](figures/zh-cn_image_0000001232811119.jpg) + +openGauss在语义分析阶段处理的NodeTag情况有九种 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
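+这一分发过程相当于在transformStmt中按NodeTag做一个switch分支选择。下面是一个简化的示意片段(仅保留骨架,依赖openGauss内部类型,并非源码原文;完整的九种分支见下表):
+
+```cpp
+// 示意:transformStmt 按 NodeTag 分发到对应的 transformXxxStmt
+static Query* transformStmtSketch(ParseState* pstate, Node* parseTree)
+{
+    Query* result = NULL;
+    switch (nodeTag(parseTree)) {
+        case T_InsertStmt:
+            result = transformInsertStmt(pstate, (InsertStmt*)parseTree);
+            break;
+        case T_SelectStmt:
+            result = transformSelectStmt(pstate, (SelectStmt*)parseTree);
+            break;
+        /* T_DeleteStmt、T_UpdateStmt 等其余分支同理,见下表 */
+        default:
+            break;
+    }
+    return result;
+}
+```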

+| NodeTag | 处理函数 |
+| :-- | :-- |
+| T_InsertStmt | transformInsertStmt |
+| T_DeleteStmt | transformDeleteStmt |
+| T_UpdateStmt | transformUpdateStmt |
+| T_MergeStmt | transformMergeStmt |
+| T_SelectStmt | transformSelectStmt |
+| T_DeclareCursorStmt | transformDeclareCursorStmt |
+| T_ExplainStmt | transformExplainStmt |
+| T_CreateTableAsStmt | transformCreateTableAsStmt |
+| T_CreateModelStmt | transformCreateModelStmt |
+ +transformSelectStmt: + +![](figures/zh-cn_image_0000001187250038.jpg)调用关系 + +![](figures/zh-cn_image_0000001187409944.jpg) + +![](figures/zh-cn_image_0000001232729635.jpg) + +处理对应句子的流程。 + +以处理基本SELECT命令的transformSelectStmt函数为例,其处理流程如下。 + +(1) 创建一个新的Query节点,设置commandType为CMD\_SELECT。 + +(2) 检查SelectStmt是否存在WITH子句,存在则调用transformWithClause处理。 + +(3) 调用transformFromClause函数处理FROM子句。 + +(4) 调用transformTargetList函数处理目标属性。 + +(5) 若存在操作符“+”则调用transformOperatorPlus转为外连接。 + +(6) 调用transformWhereClause函数处理WHERE子句和HAVING子句。 + +(7) 调用transformSortClause函数处理ORDER BY子句。 + +(8) 调用transformGroupClause函数处理GROUP BY子句。 + +(9) 调用transformDistinctClause函数或者transformDistinctOnClause函数处理DISTINCT 子句。 + +(10)调用transformLimitClause函数处理LIMIT和OFFSET子句。 + +(11)调用transformWindowDefinitions函数处理WINDOWS子句。 + +(12)调用resolveTargetListUnknowns函数将其他未知类型作为text处理。 + +(13)调用transformLockingClause函数处理FOR UPDATE子句。 + +(14)处理其他情况,如insert语句、foreign table等。 + +(15)返回查询树。 + +## 四、总体的入口函数: + +![](figures/zh-cn_image_0000001186931498.jpg) + +l 位置:\\src\\gausskernel\\process\\tcop\\postgres.cpp + +1、调用 pg\_parse\_query 函数,参数 用户输入的命令,生成 parsetree\_list + +![](figures/zh-cn_image_0000001232611077.jpg) + +2、再调用 pg\_analyze\_and\_rewrite 函数,参数 语法树链表,返回 查询树链表。进行语义分析。 + +![](figures/zh-cn_image_0000001232489565.jpg) + +3、pg\_analyze\_and\_rewrite 函数调用parse\_analyze 函数进行语义分析。 + +![](figures/zh-cn_image_0000001187091476.jpg) + +调用流程图 + +![](figures/zh-cn_image_0000001232811121.png) + diff --git "a/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223log_hostname\345\217\202\346\225\260\345\210\206\346\236\220.md" "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223log_hostname\345\217\202\346\225\260\345\210\206\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..0dd67ee20dc25ca174a5603c9bcc4449a9ff4138 --- /dev/null +++ "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223log_hostname\345\217\202\346\225\260\345\210\206\346\236\220.md" @@ -0,0 +1,459 @@ ++++ + +title = "openGauss数据库log_hostname参数分析" + +date = "2021-07-14" + +tags = [ "openGauss数据库log_hostname参数分析"] + +archives = "2021-07" + +author = "民生运维人" + +summary = "openGauss数据库log_hostname参数分析" + +img = "/zh/post/July/title/img3.png" + +times = "12:30" + ++++ + +# openGauss数据库log_hostname参数分析 + +本文主要借助技术文档、源代码、实验三个方面的内容,分析openGauss数据库log\_hostname参数的作用及影响范围。 + +## 技术文档 + +以下是官方文档关于log\_hostname参数的几处描述: + +1. 开发者指南-\>GUC参数说明-\>错误报告和日志-\>记录日志的内容: + + 参数说明:选项关闭状态下,连接消息日志只显示正在连接主机的IP地址。打开此选项同时可以记录主机名。由于解析主机名可能需要一定的时间,可能影响数据库的性能。 + + 该参数属于SIGHUP类型参数 + + 取值范围:布尔型 + + 默认值:off + +2. 工具参考-\>客户端工具-\>gsql-\>常见问题处理-\>连接性能问题 + + 开启了log\_hostname,但是配置了错误的DNS导致的连接性能问题。 + + 连接上数据库,通过“show log\_hostname”语句,检查数据库中是否开启了log\_hostname参数。 + + 如果开启了相关参数,那么数据库内核会通过DNS反查客户端所在机器的主机名。这时如果数据库主节点配置了不正确的/不可达的DNS服务器,那么会导致数据库建立连接过程较慢。 + +3. 开发者指南-\>应用程序开发教程-\>调试 + + log\_hostname 配置是否记录主机名 缺省时,连接日志只记录所连接主机的IP地址。打开这个选项会同时记录主机名。该参数同时影响 查看审计结果、GS\_WLM\_SESSION\_HISTORY、PG\_STAT\_ACTIVITY和log\_line\_prefix参数。 + + log\_connections/log\_disconnections 配置是否在每次会话连接或结束时向服务器日志里打印一条信息。 + +4. 
log\_line\_prefix参数 + + 参数说明:控制每条日志信息的前缀格式。日志前缀类似于printf风格的字符串,在日志的每行开头输出。用以%为开头的“转义字符”代替表1中的状态信息。 + + 默认值:%m %c %d %p %a %x %n %e表示在日志开头附加会话开始时间戳,会话ID,数据库名,线程ID,应用程序名,事务ID,报错节点,SQLSTATE错误码。 + + +## 源代码 + +梳理源码之后认为,负责与用户交互的postmaster程序在数据库连接建立的时候进行用户认证、创建工作线程等工作,相关信息保存为当前会话/线程的全局变量(knl\_session\_context\* u\_sess;),包括按照log\_hostname设置根据IP反向解析主机名。在连接/会话持续期间,应该不会重新获取连接信息。统计信息、WLM等模块应该也是从这里获取相应信息。 + +涉及log\_hostname参数的主要函数调用关系如下(另外还有一个线程池模式下的函数调用链条,并没有更多有用信息): + +![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55c3c80c-e440-11eb-94c3-38f9d3cd240d.png) + +分别看一下各个函数: + +1. intGaussDbThreadMain\(knl\_thread\_arg\* arg\) (位于postmaster.cpp) + + 用户响应主程序 + +2. static void BackendInitialize\(Port\* port\) + + 函数注释: + + ``` + /* + *BackendInitialize -- initialize an interactive (postmaster-child) + + * backendprocess, and collect the client's startup packet. + …… + */ + ``` + + 先调用PreClientAuthorize函数进行用户认证,再调用ClientConnInitilize函数。 + + 后边实验中openGauss的日志也确实打印了用户认证信息: + + \[winchanged.qin.com\] 2021-07-06 15:23:49.91160e40505.1068 postgres 140518193252096 dn\_6001 0 dn\_6001 00000 0 \[BACKEND\] LOG: connection authorized:user=jack database=postgres + +3. intClientConnInitilize\(Port\* port\) + + 函数中提到了DNS交互:DNS interactions。 + + 代码片段: + + ``` + /* Save session start time. */ + port->SessionStartTime =GetCurrentTimestamp(); + RemoteHostInitilize(port); + /* + * Ready to begin client interaction. We will give up and exit(1) after a + * time delay, so that a broken clientcan't hog a connection + * indefinitely. PreAuthDelayand any DNS interactions above don't count + * against thetime limit. + */ + ``` + +4. static void RemoteHostInitilize\(Port\* port\) + + 函数作用:根据IP地址获取主机名,保存在当前会话中。 + + 代码片段: + + ``` + /* + * Getthe remote host name and port for logging and status display. + */ + remote_host[0] = '\0'; + remote_port[0] = '\0'; + if(pg_getnameinfo_all(&port->raddr.addr, + if(u_sess->attr.attr_storage.Log_connections) { + if (remote_port[0]) + ereport(LOG,(errmsg("connection received: host=%s port=%s", remote_host,remote_port))); + else + ereport(LOG,(errmsg("connection received: host=%s", remote_host))); + ``` + +5. int pg\_getnameinfo\_all\(\) +6. int getnameinfo\(\) + + 函数注释明确说明了反向解析IP地址的功能:Convert anipv4 address to a hostname。 + + 函数注释: + + ``` + /* + * Convert an ipv4 address to a hostname. + * + * Bugs: - Only supports NI_NUMERICHOST andNI_NUMERICSERV + * It will never resolv a hostname. + * - No IPv6 support. + */ + ``` + + 代码片段: + + ``` + if (inet_net_ntop(AF_INET, + &((struct sockaddr_in*)sa)->sin_addr, + sa->sa_family == AF_INET ? 32 : 128, + node, + nodelen) == NULL) { + return EAI_MEMORY; + } + ``` + + 下边的参数初始化函数也提示:开启log\_hostname可能会对性能产生不可忽视的影响。 + + ``` + src\common\backend\utils\misc\guc.cpp: + static void InitConfigureNamesBool() + {{"log_hostname", + PGC_SIGHUP, + LOGGING_WHAT, + gettext_noop("Logs the host name in the connection logs."), + gettext_noop("By default, connection logs only show the IP address" + "of the connecting host. 
If you want them to show the host name you" + "can turn this on, but depending on your host nameresolution " + "setup it might impose anon-negligible performance penalty.")}, + &u_sess->attr.attr_common.log_hostname, + ``` + + +## 实验 + +在不同场景下通过JDBC客户端程序连接数据库,结合数据库连接耗时(单位为毫秒)、openGauss日志、DNS日志进行分析。 + +“不同场景”主要指openGauss的log\_hostname参数、openGauss服务器的DNS设置、DNS服务设置的不同情况。 + +openGauss服务器的DNS设置: + +- 不配置DNS +- 配置为正确的DNS +- 配置为错误的DNS:DNS服务器不存在、未开机 + +DNS服务器设置: + +- unbound服务正常 +- unboun服务停止 +- 关机 + +实验环境: + + + + + + + + + + + + + + + + + + + + + + + + +

+| 类别 | 系统环境 | 地址 | 说明 |
+| :-- | :-- | :-- | :-- |
+| openGauss服务器 | CentOS7.6虚拟机 | 192.168.80.201 |  |
+| DNS服务器 | CentOS7.6虚拟机、Unbound1.6.6(DNS) | 192.168.80.111 | DNS服务器与openGauss位于同一网段;只有一级DNS |
+| 客户端测试程序 | Windows+eclipse、openGauss JDBC2.0.0 | 192.168.80.1 |  |
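+复现该实验前,可以先在openGauss服务器上验证到unbound的反向解析链路是否按预期工作(示意命令,IP以上表为准):
+
+```bash
+# 直接向实验用的 unbound 服务器查询客户端 IP 的 PTR 记录(反向解析)
+dig -x 192.168.80.1 @192.168.80.111 +short
+
+# 或使用 host 命令,走 /etc/resolv.conf 中配置的 DNS
+host 192.168.80.1
+```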
+ +客户端程序代码(官方示例代码): + +``` +public static void main(String[] args) { + long startTime=System.currentTimeMillis(); + //创建数据库连接,连接地址:"jdbc:postgresql://192.168.80.201:26000/postgres"。 + Connection conn = GetConnection("jack", "gauss@123"); + long endTime=System.currentTimeMillis(); + long time=endTime-startTime; + System.out.println("连接耗时:"+ time); + //批插数据。 + BatchInsertData(conn); + BatchInsertData(conn); + //关闭数据库连接。 + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +``` + +为方便查看日志,设置openGauss数据库的GUC参数: + +- log\_line\_prefix:\[%h\] %m %c %d %p %a %x %n %e + + 日志行首信息,\[%h\]表示开启log\_hostname的情况下,如果获取到了主机名则显示为主机名,否则显示为IP + +- log\_connections:on + + 客户端连接时打印连接信息 + + +- log\_disconnections:on + + 客户端断开连接时打印会话持续时间 + + +测试结果如下表: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| 序号 | log_hostname | openGauss服务器 | DNS服务器 | 10次连接耗时:最短、最长(毫秒) | 说明 |
+| :-- | :-- | :-- | :-- | :-- | :-- |
+| 1 | off | 不配置DNS | —— | 483-559 | 日志打印IP |
+| 2 | on | 配置DNS | 服务正常 | 475-555 |  |
+| 3 | on | 不配置DNS | —— | 500左右 |  |
+| 4 | on | 配置DNS | 服务正常,但是不包含客户端IP与主机的映射关系 | 500左右 | 日志打印IP,而不是主机名 |
+| 5 | on | 配置DNS | 服务关闭 | 476-525 | 日志打印IP,而不是主机名 |
+| 6 | on | 错误的DNS服务器:19.19.19.19 | —— | 10497-10566 | 关闭log_hostname之后新连接恢复正常 |
+| 7 | on | 错误的DNS服务器:192.168.80.222(不存在,或者未开机) | —— | 首次:10513;后续九次:6534-6844 | 关闭log_hostname之后新连接恢复正常 |
+| 8 | on | 配置DNS | 客户端连接期间,停止DNS服务、修改IP和主机名映射、DNS指向不存在的IP | 489-534 | 客户端连接期间,更改DNS服务不影响 |
+| 9 | off->on | 先关闭log_hostname,客户端连接上之后再开启 | 服务正常 | —— | 客户端连接期间,更改log_hostname不影响,还是只显示连接时的IP |
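+复现上表各场景时,由于log\_hostname是SIGHUP类型参数,可以用gs\_guc reload在两轮测试之间在线切换,无需重启数据库(示意命令,-I all 的用法与参数取值请以实际部署和官方文档为准):
+
+```bash
+# 在线打开 log_hostname(只影响之后新建立的连接)
+gs_guc reload -I all -c "log_hostname=on"
+
+# 一轮测试结束后再关闭
+gs_guc reload -I all -c "log_hostname=off"
+```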
+根据实验结果得出以下结论:
+
+- 启用log\_hostname并正常设置DNS,数据库连接速度基本没有影响,耗时500毫秒左右;
+- 启用log\_hostname但是不配置DNS,数据库连接速度基本没有影响(与不启用log\_hostname相比);
+- 启用log\_hostname并设置DNS,但是DNS不包含客户端IP与主机的映射关系,数据库连接速度基本没有影响(与不启用log\_hostname相比);
+- 启用log\_hostname并且DNS设置为不存在的IP地址,数据库连接速度骤降至6-10秒,因为要等DNS反向解析超时,关闭log\_hostname之后新的数据库连接恢复正常;
+- 启用log\_hostname并设置DNS,DNS服务器开机但是unbound服务未启动,数据库连接速度基本没有影响,此时由于获取不到主机名,日志只会打印IP;
+- 启用log\_hostname,客户端连接持续期间,DNS设置的变化不影响已建立的连接,包括停止DNS服务、修改IP和主机名映射、DNS指向不存在的IP,说明反向解析行为只会在连接的时候进行一次;
+- 启用log\_hostname并设置DNS,DNS服务停止,连接时间不受影响,只是获取不到主机名;但是如果DNS服务器关机,连接速度极度恶化,与DNS指向不存在IP的情况一样
+
+提示:实验环境DNS服务器与openGauss服务器位于同一网段,只有一级DNS,且映射关系很少,这可能是DNS反向解析未增加时间消耗的原因。
+
+其他相关内容查询:
+
+以下是查询到的与log\_hostname相关的部分审计信息、统计信息:
+
+- select \* from pg\_query\_audit\('2021-07-07 08:00:00','2021-07-07 10:00:00'\);
+- select time,type,result,username,database,client\_conninfo,node\_name from pg\_query\_audit\('2021-07-07 08:00:00','2021-07-07 10:00:00'\); --倒数第二列显示了主机名
+
+    ![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ccb462-e440-11eb-94c3-38f9d3cd240d.png)
+
+- select datid,datname,pid,sessionid,usesysid,usename,application\_name,client\_addr,client\_hostname,waiting,state from PG\_STAT\_ACTIVITY; --倒数第三列显示了主机名
+
+    ![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55df6abc-e440-11eb-94c3-38f9d3cd240d.png)
+
+
+主要证迹:
+
+- openGauss日志,行首中括号为客户端的IP或者hostname:
+
+    ![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_55ed0a50-e440-11eb-94c3-38f9d3cd240d.png)
+
+    ![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_5600e228-e440-11eb-94c3-38f9d3cd240d.png)
+
+- DNS服务器unbound映射:
+
+    ![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_560d1d68-e440-11eb-94c3-38f9d3cd240d.png)
+
+- DNS日志:
+
+    ![](figures/https-oss-emcsprod-public-modb-pro-wechatSpider-modb_20210714_561e3044-e440-11eb-94c3-38f9d3cd240d.png)
+
+
+## 总结
+
+启用log\_hostname,只会影响新的数据库连接,在DNS服务正常或DNS服务虽然未启动但所在机器正常运行的情况下,基本不影响连接耗时,但是在DNS服务器不可用(服务器关机、或者配置为不存在的服务器)的情况下,连接耗时明显增加。
+
diff --git "a/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223xlog\347\233\256\345\275\225\346\273\241\351\227\256\351\242\230\345\244\204\347\220\206.md" "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223xlog\347\233\256\345\275\225\346\273\241\351\227\256\351\242\230\345\244\204\347\220\206.md"
new file mode 100644
index 0000000000000000000000000000000000000000..4939d0217880765a9675cfc39afdd20854a57acb
--- /dev/null
+++ "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223xlog\347\233\256\345\275\225\346\273\241\351\227\256\351\242\230\345\244\204\347\220\206.md"
@@ -0,0 +1,148 @@
++++
+
+title = "openGauss数据库xlog目录满问题处理"
+
+date = "2021-09-21"
+
+tags = [ "openGauss数据库xlog目录满问题处理"]
+
+archives = "2021-09"
+
+author = "阎书利"
+
+summary = "openGauss数据库xlog目录满问题处理"
+
+img = "/zh/post/July/title/img11.png"
+
+times = "12:30"
+
++++
+
+# openGauss数据库xlog目录满问题处理
+
+openGauss数据库xlog满通常为以下几个原因:
+
+1.主备状态不正常,存在网络问题,集群内有宕机的节点
+
+2.xlog保留数量过多
+
+3.逻辑复制槽失效,且未及时清理
+
+4.开启归档,但归档失败导致xlog不清理
+
+**首先,确认数据库状态**
+
+```
+gs_om -t query
+```
+
+确认主备状态,是否存在宕机的节点。
+
+查看是否存在down,Standby Need repair\(WAL\)或者unknown的状态。
+
+如果数据库状态不正常,且xlog目录使用率已达100%,需要手动移走一部分xlog,检查数据库状态后将库拉起,并排查相关问题。
+
+如果数据库状态正常,仅xlog目录大,则继续排查其他问题。
+
+**清理:**
+
+1.找一个空间大的目录
+
+例如:
+
+```
+su - omm
+cd /opengauss_bak
+mkdir xlog_mv_0919
+```
+
+2.移走部分xlog,到xlog路径下
+
+```
+cd /ogdata/data/dn1/pg_xlog
+```
+
+查看xlog数量,看是否xlog保留过多
+
+```
+ls | wc -l
+```
+
+注意:为了恢复环境,先移动一小部分xlog,其余等问题处理之后再自行清理。
+
+生成移动xlog语句,并检查(前1000条) + +``` +ls -ltr | head -n 1000 | awk '{print "mv "$9 " /opengauss_bak/xlog_mv_0919/"}' +``` + +3.\#实际执行移动操作 + +``` +ls -ltr | head -n 1000 | awk '{print "mv "$9 " /opengauss_bak/xlog_mv_0919/"}' | sh +``` + +4.移动之后df -Th看空间是否下来 + +5.gs\_om -t query 查看数据库状态 + +如果不正常,需要先尝试拉起主数据库 + +``` +gs_ctl start -D /ogdata/data/dn1 +``` + +然后依次拉起备机数据库 + +``` +gs_ctl start -D /ogdata/data/dn1 -M standby +``` + +备库拉不起来则先不处理,等找到xlog目录满源头后(例如主库删除失效逻辑复制后),考虑做build\(先尝试增量不行再用增量) + +``` +gs_ctl build -D /ogdata/data/dn1 -b incremental +gs_ctl build -D /ogdata/data/dn1 -b full +``` + +6.登录主数据库查看逻辑复制槽状态,查看有无失效逻辑复制槽 + +``` +select * from pg_replication_slots; +``` + +7.在主库删除失效逻辑复制槽 + +``` +select * from pg_drop_replication_slot('aohdoasdaoiodiandoan'); +---------aohdoasdaoiodiandoan为逻辑复制槽名字 +``` + +删除失效的逻辑复制槽,主库和备库的xlog目录应该都会释放一部分空间 + +8.删除后 df -Th看空间是否下来 + +9.参数调整 + +(1)查看wal\_keep\_segments参数,该参数为Xlog日志文件段数量,“pg\_xlog”目录下保留事务日志文件的最小数目。 + +(2)查看max\_size\_for\_xlog\_prune参数,在enable\_xlog\_prune打开时生效,如果有备机断连且xlog日志大小大于此阈值,则回收日志。 + +根据实际状况,可进行修改。 + +(3)如果是PG13版本,可考虑开启max\_slot\_wal\_keep\_size参数,他是允许replication slot 保留的wal文件的最大 + +大小,用于防止wal无限增大导致主库的文件系统空间被撑爆,设置该参数之后如果超过该参数值,PostgreSQL将开始删除最 + +早的WAL文件。默认值是-1,-1表示表示禁用本功能。单位是MB。 + +10.检查归档模式是否开启 + +``` +show archive_mode; +``` + +到归档目录下,看开启归档参数时,是否有归档。并检查归档空间,排除归档相关问题。 + diff --git "a/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223\347\216\257\345\242\203\351\205\215\347\275\256.md" "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223\347\216\257\345\242\203\351\205\215\347\275\256.md" new file mode 100644 index 0000000000000000000000000000000000000000..df61e042c67e3a759f9291bb9e65db6c7c56d6e5 --- /dev/null +++ "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223\347\216\257\345\242\203\351\205\215\347\275\256.md" @@ -0,0 +1,220 @@ ++++ + +title = "openGauss数据库环境配置" + +date = "2021-12-09" + +tags = [ "openGauss数据库环境配置"] + +archives = "2021-12" + +author = "…" + +summary = "openGauss数据库环境配置" + +img = "/zh/post/July/title/img11.png" + +times = "12:30" + ++++ + +# openGauss数据库环境配置 + +## 一、首先在虚拟机装入centos系统 + +参考链接 + +[openGauss——VMware安装 | C1everF0x's Blog](https://c1everf0x.github.io/2021/04/10/openGauss%25E2%2580%2594%25E2%2580%2594VMware%25E5%25AE%2589%25E8%25A3%2585/) + +创建用户 + +![](figures/zh-cn_image_0000001187252632.png) + +安装好后查看系统的版本 + +![](figures/zh-cn_image_0000001232813719.png) + +## 二、网络配置 + +点一下安装位置然后点完成退出来,默认设置就行,再点 “网络和主机名”,打开以太网的开关 + +主机名字自己定一个,ip地址也要记住,两个信息都要记住。 + +![](figures/zh-cn_image_0000001187412548.png) + +![](figures/zh-cn_image_0000001187094074.png) + +![](figures/zh-cn_image_0000001186934098.png) + +问题一:虚拟机能够ping通过主机、主机ping不通虚拟机。 + +参考链接: + +https://blog.csdn.net/weixin\_43837229/article/details/94733475?utm\_medium=distribute.pc\_relevant.none-task-blog-2\~default\~baidujs\_title\~default-1.control&spm=1001.2101.3001.4242 + +虚拟机能够ping通过主机 + +![](figures/zh-cn_image_0000001232732233.png) + +本机ping虚拟机ip,无法通信 + +![](figures/zh-cn_image_0000001232613679.png) + +解决方式: + +在本机查看虚拟机ip,和虚拟机本身的ip不符合 + +![](figures/zh-cn_image_0000001232492169.png) + +以win10为例,打开电脑设置=》网络和lnelnternet=》网络和共享中心=》更高适配器设置,找到如下虚拟机 + +![](figures/zh-cn_image_0000001187252634.png) + +右键点击属性,找到 + +![](figures/zh-cn_image_0000001232813721.png) + +右键点击属性,找到 + +![](figures/zh-cn_image_0000001187412550.png) + +这时不管是主机ping虚拟机还是虚拟机ping主机都通了 + +实验结果: + +![](figures/zh-cn_image_0000001187094076.png) + +问题二 :ssh连接不了 + +失败: + +![](figures/zh-cn_image_0000001186934100.png) + 
+经过查询资料问题解决,主要是使用ssh命令并不代表开启了ssh服务器,我们通常在powershell中直接使用的ssh命令其实是win10专业版默认开启了OpenSSH客户端(OpenSSH Client),而现在想要远程ssh登录到win10,则需要开启ssh服务端。 + +解决步骤: + +1、打开设置——应用,找到可选功能,点击进入 + +![](figures/zh-cn_image_0000001232732235.png) + +2、在可选功能页面,点击添加功能,找到OpenSSH 服务器并安装 + +![](figures/zh-cn_image_0000001232613681.png) + +3、接下来启动sshserver服务,按win+r打开运行,输入services.msc,并回车键打开 + +![](figures/zh-cn_image_0000001232492171.png) + +4、在服务中找到OpenSSH SSH Server 和 OpenSSH Authentication Agent 两个服务,启动它们并右键——属性,设置为自动启动 + +![](figures/zh-cn_image_0000001187252636.png) + +成功 + +![](figures/zh-cn_image_0000001232813723.png) + +问题三:ssh服务器拒绝了密码,请再试一次 + +![](figures/zh-cn_image_0000001187412552.jpg) + +虚拟机用ssh连接自己可以连接上,但是主机的ssh连接不上虚拟机。并且密码正确。 + +在查找多种解决办法,经过多次尝试都没有用处的情况下,我准备换一种方式。 + +最终解决办法: + +利用容器安装OpenGauss数据库: + +1、安装curl + +sudo apt install curl + +2、安装docker + +curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun + +3、运行 opengauss 镜像 + +sudo docker run --name opengauss --privileged=true -d -p 5432:5432 -e GS\_PASSWORD=Enmo@123 enmotech/opengauss:latest + +4、进入容器 + +sudo docker exec -it opengauss bash + +5、连接数据库 ,切换到omm用户 ,用gsql连接到数据库 + +![](figures/zh-cn_image_0000001187094078.png) + +第二次启动镜像. + +先启动容器,然后进入shell + +1、必须先启动容器 + +sudo docker start “容器ID” + +2、然后使用下边的命令进入shell + +sudo docker exec -it “容器ID” bash + +3、将主机的文件复制到容器里 + +sudo docker cp 主机目录 容器ID:容器目录 + +如果要编辑里边的配置文件,例如编辑nginx的配置文件,docker容器里没有默认的编辑工具,需要安装 + +sudo apt-get update + +sudo apt-get install vim + +也可以通过替换的方式,编辑文件 + +sudo docker cp :/path/to/file.ext . // 复制出来修改 + +sudo docker cp file.ext :/path/to/file.ext //修改完复制回去 + +4、编辑完容器之后,将改动嵌入到镜像里,因为下次更新站点的话,是首先更新镜像,然后创建新容器的 + +sudo docker commit 容器ID 镜像名称 + +![](figures/zh-cn_image_0000001186934102.png) + +使用: + +连接成功 + +![](figures/zh-cn_image_0000001232732237.png) + +创建用户 + +![](figures/zh-cn_image_0000001232613683.png) + +创建数据库 + +![](figures/zh-cn_image_0000001232492173.png) + +![](figures/zh-cn_image_0000001187252638.png) + +效果: + +![](figures/zh-cn_image_0000001232813725.png) + +还能够容器外部连接。 + +创建SCHEMA + +![](figures/zh-cn_image_0000001187412554.jpg) + +创建表 + +![](figures/zh-cn_image_0000001187094080.jpg) + +插入数据 + +![](figures/zh-cn_image_0000001186934104.png) + +结果 + +![](figures/zh-cn_image_0000001232732239.png) + diff --git "a/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..e5592f46802b65a9254f1d8e006b88dcaf81cbd3 --- /dev/null +++ "b/content/zh/post/July/openGauss\346\225\260\346\215\256\345\272\223\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" @@ -0,0 +1,247 @@ ++++ + +title = "openGauss数据库的安装运行" + +date = "2021-12-09" + +tags = [ "openGauss数据库的安装运行"] + +archives = "2021-12" + +author = "匿名" + +summary = "openGauss数据库的安装运行" + +img = "/zh/post/July/title/img10.png" + +times = "12:30" + ++++ + +# openGauss数据库的安装运行 + +## 安装 CentOS 7.6 操作系统(可用虚拟机) + +- 下载安装openGauss之前,需要先在虚拟机上安装centOS系统,注意版本要限制为7.6。可以选择在主机上安装软件MobaXterm,该软件适用于在主机和虚拟机之间传输文件 +- 安装centOS:选择VMware作为虚拟机,需自行下载。启动Vmware,进入其主页面。 + + ![](figures/20211210-bdbc7405-0da5-40c5-a551-7f80d62a6bee.png) + + 点击“创建新的虚拟机”,进入虚拟机设置向导界面,选择“自定义(高级)”。 + + ![](figures/20211210-9293d8e6-3a8c-4d77-998a-5836c1668a33.png) + + 点击“下一步”,进入“安装操作系统”界面,下载CentOS-7-x86\_64-DVD-1810.iso映像文件,并通过“浏览”按钮找到文件位置 + + 
![](figures/20211210-a5afea0f-957e-4043-9eb6-c14c1f59c069.png) + + 点击“下一步”,进入“选择客户机操作系统”,选择“Linux”,并在“版本”下拉列表框中选择要安装的对应的Linux版本,这里选择CentOS 7 64位。 + + ![](figures/20211210-74432ea4-1267-4ff5-954b-14eb8af1cdea.png) + + 继续“下一步”,进入“命名虚拟机”界面,给虚拟机起一个名字,然后单击“浏览”按钮,选择虚拟机系统安装文件的保存位置,建议选择C盘以外的盘。 + + ![](figures/20211210-28d6263f-8c33-47ea-ae52-defe7ee5ecd0.png) + + 继续“下一步”,进入“处理器配置”界面,选择处理器数量和每个处理器的内核数量,可以按图配置,具体以自己电脑的配置为准。 + + ![](figures/20211210-63337a88-2942-4254-8e7d-5df49d059915.png) + + 继续“下一步”,进入“此虚拟机的内存”界面,这里建议内存选择4GB到8GB最佳。 + + ![](figures/20211210-3cc8da73-a3f5-4ba6-9563-99b3ef34820f.png) + + 继续“下一步”,直到“指定磁盘容量”界面,由于OpenGauss源代码较大,最大磁盘大小建议40GB以上,之后选择将虚拟磁盘拆分成多个文件。 + + ![](figures/20211210-ea06bee8-b0b7-44f7-a656-f4e9eddea98e.png) + + 点击“下一步”,直至完成。完成后将进入下图的界面,点击开启此虚拟机。 + + ![](figures/20211210-106af296-2dc6-4b45-a794-6dce76a12901.png) + + 选择Install CentOS 7。 + + ![](figures/20211210-66d6be04-870c-45e6-8a61-684e41032431.png) + + 进入安装引导界面,在安装引导过程中可以使用中文。 + + ![](figures/20211210-8a062cb0-cb96-485c-b2da-1d1cb5cd0b0b.png) + + 随后进入“安装信息摘要”界面,其中安装源选择“本地介质” 。 + + ![](figures/20211210-7bf5f3c6-2b6a-4ab9-9b6d-90a0ed0aea41.png) + + 点击软件选择。勾选左侧“带GUI的服务器”,加选项可以选择“FTP服务器”、“Java平台”、“PostgreSQL数据库服务器”、“开发工具”等。点击完成。 + + ![](figures/20211210-d5e22e70-6156-4350-800e-2ffb897830dc.png) + + 其他默认选择,点击开始安装。设置ROOT密码和创建用户即可。 + + ![](figures/20211210-848ee9f8-263a-4376-9fc4-f9543764687d.png) + + 成功后点击重启,同意协议,登录即可。使用语言请选择英文。得到下图页面即成功 + + ![](figures/20211210-5a69bbef-d423-4705-a2ed-9065d2e288a9.png) + +- MobaXterm的使用 + + ![](figures/20211210-1940ef8d-5cc8-433a-9eaf-41462282a702.png) + + 下载MobaXterm\_Personal\_20.3.exe文件并打开,这是可以直接使用的版本 + + ![](figures/20211210-aac434b7-9ca3-4120-b309-99aa2e9e21a9.png) + + 点击右上角Session-\>SSH,与刚刚创建好的虚拟机建立SSH连接,其中Remote host 为虚拟机的IP,得到下图的界面并且可以运行即可。 + +- 源码下载地址:https://gitee.com/opengauss/openGauss-server + + 下载完成之后,将整个openGauss-server通过MobaXterm上传到centos上。本例将其放在/sda下。 + + ![](figures/20211210-db3fb345-411f-4db3-8560-7a01dbecd324.png) + + +## 编译 + +- 在centos中预先配置编译openGauss所需的环境。 + + 一键执行环境初始化脚本: + + https://www.modb.pro/db/48909 + + 注意事项:需要将脚本文件中的IP地址该为个人虚拟机的ip。 + + (通过ifconfig命令查看虚拟机ip) + + ![](figures/20211210-96bb7e65-c7eb-4e31-aeef-84b891971183.png) + + ![](figures/20211210-bbf89f72-abb0-4f41-a03f-d15eb662944f.png) + + +- 如何使用脚本文件:创建脚本文件,xxx.sh,将上一步的脚本内容更改后保存到文件中sh xxx.sh。编译openGauss需要openGauss-server和binarylibs两个组件。openGauss-server:openGauss的主要代码。binarylibs:openGauss依赖的第三方开源软件。通过以下网站获取编译好的binarylibs。下载后解压缩并重命名为binarylibs。 + + https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/openGauss-third\_party\_binarylibs.tar.gz + +- 已经拥有完整的openGauss代码,把它存储在以下目录中(以sda为例)。 + + /sda/openGauss-server + + /sda/binarylibs + + ![](figures/20211210-c820f96b-328a-4d16-a45c-19cf5779a8a4.png) + + 执行以下脚本获取系统版本号: + + ![](figures/20211210-93bb4f52-7b28-48b6-a13c-0f864d518183.png) + + 命令回显信息即为openGauss支持的操作系统。目前openGauss支持的操作系统为centos7.6\_x86\_64和openeuler\_aarch64。 + + 如果显示Failed或其他版本,表示openGauss不支持当前操作系统。 + +- 配置环境变量,例如,在CENTOS X86-64平台上,binarylibs目录被作为openGauss-server目录的兄弟目录。 在openGauss-server目录下执行以下命令。 + + 选择debug版本进行配置: + + ![](figures/20211210-685f5e8c-6074-4356-831e-a6354dc1d658.png) + +- 执行以下命令编译openGauss + + ![](figures/20211210-0e476672-b5db-4ad6-9289-c10feb76f434.png) + + ![](figures/20211210-557280d9-ad84-4d05-91fa-43c6feff40df.png) + +- 显示如下信息,则表示编译和安装成功。 + + ![](figures/20211210-7c90cd31-2185-45ba-acb9-04d2bb079b14.png) + + ![](figures/20211210-99750504-0e35-4b30-bf35-263e6c17640d.png) + + +## 启动数据库 + +- 启动数据库,首先需要创建omm用户:useradd omm + + 切换至omm用户下(su - omm) + + 运行: + + 
![](figures/20211210-2110a0e7-93a5-4f25-b530-b193e48c6e21.png) + + +- /opt/gaussdb为安装后数目录,Bigdata@123为数据库用户密码\(密码可以自己设\)。 + +注意: + +只有omm用户才可以使用gs\_initdb等命令 + +如果提示有.so文件没有连接,则重新设置一遍环境变量 + +- 启动数据库 + +![](figures/20211210-327c67fc-3a60-4737-b7c0-841cbccb8fce.png) + +出现以下信息为启动成功 + +![](figures/20211210-7bc0bdbf-4577-42c5-99ff-3a5462e887d5.png) + +连接数据库 + +![](figures/20211210-3d1d1b84-c4aa-4dc2-bf0b-b33629a87bf1.png) + +其中5432为端口号,可以通过/opt/gaussdbpostgresql.conf文件查找。 + +![](figures/20211210-b5e98799-1e5c-411a-a4b8-bc25c8187085.png) + +启动成功: + +![](figures/20211210-415b9d65-5d55-4ac2-a8c4-81a5205d6b6e.png) + +## 调试 + +首先在centos虚拟机中安装调试工具eclipse,安装eclipse前需要先安装Java,进入命令行页面,查看Java是否已正常安装 + +![](figures/20211210-31165906-62b2-4632-ac2c-25490d109830.png) + +若未安装Java,执行如下命令 + +![](figures/20211210-9fdeb2db-b7d4-49ee-8dd5-3a933ddf6bc8.png) + +如果出现类似如下信息,则说明Java已正常安装。 + +![](figures/20211210-a86cee82-2d69-4306-bc33-be92507f0cf9.png) + +下载eclipse文件: + +http://www.eclipse.org/downloads/packages/release/Luna/SR2 + +下载与操作系统版本对应的Eclipse软件 + +将下载好的tar.gz文件上传到centos上去,例如放在/opt目录下。解压压缩包。 + +解压后进入eclipse文件夹,双击eclipse可执行文件或命令行中执行”./eclipse”即可运行eclipse。 + +在eclipse中导入代码:File/Import,选择C/C++下的Existing as Makefile Project + +![](figures/20211210-ad5cc11c-fe1e-4905-b05d-5f7f5307c029.png) + +选择解压后的代码目录;language复选框中把C++去掉,因为openGauss是用C语言写的;toolchain选择linux GCC; + +导入之后可以看到这样的信息: + +![](figures/20211210-a47a2b19-4bef-43d0-8c5d-b2e8da4715a3.png) + +首先需要启动openGauss数据库 + +![](figures/20211210-5fb969ff-5bb1-46dc-bb8f-3587b9d4d0ca.png) + +Run/debug configunations中,设置挂载进程的路径 + +![](figures/20211210-4da5c7fb-32fd-4bf7-b359-9a1759f2b4eb.png) + +![](figures/20211210-bd249bef-5603-499b-8379-2ef1d28d796f.png) + +Tips: 节约时间,可以选择 Disable auto build 从而避免每次开始调试时的 make 环节,事实上每次 make 的结果并不会用到 + +开始调试时,确保数据库的服务端已经启动,此时选择进程名 guassdb\(omm\) + +在文件 execMain.cpp 中设置断点,开始调试. 
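+如果不方便使用Eclipse,也可以改用gdb直接挂载gaussdb进程来调试(示意;gdb为另行安装的工具,并非本文Eclipse流程的一部分,挂载与断点方式仅供参考):
+
+```bash
+# openGauss 为单进程多线程模型,找到 gaussdb 进程后用 gdb 挂载
+gdb -p $(pgrep -x gaussdb | head -n 1)
+
+# 进入 gdb 后即可设置断点并继续运行,例如:
+#   (gdb) break ExecutorRun
+#   (gdb) continue
+```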
+ diff --git "a/content/zh/post/July/openGauss\347\211\251\347\220\206\345\244\207\344\273\275\346\201\242\345\244\215\344\271\213gsbasebackup.md" "b/content/zh/post/July/openGauss\347\211\251\347\220\206\345\244\207\344\273\275\346\201\242\345\244\215\344\271\213gsbasebackup.md" new file mode 100644 index 0000000000000000000000000000000000000000..dc3874100c8b39b0c04508ff692dd0fa6bc61d90 --- /dev/null +++ "b/content/zh/post/July/openGauss\347\211\251\347\220\206\345\244\207\344\273\275\346\201\242\345\244\215\344\271\213gsbasebackup.md" @@ -0,0 +1,344 @@ ++++ + +title = "openGauss物理备份恢复之gs basebackup" + +date = "2021-08-09" + +tags = [ "openGauss物理备份恢复之gs basebackup"] + +archives = "2021-08" + +author = "Walrus" + +summary = "openGauss物理备份恢复之gs basebackup" + +img = "/zh/post/July/title/img8.png" + +times = "12:30" + ++++ + + + +# openGauss物理备份恢复之gs\_basebackup + +## gs\_basebackup背景说明 + +- gs\_basebackup仅支持全量备份,不支持增量。 +- gs\_basebackup当前支持热备份模式和压缩格式备份模式。 +- gs\_basebackup在备份包含绝对路径的表空间时,如果在同一台机器上进行备份,可以通过tablespace-mapping重定向表空间路径,或使用归档模式进行备份。 + +## gs\_basebackup参数说明 + +gs\_basebackup参数可以分为如下几类: + +- -D directory 备份文件输出的目录,必选项。 + +常用参数: + +- -c,–checkpoint=fast|spread 设置检查点模式为fast或者spread\(默认\)。 +- -l,–label=LABEL 为备份设置标签。 +- -P,–progress 启用进展报告。 +- -v, –verbose 启用冗长模式。 +- -V, –version 打印版本后退出。 +- -?,–help 显示gs\_basebackup命令行参数。 +- -T,–tablespace-mapping=olddir=newdir + + 在备份期间将目录olddir中的表空间重定位到newdir中。为使之有效,olddir必须正好匹配表空间所在的路径(但如果备份中没有包含olddir中的表空间也不是错误)。olddir和newdir必须是绝对路径。如果一个路径凑巧包含了一个=符号,可用反斜线对它转义。对于多个表空间可以多次使用这个选项。 + +- -F,–format=plain|tar + + 设置输出格式为plain\(默认\)或者tar。没有设置该参数的情况下,默认–format=plain。plain格式把输出写成平面文件,使用和当前数据目录和表空间相同的布局。当集簇没有额外表空间时,整个数据库将被放在目标目录中。如果集簇包含额外的表空间,主数据目录将被放置在目标目录中,但是所有其他表空间将被放在它们位于服务器上的相同的绝对路径中。tar模式将输出写成目标目录中的 tar 文件。主数据目录将被写入到一个名为base.tar的文件中,并且其他表空间将被以其 OID 命名。生成的tar包,需要用gs\_tar命令解压。 + +- -X, –xlog-method=fetch|stream + + 设置xlog传输方式。没有设置该参数的情况下,默认–xlog-method=stream。在备份中包括所需的预写式日志文件(WAL文件)。这包括所有在备份期间产生的预写式日志。fetch方式在备份末尾收集预写式日志文件。因此,有必要把wal\_keep\_segments参数设置得足够高,这样在备份末尾之前日志不会被移除。如果在要传输日志时它已经被轮转,备份将失败并且是不可用的。stream方式在备份被创建时流传送预写式日志。这将开启一个到服务器的第二连接并且在运行备份时并行开始流传输预写式日志。因此,它将使用最多两个由max\_wal\_senders参数配置的连接。只要客户端能保持接收预写式日志,使用这种模式不需要在主控机上保存额外的预写式日志。 + +- -x,–xlog 使用这个选项等效于和方法fetch一起使用-X。 +- -Z –compress=level + + 启用对 tar 文件输出的 gzip 压缩,并且制定压缩级别(0 到 9,0 是不压缩,9 是最佳压缩)。只有使用 tar 格式时压缩才可用,并且会在所有tar文件名后面自动加上后缀.gz。 + +- -z 启用对 tar 文件输出的 gzip 压缩,使用默认的压缩级别。只有使用 tar 格式时压缩才可用,并且会在所有tar文件名后面自动加上后缀.gz。 +- -t,–rw-timeout 设置备份期间checkpoint的时间限制,默认限制时间为120s。当数据库全量checkpoint耗时较长时,可以适当增大rw-timeout限制时间。 + +连接参数 + +- -h, –host=HOSTNAME 指定正在运行服务器的主机名或者Unix域套接字的路径。 +- -p,–port=PORT 指定数据库服务器的端口号 +- -U,–username=USERNAME 指定连接数据库的用户。 +- -s, –status-interval=INTERVAL 发送到服务器的状态包的时间\(以秒为单位\) +- -w,–no-password 不出现输入密码提示。 +- -W, –password 当使用-U参数连接本地数据库或者连接远端数据库时,可通过指定该选项出现输入密码提示。 + +## **实验示例** + +**主节点丢失重要文件模拟实验:** + +- 查看群集状态: + + ``` + [omm@wzsy01 ~]$ gs_om -t status --detail + [ Cluster State ] + + + cluster_state : Normal + redistributing : No + current_az : AZ_ALL + + + [ Datanode State ] + node node_ip instance state | node node_ip instance state + -------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1 wzsy01 9.1.14.39 6001 opt/huawei/install/data/d1 P Primary Normal | 2 wzsy02 9.1.14.40 6002 opt/huawei/install/data/d2 S Standby Normal + + ``` + + +- 备份主节点: + + ``` + [omm@wzsy01 ~]$ gs_basebackup -D home/omm/backup/ -h wzsy01 -p 15400 + INFO: The starting position of the 
xlog copy of the full build is: 0/4B000028. The slot minimum LSN is: 0/4B000140. + [2021-07-26 11:22:00]:begin build tablespace list + [2021-07-26 11:22:00]:finish build tablespace list + [2021-07-26 11:22:00]:begin get xlog by xlogstream + [2021-07-26 11:22:00]: check identify system success + [2021-07-26 11:22:00]: send START_REPLICATION 0/4B000000 success + [2021-07-26 11:22:00]: keepalive message is received + [2021-07-26 11:22:00]: keepalive message is received + [2021-07-26 11:22:03]: keepalive message is received + [2021-07-26 11:22:11]:gs_basebackup: base backup successfully + [omm@wzsy01 ~]$ cd backup/ + [omm@wzsy01 backup]$ ls -lh + total 4.9M + -rw------- 1 omm dbgrp 208 Jul 26 11:22 backup_label + drwx------ 6 omm dbgrp 54 Jul 26 11:22 base + -rw------- 1 omm dbgrp 4.3K Jul 26 11:22 cacert.pem + drwx------ 2 omm dbgrp 4.0K Jul 26 11:22 global + -rw------- 1 omm dbgrp 4.7M Jul 26 11:22 gswlm_userinfo.cfg + -rw------- 1 omm dbgrp 20K Jul 26 11:22 mot.conf + drwx------ 2 omm dbgrp 26 Jul 26 11:22 pg_clog + drwx------ 2 omm dbgrp 26 Jul 26 11:22 pg_csnlog + -rw------- 1 omm dbgrp 0 Jul 26 11:22 pg_ctl.lock + drwx------ 2 omm dbgrp 6 Jul 26 11:22 pg_errorinfo + -rw------- 1 omm dbgrp 4.5K Jul 26 11:22 pg_hba.conf + -rw------- 1 omm dbgrp 4.5K Jul 26 11:22 pg_hba.conf.bak + -rw------- 1 omm dbgrp 1.0K Jul 26 11:22 pg_hba.conf.lock + -rw------- 1 omm dbgrp 1.6K Jul 26 11:22 pg_ident.conf + drwx------ 4 omm dbgrp 39 Jul 26 11:22 pg_llog + drwx------ 4 omm dbgrp 36 Jul 26 11:22 pg_multixact + drwx------ 2 omm dbgrp 26 Jul 26 11:22 pg_notify + drwx------ 2 omm dbgrp 6 Jul 26 11:22 pg_replslot + drwx------ 2 omm dbgrp 6 Jul 26 11:22 pg_serial + drwx------ 2 omm dbgrp 6 Jul 26 11:22 pg_snapshots + drwx------ 2 omm dbgrp 25 Jul 26 11:22 pg_stat_tmp + drwx------ 2 omm dbgrp 6 Jul 26 11:22 pg_tblspc + drwx------ 2 omm dbgrp 6 Jul 26 11:22 pg_twophase + -rw------- 1 omm dbgrp 4 Jul 26 11:22 PG_VERSION + drwx------ 3 omm dbgrp 92 Jul 26 11:22 pg_xlog + -rw------- 1 omm dbgrp 38K Jul 26 11:22 postgresql.conf + -rw------- 1 omm dbgrp 38K Jul 26 11:22 postgresql.conf.bak + -rw------- 1 omm dbgrp 1.0K Jul 26 11:22 postgresql.conf.lock + -rw------- 1 omm dbgrp 4.3K Jul 26 11:22 server.crt + -rw------- 1 omm dbgrp 1.8K Jul 26 11:22 server.key + -rw------- 1 omm dbgrp 56 Jul 26 11:22 server.key.cipher + -rw------- 1 omm dbgrp 24 Jul 26 11:22 server.key.rand + ``` + +- 模拟主节点丢失文件 + + ``` + [omm@wzsy01 ~]$ cd opt/huawei/install/data/d1/ + [omm@wzsy01 d1]$ ls + base pg_csnlog pg_llog pg_stat_tmp postgresql.conf.lock + cacert.pem pg_ctl.lock pg_location pg_tblspc postmaster.opts + gaussdb.state pg_errorinfo pg_multixact pg_twophase postmaster.pid + global pg_hba.conf pg_notify PG_VERSION server.crt + gswlm_userinfo.cfg pg_hba.conf.bak pg_replslot pg_xlog server.key + mot.conf pg_hba.conf.lock pg_serial postgresql.conf server.key.cipher + pg_clog pg_ident.conf pg_snapshots postgresql.conf.bak server.key.rand + [omm@wzsy01 d1]$ rm -rf server.* + [omm@wzsy01 d1]$ rm -rf pg_hba.conf* + [omm@wzsy01 d1]$ ls + base pg_clog pg_location pg_stat_tmp postgresql.conf.bak + cacert.pem pg_csnlog pg_multixact pg_tblspc postgresql.conf.lock + gaussdb.state pg_ctl.lock pg_notify pg_twophase postmaster.opts + global pg_errorinfo pg_replslot PG_VERSION postmaster.pid + gswlm_userinfo.cfg pg_ident.conf pg_serial pg_xlog + mot.conf pg_llog pg_snapshots postgresql.conf + [omm@wzsy01 d1]$ gs_om -t status --detail + [ Cluster State ] + + cluster_state : Normal + redistributing : No + current_az : AZ_ALL + + [ Datanode State ] + + 
node node_ip instance state | node node_ip instance state + -------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1 wzsy01 9.1.14.39 6001 opt/huawei/install/data/d1 P Primary Normal | 2 wzsy02 9.1.14.40 6002 opt/huawei/install/data/d2 S Standby Normal + ``` + +- 重启群集 + + ``` + [omm@wzsy01 d1]$ gs_om -t stop + Stopping cluster. + ========================================= + Successfully stopped cluster. + ========================================= + End stop cluster. + [omm@wzsy01 d1]$ gs_om -t start + Starting cluster. + ========================================= + [SUCCESS] wzsy02 + 2021-07-26 11:25:03.656 60fe2b0f.1 [unknown] 139725014521600 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger. + ========================================= + [GAUSS-53600]: Can not start the database, the cmd is source home/omm/.bashrc; python3 '/opt/huawei/install/om/script/local/StartInstance.py' -U omm -R opt/huawei/install/app -t 300 --security-mode=off, Error: + [FAILURE] wzsy01: + [GAUSS-51607] : Failed to start instance. Error: Please check the gs_ctl log for failure details. + [2021-07-26 11:24:59.261][90121][][gs_ctl]: gs_ctl started,datadir is opt/huawei/install/data/d1 + [2021-07-26 11:24:59.434][90121][][gs_ctl]: waiting for server to start... + .0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 0 LOG: [Alarm Module]Host Name: wzsy01 + + 0 LOG: [Alarm Module]Host IP: 9.1.14.39 + + 0 LOG: [Alarm Module]Cluster Name: Cluster_template + + 0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory. + 0 WARNING: failed to parse feature control file: gaussdb.version. + 0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version. + 0 LOG: Failed to initialze environment for codegen. + The core dump path is an invalid directory + 2021-07-26 11:24:59.789 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: Recovery parallelism, cpu count = 4, max = 4, actual = 4 + 2021-07-26 11:24:59.789 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: ConfigRecoveryParallelism, true_max_recovery_parallelism:4, max_recovery_parallelism:4 + 2021-07-26 11:24:59.789 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 2021-07-26 11:24:59.789 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host Name: wzsy01 + + 2021-07-26 11:24:59.790 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host IP: 9.1.14.39 + + 2021-07-26 11:24:59.790 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Cluster Name: Cluster_template + + 2021-07-26 11:24:59.790 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 2021-07-26 11:24:59.790 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Transparent encryption disabled. 
+
+ 2021-07-26 11:24:59.797 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: loaded library "security_plugin"
+ 2021-07-26 11:24:59.799 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0.
+ 2021-07-26 11:24:59.799 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger.
+ 2021-07-26 11:24:59.907 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set data cache size(805306368)
+ 2021-07-26 11:24:59.960 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set metadata cache size(268435456)
+ 2021-07-26 11:25:00.451 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: gaussdb: fsync file "/opt/huawei/install/data/d1/gaussdb.state.temp" success
+ 2021-07-26 11:25:00.452 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Standby)
+ 2021-07-26 11:25:00.478 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: max_safe_fds = 979, usable_fds = 1000, already_open = 11
+ The core dump path is an invalid directory
+ 2021-07-26 11:25:00.482 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: the configure file /opt/huawei/install/app/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user!
+ 2021-07-26 11:25:00.482 60fe2b0b.1 [unknown] 140465425811200 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Failed to parse cgroup config file.
+ .[2021-07-26 11:25:02.249][90121][][gs_ctl]: waitpid 90124 failed, exitstatus is 256, ret is 2
+
+
+ [2021-07-26 11:25:02.250][90121][][gs_ctl]: stopped waiting
+ [2021-07-26 11:25:02.250][90121][][gs_ctl]: could not start server
+ Examine the log output..
+ ```
+
+- During cluster startup the primary node reports an error and the standby node starts, but the standby's status needs repair:
+
+ ```
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Unavailable
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Down Manually stopped | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Standby Need repair(Disconnected)
+ ```
+
+- Restore the deleted files and manually start the original primary node
+
+ ```
+ [omm@wzsy01 d1]$ cp /home/omm/backup/server.* /opt/huawei/install/data/d1/
+ [omm@wzsy01 d1]$ cp /home/omm/backup/pg_hba.conf* /opt/huawei/install/data/d1/
+ [omm@wzsy01 d1]$ gs_ctl start -D /opt/huawei/install/data/d1/ -M primary
+ [2021-07-26 14:51:40.756][7855][][gs_ctl]: gs_ctl started,datadir is /opt/huawei/install/data/d1
+ [2021-07-26 14:51:40.933][7855][][gs_ctl]: waiting for server to start...
+ .0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env.
+
+ 0 LOG: [Alarm Module]Host Name: wzsy01
+
+ 0 LOG: [Alarm Module]Host IP: 9.1.14.39
+
+ 0 LOG: [Alarm Module]Cluster Name: Cluster_template
+
+ 0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed!
line: 52 + + 0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory. + 0 WARNING: failed to parse feature control file: gaussdb.version. + 0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version. + 0 LOG: Failed to initialze environment for codegen. + The core dump path is an invalid directory + 2021-07-26 14:51:41.300 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: Recovery parallelism, cpu count = 4, max = 4, actual = 4 + 2021-07-26 14:51:41.300 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: ConfigRecoveryParallelism, true_max_recovery_parallelism:4, max_recovery_parallelism:4 + 2021-07-26 14:51:41.300 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 2021-07-26 14:51:41.301 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host Name: wzsy01 + + 2021-07-26 14:51:41.301 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host IP: 9.1.14.39 + + 2021-07-26 14:51:41.301 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Cluster Name: Cluster_template + + 2021-07-26 14:51:41.301 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 2021-07-26 14:51:41.301 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Transparent encryption disabled. + + 2021-07-26 14:51:41.305 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: loaded library "security_plugin" + 2021-07-26 14:51:41.307 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. + 2021-07-26 14:51:41.307 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger. + 2021-07-26 14:51:41.412 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set data cache size(805306368) + 2021-07-26 14:51:41.464 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set metadata cache size(268435456) + 2021-07-26 14:51:41.944 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: gaussdb: fsync file "/opt/huawei/install/data/d1/gaussdb.state.temp" success + 2021-07-26 14:51:41.944 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Primary) + 2021-07-26 14:51:41.971 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: max_safe_fds = 978, usable_fds = 1000, already_open = 12 + The core dump path is an invalid directory + 2021-07-26 14:51:41.975 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: the configure file /opt/huawei/install/app/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user! 
+ 2021-07-26 14:51:41.975 60fe5b7d.1 [unknown] 139948390721280 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Failed to parse cgroup config file.
+ [2021-07-26 14:51:43.017][7855][][gs_ctl]: done
+ [2021-07-26 14:51:43.017][7855][][gs_ctl]: server started (/opt/huawei/install/data/d1)
+ ```
+
+- The cluster status returns to normal
+
+ ```
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Normal
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Primary Normal | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Standby Normal
+ ```
+
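+One point worth underlining from this recovery: it was only painless because copies of server.\* and pg\_hba.conf\* already existed under /home/omm/backup. A minimal sketch of that precaution, using the data directory and backup path from this article (adjust both to your environment):
+
+ ```
+ # Keep spare copies of the small but critical instance files before maintenance.
+ mkdir -p /home/omm/backup
+ cp /opt/huawei/install/data/d1/server.* /home/omm/backup/
+ cp /opt/huawei/install/data/d1/pg_hba.conf* /home/omm/backup/
+ ```
+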
diff --git "a/content/zh/post/July/openGauss\347\232\204WDR\346\212\245\345\221\212\357\274\210\347\261\273\344\274\274\344\272\216Oracle\347\232\204awr\357\274\211.md" "b/content/zh/post/July/openGauss\347\232\204WDR\346\212\245\345\221\212\357\274\210\347\261\273\344\274\274\344\272\216Oracle\347\232\204awr\357\274\211.md"
new file mode 100644
index 0000000000000000000000000000000000000000..24390a35b567b8af7697f5d7ef43ac94efd82451
--- /dev/null
+++ "b/content/zh/post/July/openGauss\347\232\204WDR\346\212\245\345\221\212\357\274\210\347\261\273\344\274\274\344\272\216Oracle\347\232\204awr\357\274\211.md"
@@ -0,0 +1,186 @@
++++
+
+title = "The openGauss WDR Report (Similar to Oracle's AWR)"
+
+date = "2021-08-06"
+
+tags = [ "The openGauss WDR Report (Similar to Oracle's AWR)"]
+
+archives = "2021-08"
+
+author = "阎书利"
+
+summary = "The openGauss WDR Report (Similar to Oracle's AWR)"
+
+img = "/zh/post/July/title/img1.png"
+
+times = "12:30"
+
++++
+
+# The openGauss WDR Report (Similar to Oracle's AWR)
+
+Oracle's AWR report is a great convenience in day-to-day troubleshooting. When a problem occurs, we usually check whether snapshots exist for the time period in question, generate an AWR report, and analyze it further. By examining the database status, resource consumption, wait events, and so on, we can make a preliminary diagnosis and then verify it. The openGauss database also has such an "AWR"; it is called the WDR.
+
+## Prerequisites:
+
+1. Enable the enable\_wdr\_snapshot parameter.
+
+ ```
+ postgres=# show enable_wdr_snapshot;
+ enable_wdr_snapshot
+ ---------------------
+ on
+ (1 row)
+ ```
+
+ After WDR Snapshot is started, a schema named "snapshot" is created in the database "postgres" under the user tablespace "pg\_default" to persist the WDR snapshot data.
+
+2. There are at least two WDR Snapshot performance snapshots.
+
+## Procedure:
+
+1. Run the following command to query the snapshots that have already been generated
+
+ ```
+ postgres=# select * from snapshot.snapshot;
+ snapshot_id | start_ts | end_ts
+ -------------+-------------------------------+-------------------------------
+ 1 | 2021-07-08 15:02:15.990876+08 | 2021-07-08 15:02:18.555272+08
+ 2 | 2021-07-08 15:08:12.470218+08 | 2021-07-08 15:08:14.514862+08
+ 3 | 2021-07-08 16:02:16.709364+08 | 2021-07-08 16:02:17.643546+08
+ 4 | 2021-07-08 17:02:17.617386+08 | 2021-07-08 17:02:20.626552+08
+ ............
+ 43 | 2021-07-10 07:02:36.418031+08 | 2021-07-10 07:02:37.380217+08
+ 44 | 2021-08-05 00:21:09.062745+08 | 2021-08-05 00:21:10.33016+08
+ (44 rows)
+ ```
+
+2. You can create a snapshot manually (this command requires the sysadmin privilege), or simply pick existing snapshots from the database.
+
+ ```
+ postgres=# select create_wdr_snapshot();
+ create_wdr_snapshot
+ -----------------------------------------
+ WDR snapshot request has been submitted
+ (1 row)
+
+ postgres=# select * from snapshot.snapshot;
+ snapshot_id | start_ts | end_ts
+ -------------+-------------------------------+-------------------------------
+ 1 | 2021-07-08 15:02:15.990876+08 | 2021-07-08 15:02:18.555272+08
+ 2 | 2021-07-08 15:08:12.470218+08 | 2021-07-08 15:08:14.514862+08
+ 3 | 2021-07-08 16:02:16.709364+08 | 2021-07-08 16:02:17.643546+08
+ 4 | 2021-07-08 17:02:17.617386+08 | 2021-07-08 17:02:20.626552+08
+ ............
+ 43 | 2021-07-10 07:02:36.418031+08 | 2021-07-10 07:02:37.380217+08
+ 44 | 2021-08-05 00:21:09.062745+08 | 2021-08-05 00:21:10.33016+08
+ 45 | 2021-08-05 00:39:43.777341+08 | 2021-08-05 00:39:44.760498+08 //this snapshot was just generated manually
+ (45 rows)
+ ```
+
+3. Perform the following steps to generate a node-level WDR report
+ - Query the value of the pgxc\_node\_name parameter
+
+ ```
+ [omm@node1 ~]$ gsql -p 26000 postgres -c "show pgxc_node_name"
+ pgxc_node_name
+ ----------------
+ dn_6001_6002
+ (1 row)
+ ```
+
+ - Use \\a \\t \\o server\_file\_path to generate a formatted performance report
+
+ For example
+
+ ```
+ postgres=# \a \t \o /home/omm/wdr_sarah.html
+ Output format is unaligned.
+ Showing only tuples.
+ ```
+
+ The parameters in the above command are described as follows:
+
+ - \\a: toggles unaligned output mode.
+ - \\t: toggles the display of column names and the row-count footer.
+ - \\o: sends all query results to the server file.
+
+ server\_file\_path: the path where the generated performance report is stored. The user must have read and write permissions on this path.
+
+ - Write data into the performance report wdr\_sarah.html.
+
+ ```
+ gsql -p 26000 -d postgres
+ select generate_wdr_report(snapshot_id_1, snapshot_id_2, 'all', 'node', 'pgxc_node_name_value');
+ ```
+
+ For example
+
+ ```
+ postgres=# select generate_wdr_report(44,45,'all','node','dn_6001_6002');
+ The corresponding WDR report is generated in the directory:
+
+ [omm@node1 ~]$ ll
+ total 1080
+ -rw------- 1 omm dbgrp 1317 Apr 9 15:43 single.xml
+ -rw------- 1 omm dbgrp 1101242 Aug 5 00:47 wdr_sarah.html
+ ```
+
+ - Open it in a browser:
+
+ The openGauss WDR report is similar to Oracle's AWR report: it covers resource consumption, wait events, TOP SQL, parameter settings, and more.
+
+ ![](figures/1.png)
+
+ ![](figures/2.png)
+
+ ![](figures/3.png)
+
+
+
+## Snapshot-related parameters:
+
+- enable\_wdr\_snapshot
+
+ Description: whether to enable the database monitoring snapshot function.
+
+ This is a SIGHUP parameter.
+
+ Value range: Boolean
+
+ on: enables the database monitoring snapshot function.
+
+ off: disables the database monitoring snapshot function.
+
+- wdr\_snapshot\_retention\_days
+
+ Description: number of days that database monitoring snapshot data is retained. When the number of snapshots generated while the database is running exceeds the maximum number allowed within the retention period, the system deletes the snapshot data with the smallest snapshot\_id at intervals of wdr\_snapshot\_interval.
+
+ This is a SIGHUP parameter.
+
+ Value range: integer, 1 to 8.
+
+ Default: 8
+
+- wdr\_snapshot\_interval
+
+ Description: interval at which the background Snapshot thread automatically takes snapshots of the database monitoring data.
+
+ This is a SIGHUP parameter.
+
+ Value range: integer, 10 to 60 (minutes).
+
+ Default: 1h (that is, 60 minutes)
+
+- wdr\_snapshot\_query\_timeout
+
+ Description: execution timeout for the SQL statements involved in a database monitoring snapshot operation. If a statement does not finish and return a result within this time, the snapshot operation fails.
+
+ This is a SIGHUP parameter.
+
+ Value range: integer, 100 to INT\_MAX (seconds).
+
+ Default: 100s
+
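+In practice the manual sequence above (create a snapshot, look up pgxc\_node\_name, redirect gsql output) is easy to wrap in a small script. A minimal sketch, assuming a node listening on port 26000 as in the examples above; the script name, output path, and sleep interval are illustrative, not part of the original procedure:
+
+ ```
+ #!/bin/bash
+ # Take a fresh WDR snapshot, then render a report between the two newest snapshots.
+ PORT=26000
+ OUT=/home/omm/wdr_$(date +%Y%m%d_%H%M%S).html
+
+ # 1. Request a new snapshot (requires sysadmin); the request is asynchronous,
+ #    so give the background Snapshot thread a moment to persist it.
+ gsql -p $PORT -d postgres -c "select create_wdr_snapshot();"
+ sleep 10
+
+ # 2. Find the node name and the ids of the two newest snapshots.
+ NODE=$(gsql -p $PORT -d postgres -At -c "show pgxc_node_name;")
+ read BEGIN_ID END_ID <<< "$(gsql -p $PORT -d postgres -At -F ' ' -c \
+   "select min(snapshot_id), max(snapshot_id) from (select snapshot_id from snapshot.snapshot order by snapshot_id desc limit 2) s;")"
+
+ # 3. Generate the node-level report, unaligned and tuples-only, into $OUT.
+ gsql -p $PORT -d postgres -At -o "$OUT" -c \
+   "select generate_wdr_report($BEGIN_ID, $END_ID, 'all', 'node', '$NODE');"
+ echo "WDR report written to $OUT"
+ ```
+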
diff --git "a/content/zh/post/July/openGauss\347\276\244\351\233\206\345\244\207\350\212\202\347\202\271\347\212\266\346\200\201\344\277\256\345\244\215.md" "b/content/zh/post/July/openGauss\347\276\244\351\233\206\345\244\207\350\212\202\347\202\271\347\212\266\346\200\201\344\277\256\345\244\215.md"
new file mode 100644
index 0000000000000000000000000000000000000000..b6a44b9b174238f2618a0c8c1c77135e2038f324
--- /dev/null
+++ "b/content/zh/post/July/openGauss\347\276\244\351\233\206\345\244\207\350\212\202\347\202\271\347\212\266\346\200\201\344\277\256\345\244\215.md"
@@ -0,0 +1,495 @@
++++
+
+title = "openGauss Cluster Standby Node Status Repair"
+
+date = "2021-08-08"
+
+tags = [ "openGauss Cluster Standby Node Status Repair"]
+
+archives = "2021-08"
+
+author = "Walrus"
+
+summary = "openGauss Cluster Standby Node Status Repair"
+
+img = "/zh/post/July/title/img7.png"
+
+times = "12:30"
+
++++
+
+# Repairing an openGauss Cluster Standby Node in Standby Need repair\(Disconnected\) State
+
+## Simulating the Standby Need repair\(Disconnected\) state on an openGauss cluster standby node
+
+1. Corrupt cluster files
+
+ First back up the primary node's files, then delete important files on the primary node. After the cluster is restarted, the primary node fails to start and the standby node comes up in Standby Need repair state. If the files are restored at this point, the cluster status returns to normal after a restart.
+
+ ```
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+ cluster_state : Normal
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Primary Normal | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Standby Normal
+ [omm@wzsy01 backup]$ gs_basebackup -D /home/omm/backup/ -h wzsy01 -p 15400
+ INFO: The starting position of the xlog copy of the full build is: 0/4F000028. The slot minimum LSN is: 0/4F000140.
+ [2021-07-26 15:14:04]:begin build tablespace list
+ [2021-07-26 15:14:04]:finish build tablespace list
+ [2021-07-26 15:14:04]:begin get xlog by xlogstream
+ [2021-07-26 15:14:04]: check identify system success
+ [2021-07-26 15:14:04]: send START_REPLICATION 0/4F000000 success
+ [2021-07-26 15:14:04]: keepalive message is received
+ [2021-07-26 15:14:04]: keepalive message is received
+ [2021-07-26 15:14:09]:gs_basebackup: base backup successfully
+ [omm@wzsy01 backup]$ ls
+ backup_label cacert.pem mot.conf pg_errorinfo pg_ident.conf pg_replslot pg_tblspc postgresql.conf server.crt term_file
+ backup_label.old full_backup_label pg_clog pg_hba.conf pg_llog pg_serial pg_twophase postgresql.conf.bak server.key
+ base global pg_csnlog pg_hba.conf.bak pg_multixact pg_snapshots PG_VERSION postgresql.conf.lock server.key.cipher
+ build_completed.done gswlm_userinfo.cfg pg_ctl.lock pg_hba.conf.lock pg_notify pg_stat_tmp pg_xlog rewind_lable server.key.rand
+ [omm@wzsy01 backup]$ cd -
+ /opt/huawei/install/data/d1
+ [omm@wzsy01 d1]$ ls
+ backup_label.old gaussdb.state pg_clog pg_hba.conf.bak pg_multixact pg_snapshots pg_xlog postmaster.pid server.key.rand
+ base global pg_csnlog pg_hba.conf.lock pg_notify pg_stat_tmp postgresql.conf rewind_lable term_file
+ build_completed.done gs_build.pid pg_ctl.lock pg_ident.conf pg_replslot pg_tblspc postgresql.conf.bak server.crt
+ cacert.pem gswlm_userinfo.cfg pg_errorinfo pg_llog pg_rewind_filemap pg_twophase postgresql.conf.lock server.key
+ full_backup_label mot.conf pg_hba.conf pg_location pg_serial PG_VERSION postmaster.opts server.key.cipher
+ [omm@wzsy01 d1]$ rm -rf pg_hba.conf*
+ [omm@wzsy01 d1]$ gs_om -t stop
+ Stopping cluster.
+ =========================================
+ Successfully stopped cluster.
+ =========================================
+ End stop cluster.
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Unavailable
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Down Manually stopped | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Down Manually stopped
+ [omm@wzsy01 d1]$ gs_om -t start
+ Starting cluster.
+ ========================================= + [SUCCESS] wzsy02 + 2021-07-26 15:15:58.513 60fe612e.1 [unknown] 139773867022080 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger. + ========================================= + [GAUSS-53600]: Can not start the database, the cmd is source home/omm/.bashrc; python3 '/opt/huawei/install/om/script/local/StartInstance.py' -U omm -R opt/huawei/install/app -t 300 --security-mode=off, Error: + [FAILURE] wzsy01: + [GAUSS-51607] : Failed to start instance. Error: Please check the gs_ctl log for failure details. + [2021-07-26 15:15:54.178][15696][][gs_ctl]: gs_ctl started,datadir is opt/huawei/install/data/d1 + [2021-07-26 15:15:54.362][15696][][gs_ctl]: waiting for server to start... + .0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 0 LOG: [Alarm Module]Host Name: wzsy01 + + 0 LOG: [Alarm Module]Host IP: 9.1.14.39 + + 0 LOG: [Alarm Module]Cluster Name: Cluster_template + + 0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory. + 0 WARNING: failed to parse feature control file: gaussdb.version. + 0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version. + 0 LOG: Failed to initialze environment for codegen. + The core dump path is an invalid directory + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: Recovery parallelism, cpu count = 4, max = 4, actual = 4 + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: ConfigRecoveryParallelism, true_max_recovery_parallelism:4, max_recovery_parallelism:4 + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host Name: wzsy01 + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host IP: 9.1.14.39 + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Cluster Name: Cluster_template + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + 2021-07-26 15:15:54.716 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Transparent encryption disabled. + 2021-07-26 15:15:54.720 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: loaded library "security_plugin" + 2021-07-26 15:15:54.722 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. + 2021-07-26 15:15:54.722 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger. 
+ 2021-07-26 15:15:54.843 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set data cache size(805306368)
+ 2021-07-26 15:15:54.896 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set metadata cache size(268435456)
+ 2021-07-26 15:15:55.365 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: gaussdb: fsync file "/opt/huawei/install/data/d1/gaussdb.state.temp" success
+ 2021-07-26 15:15:55.365 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Primary)
+ 2021-07-26 15:15:55.389 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: max_safe_fds = 979, usable_fds = 1000, already_open = 11
+ The core dump path is an invalid directory
+ 2021-07-26 15:15:55.394 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: the configure file /opt/huawei/install/app/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user!
+ 2021-07-26 15:15:55.394 60fe612a.1 [unknown] 140709682288384 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Failed to parse cgroup config file.
+ .[2021-07-26 15:15:57.160][15696][][gs_ctl]: waitpid 15700 failed, exitstatus is 256, ret is 2
+
+
+ [2021-07-26 15:15:57.160][15696][][gs_ctl]: stopped waiting
+ [2021-07-26 15:15:57.160][15696][][gs_ctl]: could not start server
+ Examine the log output..
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Unavailable
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Down Manually stopped | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Standby Need repair(Disconnected)
+ ```
+
+2. Switch the standby node to primary
+
+ ```
+ [omm@wzsy02 d2]$ gs_om -t stop
+ Stopping cluster.
+ =========================================
+ Successfully stopped cluster.
+ =========================================
+ End stop cluster.
+ [omm@wzsy02 d2]$ gs_ctl start -D /opt/huawei/install/data/d2/ -M primary
+ [2021-07-26 15:45:05.449][129349][][gs_ctl]: gs_ctl started,datadir is /opt/huawei/install/data/d2
+ [2021-07-26 15:45:05.617][129349][][gs_ctl]: waiting for server to start...
+ .0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env.
+
+ 0 LOG: [Alarm Module]Host Name: wzsy02
+
+ 0 LOG: [Alarm Module]Host IP: 9.1.14.40
+
+ 0 LOG: [Alarm Module]Cluster Name: Cluster_template
+
+ 0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52
+
+ 0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory.
+ 0 WARNING: failed to parse feature control file: gaussdb.version.
+ 0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version.
+ 0 LOG: Failed to initialze environment for codegen.
+ The core dump path is an invalid directory + 2021-07-26 15:45:05.976 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: Recovery parallelism, cpu count = 4, max = 4, actual = 4 + 2021-07-26 15:45:05.976 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: ConfigRecoveryParallelism, true_max_recovery_parallelism:4, max_recovery_parallelism:4 + 2021-07-26 15:45:05.976 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 2021-07-26 15:45:05.976 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host Name: wzsy02 + + 2021-07-26 15:45:05.976 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host IP: 9.1.14.40 + + 2021-07-26 15:45:05.976 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Cluster Name: Cluster_template + + 2021-07-26 15:45:05.977 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 2021-07-26 15:45:05.977 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Transparent encryption disabled. + + 2021-07-26 15:45:05.980 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: loaded library "security_plugin" + 2021-07-26 15:45:05.981 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. + 2021-07-26 15:45:05.981 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger. + 2021-07-26 15:45:06.088 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set data cache size(805306368) + 2021-07-26 15:45:06.138 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set metadata cache size(268435456) + 2021-07-26 15:45:06.581 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: gaussdb: fsync file "/opt/huawei/install/data/d2/gaussdb.state.temp" success + 2021-07-26 15:45:06.581 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Primary) + 2021-07-26 15:45:06.607 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: max_safe_fds = 978, usable_fds = 1000, already_open = 12 + The core dump path is an invalid directory + 2021-07-26 15:45:06.610 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: the configure file opt/huawei/install/app/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user! + 2021-07-26 15:45:06.610 60fe6801.1 [unknown] 140434066839296 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Failed to parse cgroup config file. + . 
+ [2021-07-26 15:45:07.645][129349][][gs_ctl]: done
+ [2021-07-26 15:45:07.645][129349][][gs_ctl]: server started (/opt/huawei/install/data/d2)
+ [omm@wzsy02 d2]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Degraded
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Down Manually stopped | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Primary Normal
+ ```
+
+3. Restore the primary node's lost files, and manually start the original primary node as a standby
+
+ ```
+ [omm@wzsy01 d1]$ cp /home/omm/backup/pg_hba.conf* /opt/huawei/install/data/d1/
+ [omm@wzsy01 d1]$ gs_ctl start -D /opt/huawei/install/data/d1/ -M standby
+ [2021-07-26 15:46:01.894][40444][][gs_ctl]: gs_ctl started,datadir is /opt/huawei/install/data/d1
+ [2021-07-26 15:46:02.090][40444][][gs_ctl]: waiting for server to start...
+ .0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env.
+
+ 0 LOG: [Alarm Module]Host Name: wzsy01
+
+ 0 LOG: [Alarm Module]Host IP: 9.1.14.39
+
+ 0 LOG: [Alarm Module]Cluster Name: Cluster_template
+
+ 0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52
+
+ 0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory.
+ 0 WARNING: failed to parse feature control file: gaussdb.version.
+ 0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version.
+ 0 LOG: Failed to initialze environment for codegen.
+ The core dump path is an invalid directory
+ 2021-07-26 15:46:02.433 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: Recovery parallelism, cpu count = 4, max = 4, actual = 4
+ 2021-07-26 15:46:02.433 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: ConfigRecoveryParallelism, true_max_recovery_parallelism:4, max_recovery_parallelism:4
+ 2021-07-26 15:46:02.433 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env.
+
+ 2021-07-26 15:46:02.433 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host Name: wzsy01
+
+ 2021-07-26 15:46:02.434 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host IP: 9.1.14.39
+
+ 2021-07-26 15:46:02.434 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Cluster Name: Cluster_template
+
+ 2021-07-26 15:46:02.434 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52
+
+ 2021-07-26 15:46:02.434 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Transparent encryption disabled.
+
+ 2021-07-26 15:46:02.437 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: loaded library "security_plugin"
+ 2021-07-26 15:46:02.438 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0.
+ 2021-07-26 15:46:02.439 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger.
+ 2021-07-26 15:46:02.543 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set data cache size(805306368)
+ 2021-07-26 15:46:02.592 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set metadata cache size(268435456)
+ 2021-07-26 15:46:03.055 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: gaussdb: fsync file "/opt/huawei/install/data/d1/gaussdb.state.temp" success
+ 2021-07-26 15:46:03.055 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Standby)
+ 2021-07-26 15:46:03.081 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: max_safe_fds = 978, usable_fds = 1000, already_open = 12
+ The core dump path is an invalid directory
+ 2021-07-26 15:46:03.085 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: the configure file /opt/huawei/install/app/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user!
+ 2021-07-26 15:46:03.085 60fe683a.1 [unknown] 139749337880320 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Failed to parse cgroup config file.
+ .
+ [2021-07-26 15:46:04.140][40444][][gs_ctl]: done
+ [2021-07-26 15:46:04.140][40444][][gs_ctl]: server started (/opt/huawei/install/data/d1)
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Degraded
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Standby Need repair(WAL) | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Primary Normal
+ ```
+
+
+## The standby node is now in Standby Need repair\(WAL\) state, and this state does not support primary/standby switchover
+
+1. A switchover attempt in this state reports an error
+
+ ```
+ [omm@wzsy01 d1]$ gs_ctl switchover -D /opt/huawei/install/data/d1/
+ [2021-07-26 15:46:47.883][41371][][gs_ctl]: gs_ctl switchover ,datadir is /opt/huawei/install/data/d1
+ [2021-07-26 15:46:47.883][41371][][gs_ctl]: switchover term (1)
+ [2021-07-26 15:46:47.891][41371][][gs_ctl]: waiting for server to switchover...............................................................
+ [2021-07-26 15:47:48.376][41371][][gs_ctl]:
+ switchover timeout after 60 seconds. please manually check the cluster status.
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Degraded
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Standby Need repair(WAL) | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Primary Normal
+ ```
+
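+ The repair is a rebuild of the standby, shown in the next step. For context, gs_ctl can rebuild either incrementally or in full; a minimal sketch, assuming the openGauss 2.x `gs_ctl build -b` syntax (verify the flag against your version's tool reference):
+
+ ```
+ # Incremental build (default): keeps local data files and copies only what diverged.
+ gs_ctl build -D /opt/huawei/install/data/d1/ -b incremental
+
+ # Full build: discards local data and re-copies everything from the primary;
+ # slower, but it also works when no common checkpoint can be found.
+ gs_ctl build -D /opt/huawei/install/data/d1/ -b full
+ ```
+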
+2. Rebuild the standby node
+
+ ```
+ [omm@wzsy01 d1]$ gs_ctl build -D /opt/huawei/install/data/d1/
+ [2021-07-26 15:48:44.087][43045][][gs_ctl]: gs_ctl incremental build ,datadir is /opt/huawei/install/data/d1
+ waiting for server to shut down............. done
+ server stopped
+ [2021-07-26 15:48:54.113][43045][][gs_ctl]: fopen build pid file "/opt/huawei/install/data/d1/gs_build.pid" success
+ [2021-07-26 15:48:54.113][43045][][gs_ctl]: fprintf build pid file "/opt/huawei/install/data/d1/gs_build.pid" success
+ [2021-07-26 15:48:54.113][43045][][gs_ctl]: fsync build pid file "/opt/huawei/install/data/d1/gs_build.pid" success
+ [2021-07-26 15:48:54.120][43045][dn_6001_6002][gs_rewind]: set gaussdb state file when rewind:db state(BUILDING_STATE), server mode(STANDBY_MODE), build mode(INC_BUILD).
+ [2021-07-26 15:48:54.201][43045][dn_6001_6002][gs_rewind]: connected to server: host=9.1.14.40 port=15401 dbname=postgres application_name=gs_rewind connect_timeout=5
+ [2021-07-26 15:48:54.204][43045][dn_6001_6002][gs_rewind]: connect to primary success
+ [2021-07-26 15:48:54.205][43045][dn_6001_6002][gs_rewind]: get pg_control success
+ [2021-07-26 15:48:54.205][43045][dn_6001_6002][gs_rewind]: target server was interrupted in mode 2.
+ [2021-07-26 15:48:54.205][43045][dn_6001_6002][gs_rewind]: sanityChecks success
+ [2021-07-26 15:48:54.205][43045][dn_6001_6002][gs_rewind]: find last checkpoint at 0/500062D0 and checkpoint redo at 0/50006250 from source control file
+ [2021-07-26 15:48:54.205][43045][dn_6001_6002][gs_rewind]: find last checkpoint at 0/500060A0 and checkpoint redo at 0/50006020 from target control file
+ [2021-07-26 15:48:54.207][43045][dn_6001_6002][gs_rewind]: find max lsn success, find max lsn rec (0/500060A0) success.
+
+
+ [2021-07-26 15:48:54.213][43045][dn_6001_6002][gs_rewind]: request lsn is 0/500060A0 and its crc(source, target):[1826941517, 2969834311]
+ [2021-07-26 15:48:54.219][43045][dn_6001_6002][gs_rewind]: request lsn is 0/50005F70 and its crc(source, target):[1279685734, 2286271583]
+ [2021-07-26 15:48:54.225][43045][dn_6001_6002][gs_rewind]: request lsn is 0/50005E58 and its crc(source, target):[3699854113, 3699854113]
+ [2021-07-26 15:48:54.225][43045][dn_6001_6002][gs_rewind]: find common checkpoint 0/50005E58
+ [2021-07-26 15:48:54.225][43045][dn_6001_6002][gs_rewind]: find diverge point success
+ [2021-07-26 15:48:54.225][43045][dn_6001_6002][gs_rewind]: read checkpoint redo (0/50005DD8) success before rewinding.
+ [2021-07-26 15:48:54.225][43045][dn_6001_6002][gs_rewind]: rewinding from checkpoint redo point at 0/50005DD8 on timeline 1
+ [2021-07-26 15:48:54.225][43045][dn_6001_6002][gs_rewind]: diverge xlogfile is 000000010000000000000050, older ones will not be copied or removed.
+ [2021-07-26 15:48:54.226][43045][dn_6001_6002][gs_rewind]: targetFileStatThread success pid 140525638383360.
+ [2021-07-26 15:48:54.227][43045][dn_6001_6002][gs_rewind]: traverse_datadir start.
+ [2021-07-26 15:48:54.227][43045][dn_6001_6002][gs_rewind]: reading source file list
+ [2021-07-26 15:48:54.234][43045][dn_6001_6002][gs_rewind]: filemap_list_to_array start.
+ [2021-07-26 15:48:54.234][43045][dn_6001_6002][gs_rewind]: filemap_list_to_array end sort start. length is 2586
+ [2021-07-26 15:48:54.235][43045][dn_6001_6002][gs_rewind]: sort end.
+ [2021-07-26 15:48:54.243][43045][dn_6001_6002][gs_rewind]: targetFileStatThread return success.
+ [2021-07-26 15:48:54.263][43045][dn_6001_6002][gs_rewind]: reading target file list + [2021-07-26 15:48:54.268][43045][dn_6001_6002][gs_rewind]: traverse target datadir success + [2021-07-26 15:48:54.268][43045][dn_6001_6002][gs_rewind]: reading WAL in target + [2021-07-26 15:48:54.268][43045][dn_6001_6002][gs_rewind]: could not read WAL record at 0/50006138: invalid record length at 0/50006138: wanted 32, got 0 + [2021-07-26 15:48:54.270][43045][dn_6001_6002][gs_rewind]: calculate totals rewind success + [2021-07-26 15:48:54.270][43045][dn_6001_6002][gs_rewind]: need to copy 16MB (total source directory size is 657MB) + [2021-07-26 15:48:54.270][43045][dn_6001_6002][gs_rewind]: starting background WAL receiver + [2021-07-26 15:48:54.270][43045][dn_6001_6002][gs_rewind]: Starting copy xlog, start point: 0/50005DD8 + [2021-07-26 15:48:54.271][43045][dn_6001_6002][gs_rewind]: in gs_rewind proecess,so no need remove. + [2021-07-26 15:48:54.277][43045][dn_6001_6002][gs_rewind]: check identify system success + [2021-07-26 15:48:54.277][43045][dn_6001_6002][gs_rewind]: send START_REPLICATION 0/50000000 success + [2021-07-26 15:48:54.309][43045][dn_6001_6002][gs_rewind]: receiving and unpacking files... + [2021-07-26 15:48:54.413][43045][dn_6001_6002][gs_rewind]: execute file map success + [2021-07-26 15:48:54.414][43045][dn_6001_6002][gs_rewind]: find minRecoveryPoint success from xlog insert location 0/5000A858 + [2021-07-26 15:48:54.414][43045][dn_6001_6002][gs_rewind]: update pg_control file success, minRecoveryPoint: 0/5000A858, ckpLoc:0/500062D0, ckpRedo:0/50006250, preCkp:0/500061B8 + [2021-07-26 15:48:54.416][43045][dn_6001_6002][gs_rewind]: update pg_dw file success + [2021-07-26 15:48:54.416][43045][dn_6001_6002][gs_rewind]: xlog end point: 0/5000A858 + [2021-07-26 15:48:54.416][43045][dn_6001_6002][gs_rewind]: waiting for background process to finish streaming... + [2021-07-26 15:48:59.281][43045][dn_6001_6002][gs_rewind]: creating backup label and updating control file + [2021-07-26 15:48:59.281][43045][dn_6001_6002][gs_rewind]: create backup label success + [2021-07-26 15:48:59.281][43045][dn_6001_6002][gs_rewind]: read checkpoint redo (0/50005DD8) success. + [2021-07-26 15:48:59.281][43045][dn_6001_6002][gs_rewind]: read checkpoint rec (0/50005E58) success. + [2021-07-26 15:48:59.281][43045][dn_6001_6002][gs_rewind]: dn incremental build completed. + [2021-07-26 15:48:59.287][43045][dn_6001_6002][gs_rewind]: fetching MOT checkpoint + [2021-07-26 15:48:59.462][43045][dn_6001_6002][gs_ctl]: waiting for server to start... + .0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 0 LOG: [Alarm Module]Host Name: wzsy01 + + 0 LOG: [Alarm Module]Host IP: 9.1.14.39 + + 0 LOG: [Alarm Module]Cluster Name: Cluster_template + + 0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory. + 0 WARNING: failed to parse feature control file: gaussdb.version. + 0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version. + 0 LOG: Failed to initialze environment for codegen. 
+ The core dump path is an invalid directory + 2021-07-26 15:48:59.849 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: Recovery parallelism, cpu count = 4, max = 4, actual = 4 + 2021-07-26 15:48:59.849 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 DB010 0 [REDO] LOG: ConfigRecoveryParallelism, true_max_recovery_parallelism:4, max_recovery_parallelism:4 + 2021-07-26 15:48:59.850 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + + 2021-07-26 15:48:59.850 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host Name: wzsy01 + + 2021-07-26 15:48:59.850 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Host IP: 9.1.14.39 + + 2021-07-26 15:48:59.850 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Cluster Name: Cluster_template + + 2021-07-26 15:48:59.850 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 + + 2021-07-26 15:48:59.850 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Transparent encryption disabled. + + 2021-07-26 15:48:59.857 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: loaded library "security_plugin" + 2021-07-26 15:48:59.859 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. + 2021-07-26 15:48:59.859 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (4250 Mbytes) is larger. + 2021-07-26 15:48:59.968 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set data cache size(805306368) + 2021-07-26 15:49:00.020 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [CACHE] LOG: set metadata cache size(268435456) + 2021-07-26 15:49:00.503 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: gaussdb: fsync file "/opt/huawei/install/data/d1/gaussdb.state.temp" success + 2021-07-26 15:49:00.504 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Standby) + 2021-07-26 15:49:00.526 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: max_safe_fds = 976, usable_fds = 1000, already_open = 14 + The core dump path is an invalid directory + 2021-07-26 15:49:00.531 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: the configure file /opt/huawei/install/app/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user! + 2021-07-26 15:49:00.531 60fe68eb.1 [unknown] 140413211043584 [unknown] 0 dn_6001_6002 00000 0 [BACKEND] LOG: Failed to parse cgroup config file. + . 
+ [2021-07-26 15:49:01.581][43045][dn_6001_6002][gs_ctl]: done
+ [2021-07-26 15:49:01.581][43045][dn_6001_6002][gs_ctl]: server started (/opt/huawei/install/data/d1)
+ [2021-07-26 15:49:01.581][43045][dn_6001_6002][gs_ctl]: fopen build pid file "/opt/huawei/install/data/d1/gs_build.pid" success
+ [2021-07-26 15:49:01.581][43045][dn_6001_6002][gs_ctl]: fprintf build pid file "/opt/huawei/install/data/d1/gs_build.pid" success
+ [2021-07-26 15:49:01.582][43045][dn_6001_6002][gs_ctl]: fsync build pid file "/opt/huawei/install/data/d1/gs_build.pid" success
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Normal
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Standby Normal | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Primary Normal
+ ```
+
+3. Switch primary and standby again to restore the cluster's original roles
+
+ ```
+ [omm@wzsy01 d1]$ gs_ctl switchover -D /opt/huawei/install/data/d1/
+ [2021-07-26 15:49:38.594][43597][][gs_ctl]: gs_ctl switchover ,datadir is /opt/huawei/install/data/d1
+ [2021-07-26 15:49:38.594][43597][][gs_ctl]: switchover term (1)
+ [2021-07-26 15:49:38.601][43597][][gs_ctl]: waiting for server to switchover................
+ [2021-07-26 15:49:51.713][43597][][gs_ctl]: done
+ [2021-07-26 15:49:51.713][43597][][gs_ctl]: switchover completed (/opt/huawei/install/data/d1)
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Normal
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Primary Normal | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Standby Normal
+ [omm@wzsy01 d1]$ gs_om refreshconf
+ [GAUSS-50001] : Incorrect parameter. Parameter '-t' is required.
+ [omm@wzsy01 d1]$ gs_om -t refreshconf
+ Generating dynamic configuration file for all nodes.
+ Successfully generated dynamic configuration file.
+ [omm@wzsy01 d1]$ gs_om -t status --detail
+ [ Cluster State ]
+
+
+ cluster_state : Normal
+ redistributing : No
+ current_az : AZ_ALL
+
+
+ [ Datanode State ]
+
+
+ node node_ip instance state | node node_ip instance state
+ --------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 wzsy01 9.1.14.39 6001 /opt/huawei/install/data/d1 P Primary Normal | 2 wzsy02 9.1.14.40 6002 /opt/huawei/install/data/d2 S Standby Normal
+ ```
+
+
diff --git "a/content/zh/post/July/openGauss\351\200\273\350\276\221\345\244\207\344\273\275\345\217\212\346\201\242\345\244\215.md" "b/content/zh/post/July/openGauss\351\200\273\350\276\221\345\244\207\344\273\275\345\217\212\346\201\242\345\244\215.md"
new file mode 100644
index 0000000000000000000000000000000000000000..91adb37e72c3343084f5d473a82af0aa74522b76
--- /dev/null
+++ "b/content/zh/post/July/openGauss\351\200\273\350\276\221\345\244\207\344\273\275\345\217\212\346\201\242\345\244\215.md"
@@ -0,0 +1,771 @@
++++
+
+title = "openGauss Logical Backup and Restore"
+
+date = "2021-08-07"
+
+tags = [ "openGauss Logical Backup and Restore"]
+
+archives = "2021-08"
+
+author = "Walrus"
+
+summary = "openGauss Logical Backup and Restore"
+
+img = "/zh/post/July/title/img3.png"
+
+times = "12:30"
+
++++
+
+# openGauss Logical Backup and Restore
+
+## gs\_dumpall
+
+1. Background
+
+ - gs\_dumpall is the openGauss tool for exporting information about all databases. It can export all data of openGauss, including the data of the default database postgres, the data of user-defined databases, and the global objects shared by all openGauss databases.
+ - The gs\_dumpall tool is executed by the OS user omm.
+ - While gs\_dumpall is exporting data, other users can still access (read or write) the openGauss databases.
+ - gs\_dumpall exports complete and consistent data. For example, if gs\_dumpall is started at time T1, the exported result reflects the state of openGauss at T1; modifications made after T1 are not exported.
+ - When exporting all openGauss databases, gs\_dumpall works in two parts:
+ - gs\_dumpall itself exports the global objects shared by all databases, including information about database users and groups, tablespaces, and properties (for example, access permissions that apply to databases as a whole).
+ - gs\_dumpall calls gs\_dump to export a SQL script file for each database in openGauss; the script contains all the SQL statements required to restore a database to the state it was in when it was saved.
+
+ The result of both parts is a plain-text SQL script file; running that script with gsql restores the openGauss databases.
+
+2. Experiment procedure
+
+ ```
+ gs_dumpall -p 15400 -f /home/omm/opengauss_39_back.sql
+ [omm@wzsy01 ~]$ gs_dumpall -p 15400 -f /home/omm/opengauss_39_back.sql
+ gs_dump[port='15400'][dbname='chnbs'][2021-07-27 15:38:06]: The total objects number is 1948.
+ gs_dump[port='15400'][dbname='chnbs'][2021-07-27 15:38:07]: [100.00%] 1948 objects have been dumped.
+ gs_dump[port='15400'][dbname='chnbs'][2021-07-27 15:38:29]: dump database dbname='chnbs' successfully
+ gs_dump[port='15400'][dbname='chnbs'][2021-07-27 15:38:29]: total time: 25678 ms
+ gs_dump[port='15400'][dbname='mydb'][2021-07-27 15:38:31]: The total objects number is 1166.
+ gs_dump[port='15400'][dbname='mydb'][2021-07-27 15:38:31]: [100.00%] 1166 objects have been dumped.
+ gs_dump[port='15400'][dbname='mydb'][2021-07-27 15:38:42]: dump database dbname='mydb' successfully
+ gs_dump[port='15400'][dbname='mydb'][2021-07-27 15:38:42]: total time: 13215 ms
+ gs_dump[port='15400'][dbname='postgres'][2021-07-27 15:38:43]: The total objects number is 434.
+ gs_dump[port='15400'][dbname='postgres'][2021-07-27 15:38:43]: [100.00%] 434 objects have been dumped.
+ gs_dump[port='15400'][dbname='postgres'][2021-07-27 15:38:43]: dump database dbname='postgres' successfully
+ gs_dump[port='15400'][dbname='postgres'][2021-07-27 15:38:43]: total time: 584 ms
+ gs_dump[port='15400'][dbname='vzoom'][2021-07-27 15:38:46]: The total objects number is 1947.
+ gs_dump[port='15400'][dbname='vzoom'][2021-07-27 15:38:47]: [100.00%] 1947 objects have been dumped.
+ gs_dump[port='15400'][dbname='vzoom'][2021-07-27 15:39:10]: dump database dbname='vzoom' successfully
+ gs_dump[port='15400'][dbname='vzoom'][2021-07-27 15:39:10]: total time: 27453 ms
+ gs_dumpall[port='15400'][2021-07-27 15:39:10]: dumpall operation successful
+ gs_dumpall[port='15400'][2021-07-27 15:39:10]: total time: 67481 ms
+ ```
+
+ For detailed gs\_dumpall parameters, see the official documentation: https://opengauss.org/zh/docs/1.1.0/docs/Toolreference/gs\_dumpall.html
+
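+ Because gs\_dumpall produces a consistent, plain-text export while the database stays online, it is a natural candidate for a scheduled job. A minimal sketch, assuming port 15400 as above; the script name, backup directory, and 7-dump retention policy are illustrative:
+
+ ```
+ #!/bin/bash
+ # Nightly logical backup wrapper around gs_dumpall, run as the omm user.
+ PORT=15400
+ BACKUP_DIR=/home/omm/backup
+ STAMP=$(date +%Y%m%d)
+
+ mkdir -p "$BACKUP_DIR"
+ gs_dumpall -p $PORT -f "$BACKUP_DIR/opengauss_all_${STAMP}.sql" || exit 1
+
+ # Keep only the 7 most recent dumps.
+ ls -1t "$BACKUP_DIR"/opengauss_all_*.sql | tail -n +8 | xargs -r rm -f
+ ```
+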
+## gs\_dump
+
+1. Background
+ - gs\_dump is the openGauss tool for exporting database information. A user can export a single database or selected objects in it (schemas, tables, views, and so on). The database can be the default database postgres or a user-defined database.
+ - The gs\_dump tool is executed by the OS user omm.
+ - While gs\_dump is exporting data, other users can still access (read or write) the openGauss database.
+ - gs\_dump exports complete and consistent data. For example, if gs\_dump is started at time T1 to export database A, the exported result reflects the state of database A at T1; modifications made to A after T1 are not exported.
+ - gs\_dump can export database information to a plain-text SQL script file or to an archive file.
+ - Plain-text SQL script file: contains the SQL statements required to restore the database to its saved state. Running the script with gsql restores the database; with minor modifications, the script can even rebuild the database on another host or another database product.
+ - Archive file: contains the data required to restore the database, in tar, directory, or custom format. It must be used together with gs\_restore to restore the database; during import, gs\_restore lets the user select what to import and even reorder the items before importing.
+
+2. Experiment procedure
+
+ - Binary (custom-format) backup of the chnbs database
+
+ ```
+ gs_dump -p 15400 chnbs -F c -f /home/omm/chnbs.binary
+ ```
+
+ - SQL text export of the chnbs database
+
+ ```
+ gs_dump -p 15400 chnbs -f /home/omm/chnbs.sql
+ ```
+
+ - tar backup of the chnbs database
+
+ ```
+ gs_dump -p 15400 chnbs -F t -f /home/omm/chnbs.tar
+ ```
+
+ - Directory-format backup of the chnbs database
+
+ ```
+ gs_dump -p 15400 chnbs -F d -f /home/omm/chnbs
+ ```
+
+ - SQL text export of the schema salene in the chnbs database
+
+ ```
+ gs_dump -p 15400 chnbs -n salene -f /home/omm/chnbs_salene.sql
+ ```
+
+ - Binary backup of the schema salene in the chnbs database
+
+ ```
+ gs_dump -p 15400 chnbs -n salene -F c -f /home/omm/chnbs_salene.binary
+ ```
+
+ Comparing the resulting files, the binary backup has by far the highest compression ratio (up to 1:13 in this experiment), while the tar and SQL text backups are uncompressed.
+
+ ```
+ [omm@wzsy01 ~]$ gs_dump -p 15400 chnbs -F c -f /home/omm/chnbs.binary
+ gs_dump[port='15400'][chnbs][2021-07-27 15:40:26]: The total objects number is 1948.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:40:27]: [100.00%] 1948 objects have been dumped.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:40:51]: dump database chnbs successfully
+ gs_dump[port='15400'][chnbs][2021-07-27 15:40:51]: total time: 27629 ms
+ [omm@wzsy01 ~]$ ls -lh
+ -rw------- 1 omm dbgrp 72M Jul 27 15:40 chnbs.binary
+ [omm@wzsy01 ~]$ gs_dump -p 15400 chnbs -f /home/omm/chnbs.sql
+ gs_dump[port='15400'][chnbs][2021-07-27 15:41:25]: The total objects number is 1948.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:41:26]: [100.00%] 1948 objects have been dumped.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:41:46]: dump database chnbs successfully
+ gs_dump[port='15400'][chnbs][2021-07-27 15:41:46]: total time: 24089 ms
+ [omm@wzsy01 ~]$ ls -lh
+ -rw------- 1 omm dbgrp 72M Jul 27 15:40 chnbs.binary
+ -rw------- 1 omm dbgrp 904M Jul 27 15:41 chnbs.sql
+ [omm@wzsy01 ~]$ gs_dump -p 15400 chnbs -F t -f /home/omm/chnbs.tar
+ gs_dump[port='15400'][chnbs][2021-07-27 15:42:54]: The total objects number is 1948.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:42:56]: [100.00%] 1948 objects have been dumped.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:43:17]: dump database chnbs successfully
+ gs_dump[port='15400'][chnbs][2021-07-27 15:43:17]: total time: 25627 ms
+ [omm@wzsy01 ~]$ ls -lh
+ -rw------- 1 omm dbgrp 72M Jul 27 15:40 chnbs.binary
+ -rw------- 1 omm dbgrp 904M Jul 27 15:41 chnbs.sql
+ -rw------- 1 omm dbgrp 907M Jul 27 15:43 chnbs.tar
+ [omm@wzsy01 ~]$ gs_dump -p 15400 chnbs -F d -f /home/omm/chnbs
+ gs_dump[port='15400'][chnbs][2021-07-27 15:44:12]: The total objects number is 1948.
+ gs_dump[port='15400'][chnbs][2021-07-27 15:44:13]: [100.00%] 1948 objects have been dumped. + gs_dump[port='15400'][chnbs][2021-07-27 15:44:34]: dump database chnbs successfully + gs_dump[port='15400'][chnbs][2021-07-27 15:44:34]: total time: 24484 ms + [omm@wzsy01 ~]$ ls -lh + drwx------ 2 omm dbgrp 12K Jul 27 15:44 chnbs + -rw------- 1 omm dbgrp 72M Jul 27 15:40 chnbs.binary + -rw------- 1 omm dbgrp 904M Jul 27 15:41 chnbs.sql + -rw------- 1 omm dbgrp 907M Jul 27 15:43 chnbs.tar + [omm@wzsy01 ~]$ du -sh chnbs + 92M chnbs + [omm@wzsy01 ~]$ cd chnbs/ + [omm@wzsy01 chnbs]$ ls + 7292.dat.gz 7324.dat.gz 7356.dat.gz 7388.dat.gz 7420.dat.gz 7452.dat.gz 7485.dat.gz 7517.dat.gz 7549.dat.gz 7581.dat.gz 7613.dat.gz 7645.dat.gz 7677.dat.gz 7710.dat.gz 7742.dat.gz + 7293.dat.gz 7325.dat.gz 7357.dat.gz 7389.dat.gz 7421.dat.gz 7453.dat.gz 7486.dat.gz 7518.dat.gz 7550.dat.gz 7582.dat.gz 7614.dat.gz 7646.dat.gz 7678.dat.gz 7711.dat.gz 7743.dat.gz + 7294.dat.gz 7326.dat.gz 7358.dat.gz 7390.dat.gz 7422.dat.gz 7454.dat.gz 7487.dat.gz 7519.dat.gz 7551.dat.gz 7583.dat.gz 7615.dat.gz 7647.dat.gz 7679.dat.gz 7712.dat.gz 7744.dat.gz + 7295.dat.gz 7327.dat.gz 7359.dat.gz 7391.dat.gz 7423.dat.gz 7455.dat.gz 7488.dat.gz 7520.dat.gz 7552.dat.gz 7584.dat.gz 7616.dat.gz 7648.dat.gz 7680.dat.gz 7713.dat.gz 7745.dat.gz + 7296.dat.gz 7328.dat.gz 7360.dat.gz 7392.dat.gz 7424.dat.gz 7456.dat.gz 7489.dat.gz 7521.dat.gz 7553.dat.gz 7585.dat.gz 7617.dat.gz 7649.dat.gz 7681.dat.gz 7714.dat.gz 7746.dat.gz + 7297.dat.gz 7329.dat.gz 7361.dat.gz 7393.dat.gz 7425.dat.gz 7457.dat.gz 7490.dat.gz 7522.dat.gz 7554.dat.gz 7586.dat.gz 7618.dat.gz 7650.dat.gz 7682.dat.gz 7715.dat.gz 7747.dat.gz + 7298.dat.gz 7330.dat.gz 7362.dat.gz 7394.dat.gz 7426.dat.gz 7458.dat.gz 7491.dat.gz 7523.dat.gz 7555.dat.gz 7587.dat.gz 7619.dat.gz 7651.dat.gz 7683.dat.gz 7716.dat.gz 7748.dat.gz + 7299.dat.gz 7331.dat.gz 7363.dat.gz 7395.dat.gz 7427.dat.gz 7459.dat.gz 7492.dat.gz 7524.dat.gz 7556.dat.gz 7588.dat.gz 7620.dat.gz 7652.dat.gz 7684.dat.gz 7717.dat.gz 7749.dat.gz + 7300.dat.gz 7332.dat.gz 7364.dat.gz 7396.dat.gz 7428.dat.gz 7460.dat.gz 7493.dat.gz 7525.dat.gz 7557.dat.gz 7589.dat.gz 7621.dat.gz 7653.dat.gz 7685.dat.gz 7718.dat.gz 7750.dat.gz + 7301.dat.gz 7333.dat.gz 7365.dat.gz 7397.dat.gz 7429.dat.gz 7462.dat.gz 7494.dat.gz 7526.dat.gz 7558.dat.gz 7590.dat.gz 7622.dat.gz 7654.dat.gz 7686.dat.gz 7719.dat.gz 7751.dat.gz + 7302.dat.gz 7334.dat.gz 7366.dat.gz 7398.dat.gz 7430.dat.gz 7463.dat.gz 7495.dat.gz 7527.dat.gz 7559.dat.gz 7591.dat.gz 7623.dat.gz 7655.dat.gz 7687.dat.gz 7720.dat.gz 7752.dat.gz + 7303.dat.gz 7335.dat.gz 7367.dat.gz 7399.dat.gz 7431.dat.gz 7464.dat.gz 7496.dat.gz 7528.dat.gz 7560.dat.gz 7592.dat.gz 7624.dat.gz 7656.dat.gz 7688.dat.gz 7721.dat.gz 7753.dat.gz + 7304.dat.gz 7336.dat.gz 7368.dat.gz 7400.dat.gz 7432.dat.gz 7465.dat.gz 7497.dat.gz 7529.dat.gz 7561.dat.gz 7593.dat.gz 7625.dat.gz 7657.dat.gz 7689.dat.gz 7722.dat.gz 7754.dat.gz + 7305.dat.gz 7337.dat.gz 7369.dat.gz 7401.dat.gz 7433.dat.gz 7466.dat.gz 7498.dat.gz 7530.dat.gz 7562.dat.gz 7594.dat.gz 7626.dat.gz 7658.dat.gz 7690.dat.gz 7723.dat.gz 7755.dat.gz + 7306.dat.gz 7338.dat.gz 7370.dat.gz 7402.dat.gz 7434.dat.gz 7467.dat.gz 7499.dat.gz 7531.dat.gz 7563.dat.gz 7595.dat.gz 7627.dat.gz 7659.dat.gz 7691.dat.gz 7724.dat.gz 7756.dat.gz + 7307.dat.gz 7339.dat.gz 7371.dat.gz 7403.dat.gz 7435.dat.gz 7468.dat.gz 7500.dat.gz 7532.dat.gz 7564.dat.gz 7596.dat.gz 7628.dat.gz 7660.dat.gz 7692.dat.gz 7725.dat.gz 7757.dat.gz + 7308.dat.gz 7340.dat.gz 7372.dat.gz 7404.dat.gz 
7436.dat.gz 7469.dat.gz 7501.dat.gz 7533.dat.gz 7565.dat.gz 7597.dat.gz 7629.dat.gz 7661.dat.gz 7693.dat.gz 7726.dat.gz 7758.dat.gz + 7309.dat.gz 7341.dat.gz 7373.dat.gz 7405.dat.gz 7437.dat.gz 7470.dat.gz 7502.dat.gz 7534.dat.gz 7566.dat.gz 7598.dat.gz 7630.dat.gz 7662.dat.gz 7694.dat.gz 7727.dat.gz 7759.dat.gz + 7310.dat.gz 7342.dat.gz 7374.dat.gz 7406.dat.gz 7438.dat.gz 7471.dat.gz 7503.dat.gz 7535.dat.gz 7567.dat.gz 7599.dat.gz 7631.dat.gz 7663.dat.gz 7696.dat.gz 7728.dat.gz dir.lock + 7311.dat.gz 7343.dat.gz 7375.dat.gz 7407.dat.gz 7439.dat.gz 7472.dat.gz 7504.dat.gz 7536.dat.gz 7568.dat.gz 7600.dat.gz 7632.dat.gz 7664.dat.gz 7697.dat.gz 7729.dat.gz toc.dat + 7312.dat.gz 7344.dat.gz 7376.dat.gz 7408.dat.gz 7440.dat.gz 7473.dat.gz 7505.dat.gz 7537.dat.gz 7569.dat.gz 7601.dat.gz 7633.dat.gz 7665.dat.gz 7698.dat.gz 7730.dat.gz + 7313.dat.gz 7345.dat.gz 7377.dat.gz 7409.dat.gz 7441.dat.gz 7474.dat.gz 7506.dat.gz 7538.dat.gz 7570.dat.gz 7602.dat.gz 7634.dat.gz 7666.dat.gz 7699.dat.gz 7731.dat.gz + 7314.dat.gz 7346.dat.gz 7378.dat.gz 7410.dat.gz 7442.dat.gz 7475.dat.gz 7507.dat.gz 7539.dat.gz 7571.dat.gz 7603.dat.gz 7635.dat.gz 7667.dat.gz 7700.dat.gz 7732.dat.gz + 7315.dat.gz 7347.dat.gz 7379.dat.gz 7411.dat.gz 7443.dat.gz 7476.dat.gz 7508.dat.gz 7540.dat.gz 7572.dat.gz 7604.dat.gz 7636.dat.gz 7668.dat.gz 7701.dat.gz 7733.dat.gz + 7316.dat.gz 7348.dat.gz 7380.dat.gz 7412.dat.gz 7444.dat.gz 7477.dat.gz 7509.dat.gz 7541.dat.gz 7573.dat.gz 7605.dat.gz 7637.dat.gz 7669.dat.gz 7702.dat.gz 7734.dat.gz + 7317.dat.gz 7349.dat.gz 7381.dat.gz 7413.dat.gz 7445.dat.gz 7478.dat.gz 7510.dat.gz 7542.dat.gz 7574.dat.gz 7606.dat.gz 7638.dat.gz 7670.dat.gz 7703.dat.gz 7735.dat.gz + 7318.dat.gz 7350.dat.gz 7382.dat.gz 7414.dat.gz 7446.dat.gz 7479.dat.gz 7511.dat.gz 7543.dat.gz 7575.dat.gz 7607.dat.gz 7639.dat.gz 7671.dat.gz 7704.dat.gz 7736.dat.gz + 7319.dat.gz 7351.dat.gz 7383.dat.gz 7415.dat.gz 7447.dat.gz 7480.dat.gz 7512.dat.gz 7544.dat.gz 7576.dat.gz 7608.dat.gz 7640.dat.gz 7672.dat.gz 7705.dat.gz 7737.dat.gz + 7320.dat.gz 7352.dat.gz 7384.dat.gz 7416.dat.gz 7448.dat.gz 7481.dat.gz 7513.dat.gz 7545.dat.gz 7577.dat.gz 7609.dat.gz 7641.dat.gz 7673.dat.gz 7706.dat.gz 7738.dat.gz + 7321.dat.gz 7353.dat.gz 7385.dat.gz 7417.dat.gz 7449.dat.gz 7482.dat.gz 7514.dat.gz 7546.dat.gz 7578.dat.gz 7610.dat.gz 7642.dat.gz 7674.dat.gz 7707.dat.gz 7739.dat.gz + 7322.dat.gz 7354.dat.gz 7386.dat.gz 7418.dat.gz 7450.dat.gz 7483.dat.gz 7515.dat.gz 7547.dat.gz 7579.dat.gz 7611.dat.gz 7643.dat.gz 7675.dat.gz 7708.dat.gz 7740.dat.gz + 7323.dat.gz 7355.dat.gz 7387.dat.gz 7419.dat.gz 7451.dat.gz 7484.dat.gz 7516.dat.gz 7548.dat.gz 7580.dat.gz 7612.dat.gz 7644.dat.gz 7676.dat.gz 7709.dat.gz 7741.dat.gz + + + [omm@wzsy01 ~]$ gs_dump -p 15400 chnbs -n salene -f home/omm/chnbs_salene.sql + gs_dump[port='15400'][chnbs][2021-07-27 15:48:34]: The total objects number is 1158. + gs_dump[port='15400'][chnbs][2021-07-27 15:48:35]: [100.00%] 1158 objects have been dumped. + gs_dump[port='15400'][chnbs][2021-07-27 15:48:45]: dump database chnbs successfully + gs_dump[port='15400'][chnbs][2021-07-27 15:48:45]: total time: 12182 ms + [omm@wzsy01 ~]$ gs_dump -p 15400 chnbs -n salene -F c -f home/omm/chnbs_salene.binary + gs_dump[port='15400'][chnbs][2021-07-27 15:49:05]: The total objects number is 1158. + gs_dump[port='15400'][chnbs][2021-07-27 15:49:05]: [100.00%] 1158 objects have been dumped. 
+ gs_dump[port='15400'][chnbs][2021-07-27 15:49:15]: dump database chnbs successfully + gs_dump[port='15400'][chnbs][2021-07-27 15:49:15]: total time: 12375 ms + [omm@wzsy01 ~]$ ls -l chnbs* + -rw------- 1 omm dbgrp 74573357 Jul 27 15:40 chnbs.binary + -rw------- 1 omm dbgrp 34597228 Jul 27 15:49 chnbs_salene.binary + -rw------- 1 omm dbgrp 456398816 Jul 27 15:48 chnbs_salene.sql + -rw------- 1 omm dbgrp 947593784 Jul 27 15:41 chnbs.sql + -rw------- 1 omm dbgrp 950491648 Jul 27 15:43 chnbs.tar + ``` + + +gs\_dump其他详细参数请参看官方文档:https://opengauss.org/zh/docs/1.1.0/docs/Toolreference/gs\_dump.html + +## gs\_restore + +1. 背景信息 + + gs\_restore是openGauss提供的针对gs\_dump导出数据的导入工具。通过此工具可将由gs\_dump生成的导出文件进行导入。 + + gs\_restore工具由操作系统用户omm执行。 + + 主要功能包含: + + - 导入到数据库 + + 如果连接参数中指定了数据库,则数据将被导入到指定的数据库中。其中,并行导入必须指定连接的密码。 + + - 导入到脚本文件 + + 如果未指定导入数据库,则创建包含重建数据库所必须的SQL语句脚本并写入到文件或者标准输出。等效于直接使用gs\_dump导出为纯文本格式。 + +2. 实验过程 + +- 删除schema + + ``` + drop schema salene cascade; + + [omm@wzsy01 ~]$ gsql -p 15400 -d chnbs -r -U deity -W Deityl--- + gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + + chnbs=> show search_path; + search_path + ---------------- + "$user",public + (1 row) + + + chnbs=> set search_path='salene'; + SET + chnbs=> show search_path; + search_path + ------------- + salene + (1 row) + + + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> \d + List of relations + Schema | Name | Type | Owner | Storage + --------+--------------------------------+----------+-------+---------------------------------- + salene | bank_balance_loan | table | deity | {orientation=row,compression=no} + salene | bi_authentication | table | deity | {orientation=row,compression=no} + salene | bi_bank_product | table | deity | {orientation=row,compression=no} + salene | bi_bank_rate | table | deity | {orientation=row,compression=no} + salene | bi_compatible | table | deity | {orientation=row,compression=no} + ...... + ...... + ...... 
+ salene | bi_credit_feedback | table | deity | {orientation=row,compression=no} + salene | bi_customer | table | deity | {orientation=row,compression=no} + salene | bi_disburse_detail | table | deity | {orientation=row,compression=no} + salene | fahai_sifa_info | table | deity | {orientation=row,compression=no} + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> drop schema salene cascade; + NOTICE: drop cascades to 236 other objects + DETAIL: drop cascades to function p_zcfzblrb_tb() + drop cascades to table bank_balance_loan + drop cascades to table bi_authentication + drop cascades to table bi_bank_product + drop cascades to table bi_bank_rate + drop cascades to table bi_compatible + drop cascades to table bi_credit_feedback + drop cascades to table bi_customer + drop cascades to table bi_disburse_detail + drop cascades to table bi_finance_transaction + drop cascades to table bi_flow + drop cascades to table bi_flow_define + drop cascades to table bi_flow_node + drop cascades to table bi_monitor_history_record + drop cascades to table bi_monitor_record + drop cascades to table bi_mutual_excls + drop cascades to table bi_order + drop cascades to table bi_order_audit + drop cascades to table bi_order_collateral + drop cascades to table bi_order_collateral_owner + drop cascades to table bi_order_push + drop cascades to table bi_order_veritify + drop cascades to table bi_orglist + drop cascades to table bi_pay_repay + drop cascades to table bi_pre_credit + drop cascades to table bi_reason_rule + drop cascades to table bi_reconl_record + drop cascades to table bi_schedule_job + drop cascades to table bi_schedule_record + drop cascades to table bi_tax_organization + drop cascades to table bi_transaction_flow + drop cascades to table bi_transaction_node_detail + ...... + ...... + ...... + drop cascades to table hsj_lawsuit_detail_bgt + drop cascades to table hsj_lawsuit_detail_cpws + and 136 other objects (see server log for list) + DROP SCHEMA + chnbs=> \dn + List of schemas + Name | Owner + ------+------- + jack | deity + (1 row) + + + chnbs=> select * from pg_tables where schemaname='salene'; + schemaname | tablename | tableowner | tablespace | hasindexes | hasrules | hastriggers | tablecreator | created | last_ddl_time + ------------+-----------+------------+------------+------------+----------+-------------+--------------+---------+--------------- + (0 rows) + ``` + + +- sql文本导入恢复salene的schema + + ``` + \i home/omm/chnbs_salene.sql + chnbs=> \i home/omm/chnbs_salene.sql + SET + SET + SET + SET + SET + SET + CREATE SCHEMA + ALTER SCHEMA + SET + CREATE PROCEDURE + ALTER FUNCTION + SET + SET + CREATE TABLE + ...... + ...... + ...... 
+ COMMENT + COMMENT + COMMENT + setval + -------- + 51 + (1 row) + ALTER TABLE + ALTER TABLE + ALTER TABLE + CREATE INDEX + CREATE INDEX + CREATE INDEX + CREATE INDEX + CREATE INDEX + REVOKE + REVOKE + GRANT + GRANT + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> select * from pg_tables where schemaname='salene'; + schemaname | tablename | tableowner | tablespace | hasindexes | hasrules | hastriggers | tablecreator | created | last_ddl_time + ------------+--------------------------------+------------+------------+------------+----------+-------------+--------------+-------------------------------+------------------------------- + salene | bank_balance_loan | deity | | f | f | f | deity | 2021-07-27 16:01:11.467086+08 | 2021-07-27 16:01:11.523697+08 + salene | bi_authentication | deity | | f | f | f | deity | 2021-07-27 16:01:11.524193+08 | 2021-07-27 16:01:11.536608+08 + ...... + ...... + ...... + salene | er_basic | deity | | f | f | f | deity | 2021-07-27 16:01:12.24948+08 | 2021-07-27 16:01:12.254199+08 + ``` + +- gs\_restore从gs\_dump备份出的二进制文件恢复名为salene的schema + + ``` + gs_restore -p 15400 -d chnbs -n salene -F c /home/omm/chnbs_salene.binary + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> drop schema salene cascade; + DROP SCHEMA + chnbs=> \dn + List of schemas + Name | Owner + ------+------- + jack | deity + (1 row) + + + chnbs=> select * from pg_tables where schemaname='salene'; + schemaname | tablename | tableowner | tablespace | hasindexes | hasrules | hastriggers | tablecreator | created | last_ddl_time + ------------+-----------+------------+------------+------------+----------+-------------+--------------+---------+--------------- + (0 rows) + + + chnbs=> \q + [omm@wzsy01 ~]$ gs_restore -p 15400 -d chnbs -n salene -F c /home/omm/chnbs_salene.binary + start restore operation ... + 100 SQL statements read in ! + 200 SQL statements read in ! + 300 SQL statements read in ! + 400 SQL statements read in ! + 500 SQL statements read in ! + 600 SQL statements read in ! + 700 SQL statements read in ! + 800 SQL statements read in ! + 900 SQL statements read in ! + 1000 SQL statements read in ! + 1100 SQL statements read in ! + 1200 SQL statements read in ! + 1300 SQL statements read in ! + 1400 SQL statements read in ! + 1500 SQL statements read in ! + 1600 SQL statements read in ! + 1700 SQL statements read in ! + 1800 SQL statements read in ! + 1900 SQL statements read in ! + 2000 SQL statements read in ! + 2100 SQL statements read in ! + 2200 SQL statements read in ! + 2300 SQL statements read in ! + 2400 SQL statements read in ! + 2500 SQL statements read in ! + 2600 SQL statements read in ! + 2700 SQL statements read in ! + 2800 SQL statements read in ! + 2900 SQL statements read in ! + 3000 SQL statements read in ! + 3100 SQL statements read in ! + 3200 SQL statements read in ! + 3300 SQL statements read in ! + 3400 SQL statements read in ! + 3500 SQL statements read in ! + 3600 SQL statements read in ! + table bank_balance_loan complete data imported ! + table bi_authentication complete data imported ! + table bi_bank_product complete data imported ! + table bi_bank_rate complete data imported ! + ...... + ...... + ...... + table zx_nsrjcxx complete data imported ! + table zx_sbxx complete data imported ! + table zx_sbzsxx complete data imported ! + table zx_tzfxx complete data imported ! + table zx_wfwzxx complete data imported ! 
+ table zx_ybnsr complete data imported ! + table zx_zcfzbxx complete data imported ! + 3900 SQL statements read in ! + Finish reading 3925 SQL statements! + end restore operation ... + restore operation successful + total time: 26888 ms + [omm@wzsy01 ~]$ gsql -p 15400 -d chnbs -r -U deity -W Deitylee1983 + gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> select count(*) from pg_tables where schemaname='salene'; + count + ------- + 233 + ``` + +- gs\_restore从gs\_dump备份出的目录中恢复名为salene的schema + + ``` + gs_restore -p 15400 -d chnbs -n salene -F d /home/omm/chnbs + chnbs=> drop schema salene cascade; + NOTICE: drop cascades to 236 other objects + DETAIL: drop cascades to function salene.p_zcfzblrb_tb() + drop cascades to table salene.bank_balance_loan + drop cascades to table salene.bi_authentication + ...... + ...... + ...... + drop cascades to table salene.hsj_lawsuit_detail_bgt + drop cascades to table salene.hsj_lawsuit_detail_cpws + and 136 other objects (see server log for list) + DROP SCHEMA + chnbs=> select count(*) from pg_tables where schemaname='salene'; + count + ------- + 0 + (1 row) + + + chnbs=> \q + [omm@wzsy01 ~]$ gs_restore -p 15400 -d chnbs -n salene -F d /home/omm/chnbs + start restore operation ... + 100 SQL statements read in ! + ...... + ...... + ...... + 7300 SQL statements read in ! + 7400 SQL statements read in ! + table bank_balance_loan complete data imported ! + table bi_authentication complete data imported ! + 7500 SQL statements read in ! + table bi_bank_product complete data imported ! + table bi_bank_rate complete data imported ! + table bi_compatible complete data imported ! + ...... + ...... + ...... + table zx_ybnsr complete data imported ! + table zx_zcfzbxx complete data imported ! + 7800 SQL statements read in ! + Finish reading 7852 SQL statements! + end restore operation ... + restore operation successful + total time: 26309 ms + [omm@wzsy01 ~]$ gsql -p 15400 -d chnbs -r -U deity -W Deityl--- + gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> select count(*) from pg_tables where schemaname='salene'; + count + ------- + 233 + ``` + +- gs\_restore从gs\_dump备份出的tar文件恢复名为salene的schema + + ``` + gs_restore -p 15400 -d chnbs -n salene -F t /home/omm/chnbs.tar + chnbs=> drop schema salene cascade; + NOTICE: drop cascades to 236 other objects + DETAIL: drop cascades to function salene.p_zcfzblrb_tb() + drop cascades to table salene.bank_balance_loan + ...... + ...... + ...... + drop cascades to table salene.hsj_lawsuit_detail_cpws + and 136 other objects (see server log for list) + DROP SCHEMA + chnbs=> \dn + List of schemas + Name | Owner + ------+------- + jack | deity + (1 row) + + + chnbs=> \q + [omm@wzsy01 ~]$ gs_restore -p 15400 -d chnbs -n salene -F t /home/omm/chnbs.tar + start restore operation ... + 100 SQL statements read in ! + ...... + ...... + ...... + 7400 SQL statements read in ! + table bank_balance_loan complete data imported ! + table bi_authentication complete data imported ! 
+ 7500 SQL statements read in ! + table bi_bank_product complete data imported ! + ...... + ...... + ...... + table zx_zcfzbxx complete data imported ! + 7800 SQL statements read in ! + Finish reading 7852 SQL statements! + end restore operation ... + restore operation successful + total time: 27971 ms + [omm@wzsy01 ~]$ gsql -p 15400 -d chnbs -r -U deity -W Deityle--- + gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + + chnbs=> \dn + List of schemas + Name | Owner + --------+------- + jack | deity + salene | deity + (2 rows) + + + chnbs=> select count(*) from pg_tables where schemaname='salene'; + count + ------- + 233 + ``` + +- gs\_restore从gs\_dump备份出的tar文件恢复salene中的某一张表 + + ``` + gs_restore -p 15400 -d chnbs -n salene -t zx_zcfzbxx -F t /home/omm/chnbs.tar + chnbs=> set search_path='salene'; + SET + + + chnbs=> select count(*) from zx_zcfzbxx; + count + --------- + 1804672 + (1 row) + + + chnbs=> drop table zx_zcfzbxx; + DROP TABLE + chnbs=> select count(*) from zx_zcfzbxx; + ERROR: relation "zx_zcfzbxx" does not exist on dn_6001_6002 + LINE 1: select count(*) from zx_zcfzbxx; + ^ + chnbs=> \q + [omm@wzsy01 ~]$ gs_restore -p 15400 -d chnbs -n salene -t zx_zcfzbxx -F t /home/omm/chnbs.tar + start restore operation ... + 100 SQL statements read in ! + 200 SQL statements read in ! + 300 SQL statements read in ! + 400 SQL statements read in ! + 500 SQL statements read in ! + 600 SQL statements read in ! + 700 SQL statements read in ! + 800 SQL statements read in ! + 900 SQL statements read in ! + 1000 SQL statements read in ! + 1100 SQL statements read in ! + 1200 SQL statements read in ! + 1300 SQL statements read in ! + 1400 SQL statements read in ! + 1500 SQL statements read in ! + 1600 SQL statements read in ! + 1700 SQL statements read in ! + 1800 SQL statements read in ! + 1900 SQL statements read in ! + 2000 SQL statements read in ! + 2100 SQL statements read in ! + 2200 SQL statements read in ! + 2300 SQL statements read in ! + 2400 SQL statements read in ! + 2500 SQL statements read in ! + 2600 SQL statements read in ! + 2700 SQL statements read in ! + 2800 SQL statements read in ! + 2900 SQL statements read in ! + 3000 SQL statements read in ! + 3100 SQL statements read in ! + 3200 SQL statements read in ! + 3300 SQL statements read in ! + 3400 SQL statements read in ! + 3500 SQL statements read in ! + 3600 SQL statements read in ! + 3700 SQL statements read in ! + 3800 SQL statements read in ! + 3900 SQL statements read in ! + 4000 SQL statements read in ! + 4100 SQL statements read in ! + 4200 SQL statements read in ! + 4300 SQL statements read in ! + 4400 SQL statements read in ! + 4500 SQL statements read in ! + 4600 SQL statements read in ! + 4700 SQL statements read in ! + 4800 SQL statements read in ! + 4900 SQL statements read in ! + 5000 SQL statements read in ! + 5100 SQL statements read in ! + 5200 SQL statements read in ! + 5300 SQL statements read in ! + 5400 SQL statements read in ! + 5500 SQL statements read in ! + 5600 SQL statements read in ! + 5700 SQL statements read in ! + 5800 SQL statements read in ! + 5900 SQL statements read in ! + 6000 SQL statements read in ! + 6100 SQL statements read in ! + 6200 SQL statements read in ! + 6300 SQL statements read in ! + 6400 SQL statements read in ! + 6500 SQL statements read in ! + 6600 SQL statements read in ! + 6700 SQL statements read in ! 
+ 6800 SQL statements read in ! + 6900 SQL statements read in ! + 7000 SQL statements read in ! + 7100 SQL statements read in ! + 7200 SQL statements read in ! + 7300 SQL statements read in ! + 7400 SQL statements read in ! + 7500 SQL statements read in ! + 7600 SQL statements read in ! + 7700 SQL statements read in ! + table zx_zcfzbxx complete data imported ! + 7800 SQL statements read in ! + Finish reading 7852 SQL statements! + end restore operation ... + restore operation successful + total time: 14488 ms + [omm@wzsy01 ~]$ gsql -p 15400 -d chnbs -r -U deity -W Deityle--- + gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:17 commit 0 last mr ) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + + chnbs=> select count(*) from salene.zx_zcfzbxx; + count + --------- + 1804672 + (1 row) + ``` + + +gs\_restore详细参数解释请参考官方文档:https://opengauss.org/zh/docs/1.1.0/docs/Toolreference/gs\_restore.html + diff --git a/content/zh/post/July/public_sys-resources/icon-caution.gif b/content/zh/post/July/public_sys-resources/icon-caution.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 Binary files /dev/null and b/content/zh/post/July/public_sys-resources/icon-caution.gif differ diff --git a/content/zh/post/July/public_sys-resources/icon-danger.gif b/content/zh/post/July/public_sys-resources/icon-danger.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 Binary files /dev/null and b/content/zh/post/July/public_sys-resources/icon-danger.gif differ diff --git a/content/zh/post/July/public_sys-resources/icon-note.gif b/content/zh/post/July/public_sys-resources/icon-note.gif new file mode 100644 index 0000000000000000000000000000000000000000..6314297e45c1de184204098efd4814d6dc8b1cda Binary files /dev/null and b/content/zh/post/July/public_sys-resources/icon-note.gif differ diff --git a/content/zh/post/July/public_sys-resources/icon-notice.gif b/content/zh/post/July/public_sys-resources/icon-notice.gif new file mode 100644 index 0000000000000000000000000000000000000000..86024f61b691400bea99e5b1f506d9d9aef36e27 Binary files /dev/null and b/content/zh/post/July/public_sys-resources/icon-notice.gif differ diff --git a/content/zh/post/July/public_sys-resources/icon-tip.gif b/content/zh/post/July/public_sys-resources/icon-tip.gif new file mode 100644 index 0000000000000000000000000000000000000000..93aa72053b510e456b149f36a0972703ea9999b7 Binary files /dev/null and b/content/zh/post/July/public_sys-resources/icon-tip.gif differ diff --git a/content/zh/post/July/public_sys-resources/icon-warning.gif b/content/zh/post/July/public_sys-resources/icon-warning.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 Binary files /dev/null and b/content/zh/post/July/public_sys-resources/icon-warning.gif differ diff --git a/content/zh/post/July/title/img1.png b/content/zh/post/July/title/img1.png new file mode 100644 index 0000000000000000000000000000000000000000..2af578504062e5fa7a7aaf7e1c2014531e51e9c2 Binary files /dev/null and b/content/zh/post/July/title/img1.png differ diff --git a/content/zh/post/July/title/img10.png b/content/zh/post/July/title/img10.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/zh/post/July/title/img10.png differ diff --git 
a/content/zh/post/July/title/img11.png b/content/zh/post/July/title/img11.png new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cb03c6ee1e735b29bce766c1e10d334f0c Binary files /dev/null and b/content/zh/post/July/title/img11.png differ diff --git a/content/zh/post/July/title/img12.png b/content/zh/post/July/title/img12.png new file mode 100644 index 0000000000000000000000000000000000000000..0ec8535146c6a1d5e0b78ee6c1a6b3a8ede1cdf3 Binary files /dev/null and b/content/zh/post/July/title/img12.png differ diff --git a/content/zh/post/July/title/img2.png b/content/zh/post/July/title/img2.png new file mode 100644 index 0000000000000000000000000000000000000000..5537c95b900978a3020269be7ec52ce914224844 Binary files /dev/null and b/content/zh/post/July/title/img2.png differ diff --git a/content/zh/post/July/title/img3.png b/content/zh/post/July/title/img3.png new file mode 100644 index 0000000000000000000000000000000000000000..b903c7f8d5a3ba8b66b2d6be883a4bac7230915e Binary files /dev/null and b/content/zh/post/July/title/img3.png differ diff --git a/content/zh/post/July/title/img4.png b/content/zh/post/July/title/img4.png new file mode 100644 index 0000000000000000000000000000000000000000..6b7b474933a31c6a20d0d1708e8909163293b4ad Binary files /dev/null and b/content/zh/post/July/title/img4.png differ diff --git a/content/zh/post/July/title/img5.png b/content/zh/post/July/title/img5.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/zh/post/July/title/img5.png differ diff --git a/content/zh/post/July/title/img6.png b/content/zh/post/July/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..b71bb7d740d0f375bbea6116ffde9175c0dbcacf Binary files /dev/null and b/content/zh/post/July/title/img6.png differ diff --git a/content/zh/post/July/title/img7.png b/content/zh/post/July/title/img7.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/zh/post/July/title/img7.png differ diff --git a/content/zh/post/July/title/img8.png b/content/zh/post/July/title/img8.png new file mode 100644 index 0000000000000000000000000000000000000000..31e776c19ddc9b62b4b88171d015b1b94ff2b022 Binary files /dev/null and b/content/zh/post/July/title/img8.png differ diff --git a/content/zh/post/July/title/img9.png b/content/zh/post/July/title/img9.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/zh/post/July/title/img9.png differ diff --git "a/content/zh/post/July/\344\270\200\351\224\256\351\203\250\347\275\262openGauss2-0-1-CentOS-7-6.md" "b/content/zh/post/July/\344\270\200\351\224\256\351\203\250\347\275\262openGauss2-0-1-CentOS-7-6.md" new file mode 100644 index 0000000000000000000000000000000000000000..4bbf4255bbcae3eb14c3641371226ad0b183bfd3 --- /dev/null +++ "b/content/zh/post/July/\344\270\200\351\224\256\351\203\250\347\275\262openGauss2-0-1-CentOS-7-6.md" @@ -0,0 +1,252 @@ ++++ + +title = "一键部署openGauss2.0.1 CentOS 7.6" + +date = "2021-08-21" + +tags = [ "一键部署openGauss2.0.1 CentOS 7.6"] + +archives = "2021-08" + +author = "贾军锋" + +summary = "一键部署openGauss2.0.1 CentOS 7.6" + +img = "/zh/post/July/title/img5.png" + +times = "12:30" + ++++ + +# 一键部署openGauss2.0.1\[CentOS 7.6\] + +本文档目的是为了帮助高校学生提供基于CentOS7.6操作系统,实现openGauss数据库一键式安装的脚本。 + 
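+As a quick orientation before the script itself, a minimal sketch of how such a one-click script is typically invoked (the file name `install_opengauss.sh` is a placeholder, not from the original post): run it as root on a fresh CentOS 7.6 host and keep a log of the output:
+
+```
+# hypothetical file name; save the script below as install_opengauss.sh first
+chmod +x install_opengauss.sh
+./install_opengauss.sh 2>&1 | tee /tmp/install_opengauss.log
+```
+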
+该脚本执行成功后,所有关于openGauss数据库的文件将保存在/gaussdb目录中。
+
+- 脚本内容中“192.168.0.99”请替换为服务器实际的私有IP地址。
+
+ ```
+ #!/bin/bash
+ ## Author: 贾军锋
+ ## Date: 2021-04-15
+ ## OS: CentOS7.6 [最小硬件配置:2c/4G]
+ ## Database:openGauss 2.0.1
+ ## Description:一键式实现操作系统环境配置、openGauss软件下载、openGauss软件安装等步骤,帮助大家提升安装openGauss数据库效率
+ ## Tips: 请确保操作系统可以连接外网
+
+ ## 0.关闭virbr0网卡 [本地虚拟机标准化安装openEuler系统会默认存在virbr0网卡,删除该网卡以避免干扰数据库的安装]
+ ## virsh net-destroy default
+ ## virsh net-list
+ ## echo "Net device virbr0 is disabled."
+
+ ## 1.定义主机信息[请根据实际情况修改]
+ export MY_HOSTNAME=node1 ## 主机名
+ export MY_HOSTIP=192.168.0.99 ## IP地址
+ export MY_SOFTWARE_DIRECTORY=/soft/openGauss ## 软件包所在目录
+ export MY_XML=/soft/openGauss/clusterconfig.xml ## 集群配置文件XML
+ export openGauss_Download_url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.0.1-CentOS-64bit-all.tar.gz ## openGauss软件包下载地址
+
+ ## 1. 设置主机名并配置hosts文件
+ hostnamectl set-hostname $MY_HOSTNAME
+ sed -i '/$MY_HOSTIP/d' /etc/hosts
+ echo "$MY_HOSTIP $MY_HOSTNAME #Gauss OM IP Hosts Mapping" >> /etc/hosts
+ cat /etc/hosts
+ echo "1.Configure /etc/hosts completed."
+ echo -e "\n"
+
+ ## 2. 关闭防火墙
+ systemctl disable firewalld.service
+ systemctl stop firewalld.service
+ echo "Firewalld " `systemctl status firewalld|grep Active`
+ echo "2.Disable firewalld service completed."
+ echo -e "\n"
+
+ ## 3. 关闭SELinux
+ sed -i '/^SELINUX=/d' /etc/selinux/config
+ echo "SELINUX=disabled" >> /etc/selinux/config
+ cat /etc/selinux/config|grep "SELINUX=disabled"
+ echo "3.Disable SELINUX completed."
+ echo -e "\n"
+
+ ## 4. 设置操作系统字符集编码
+ echo "LANG=en_US.UTF-8" >> /etc/profile
+ source /etc/profile
+ echo $LANG
+ echo "4.Configure encoding completed."
+ echo -e "\n"
+
+ ## 5. 设置操作系统时区
+ rm -fr /etc/localtime
+ ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+ date -R
+ hwclock
+ echo "5.Configure Timezone completed."
+ echo -e "\n"
+
+ ## 6. 关闭SWAP分区 [对于2G内存的设备,建议待安装完毕后再打开SWAP以间接 “扩容内存容量”]
+ sed -i '/swap/s/^/#/' /etc/fstab
+ swapoff -a
+ free -m
+ echo "6.Close swap partition completed."
+ echo -e "\n"
+
+ ## 7. 配置SSH服务,关闭Banner,允许root远程登录
+ sed -i '/Banner/s/^/#/' /etc/ssh/sshd_config
+ sed -i '/PermitRootLogin/s/^/#/' /etc/ssh/sshd_config
+ echo -e "\n" >> /etc/ssh/sshd_config
+ echo "Banner none " >> /etc/ssh/sshd_config
+ echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
+ cat /etc/ssh/sshd_config |grep -v ^#|grep -E 'PermitRoot|Banner'
+ echo "7.Configure SSH Service completed."
+ echo -e "\n"
+
+ ## 8. 配置YUM源、安装依赖包、修改默认Python3版本
+ mkdir /etc/yum.repos.d/bak
+ mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/
+ wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
+ yum clean all
+ yum install -y bzip2 python3
+ yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel net-tools tar
+ mv /usr/bin/python /usr/bin/python2_bak
+ ln -s /usr/bin/python3 /usr/bin/python
+ python -V
+ echo "8.Configure Install Packages and change default Python version completed."
+ echo -e "\n"
+
+ ## 9. 配置 sysctl.conf 和 performance.sh
+ cat >> /etc/sysctl.conf << EOF
+ net.ipv4.tcp_retries1 = 5
+ net.ipv4.tcp_syn_retries = 5
+ net.sctp.path_max_retrans = 10
+ net.sctp.max_init_retransmits = 10
+ EOF
+ sysctl -p
+ echo "9.Configure sysctl.conf and performance.sh completed."
+ echo -e "\n"
+
+ ## 10. 
配置资源限制 + echo "* soft stack 3072" >> /etc/security/limits.conf + echo "* hard stack 3072" >> /etc/security/limits.conf + echo "* soft nofile 1000000" >> /etc/security/limits.conf + echo "* hard nofile 1000000" >> /etc/security/limits.conf + echo "* soft nproc unlimited" >> /etc/security/limits.d/90-nproc.conf + tail -n 4 /etc/security/limits.conf + tail -n 1 /etc/security/limits.d/90-nproc.conf + echo "10.Configure resource limits completed." + echo -e "\n" + + ## 11. 关闭透明大页[Only for CentOS] + cat >>/etc/rc.d/rc.local< /sys/kernel/mm/transparent_hugepage/enabled + fi + if test -f /sys/kernel/mm/transparent_hugepage/defrag; then + echo never > /sys/kernel/mm/transparent_hugepage/defrag + fi + EOF + chmod +x /etc/rc.d/rc.local + /usr/bin/sh /etc/rc.d/rc.local + cat /sys/kernel/mm/transparent_hugepage/enabled + cat /sys/kernel/mm/transparent_hugepage/defrag + echo "11.Close transparent_hugepage completed." + echo -e "\n" + + ## 12. 禁用RemoveIPC[Only for openEuler] + ## sed -i '/^RemoveIPC/d' /etc/systemd/logind.conf + ## sed -i '/^RemoveIPC/d' /usr/lib/systemd/system/systemd-logind.service + ## echo "RemoveIPC=no" >> /etc/systemd/logind.conf + ## echo "RemoveIPC=no" >> /usr/lib/systemd/system/systemd-logind.service + ## systemctl daemon-reload + ## systemctl restart systemd-logind + ## loginctl show-session | grep RemoveIPC + ## systemctl show systemd-logind | grep RemoveIPC + ## echo "12.Disable RemoveIPC completed." + ## echo -e "\n" + + ## 13. 下载openGauss软件包 + mkdir -p $MY_SOFTWARE_DIRECTORY + cd $MY_SOFTWARE_DIRECTORY + wget $openGauss_Download_url + echo "13.openGauss software download completed." + echo -e "\n" + + ## 14. 配置XML文件 + rm -fr $MY_XML + cat >> $MY_XML < + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + EOF + cat $MY_XML + echo "14.Configure XML file completed." + echo -e "\n" + + ## 15. 解压安装包并修改目录权限 + echo "Begin to Uncompress openGauss Package and Modify directory permissions:" + cd $MY_SOFTWARE_DIRECTORY + tar -zxvf *all.tar.gz + tar -zxvf *om.tar.gz + ls -l + chmod -R 777 $MY_SOFTWARE_DIRECTORY/../ + echo "15.Uncompress openGauss Package completed." + echo -e "\n" + + ## 16. 执行 gs_preinstall + echo "Begin to execute openGauss preinstall:" + python $MY_SOFTWARE_DIRECTORY/script/gs_preinstall -U omm -G dbgrp -X $MY_XML + echo "16.openGauss preinstall completed." + echo -e "\n" + + ## 17. 检查预安装环境 + echo "Begin to Check OS environment:" + $MY_SOFTWARE_DIRECTORY/script/gs_checkos -i A -h $MY_HOSTNAME --detail + + ## 18. 执行 gs_install + echo "Begin to execute openGauss install:" + touch /home/omm/install_db + cat >> /home/omm/install_db < + + + +卸载openGauss的过程分为两步,包含卸载openGauss数据库和对openGauss服务器的环境做清理。 + +## 执行卸载openGauss数据库 + +openGauss提供了卸载脚本帮助用户完整的卸载openGauss数据库。 + +## 操作步骤 + +1. 确认主节点 + + ``` + [omm@db02 ~]$ gs_om -t status --detail + [ CMServer State ] + + node node_ip instance state + ---------------------------------------------------------------------------- + AZ1 1 db01 192.168.0.43 1 /opt/huawei/data/cmserver/cm_server Primary + AZ1 2 db02 192.168.0.22 2 /opt/huawei/data/cmserver/cm_server Standby + AZ1 3 db03 192.168.0.242 3 /opt/huawei/data/cmserver/cm_server Standby + ``` + +2. 以操作系统用户omm登录数据库主节点db01 192.168.0.43。 + + ``` + [root@db01 ~]# su - omm + Last login: Mon Oct 25 14:24:27 CST 2021 on pts/0 + [omm@db01 ~]$ ip a | grep 43 + inet 192.168.0.43/24 brd 192.168.0.255 scope global dynamic noprefixroute eth0 + ``` + +3. 使用gs\_uninstall卸载openGauss。 + + ``` + [omm@db01 ~]$ gs_uninstall --delete-data + Checking uninstallation. 
+ Successfully checked uninstallation. + Stopping the cluster. + Successfully stopped the cluster. + Successfully deleted instances. + Uninstalling application. + Successfully uninstalled application. + Uninstallation succeeded. + Successfully uninstall cluster, for more message please see /home/omm/gs_uninstall.log + ``` + +4. 错误排查 + + 如果卸载失败请根据“$GAUSSLOG/om/gs\_uninstall-YYYY-MM-DD\_HHMMSS.log”中的日志信息排查错误。 + + +## 服务器环境清理 + +在openGauss卸载完成后,如果不需要在环境上重新部署openGauss,可以运行脚本gs\_postuninstall对openGauss服务器上环境信息做清理。openGauss环境清理是对环境准备脚本gs\_preinstall所做设置的清理。 + +## 前提条件 + +- openGauss卸载执行成功。 +- root用户互信可用。 +- 只能使用root用户执行gs\_postuninstall命令。 + +## 操作步骤 + +1. 以root用户登录openGauss服务器。 +2. 查看root用户互信是否建立,如果root用户没有建立互信,需要手工建立root用户互信 + + ``` + # 检查互信 + [root@db01 ~]# ssh db02 date + Mon Oct 25 15:15:56 CST 2021 + [root@db01 ~]# ssh db03 date + Mon Oct 25 15:16:01 CST 2021 + [root@db01 ~]# ssh db01 date + Mon Oct 25 15:16:07 CST 2021 + ``` + +3. 进入script路径下。 + + ``` + [root@db01 ~]# cd /opt/software/GaussDB_Kernel/script/ + [root@db01 script]# ls + checkRunStatus.py gs_checkos gs_lcctl gs_resize HADR.py nodegroup_migrate.sh util + cmd_sender.py gs_checkperf gs_om gs_shrink impl py_pstree.py + CSVInfo.py gs_collector gs_postuninstall gs_ssh __init__.py stage_step + GaussRoach.py gs_expand gs_preinstall gs_sshexkey JsonToDbClustorInfo.py SyncDataToStby.py + gs_backup gs_hotpatch gspylib gs_uninstall killall uninstall_force.py + gs_check gs_install gs_replace gs_upgradectl local uploader.py + ``` + +4. 使用gs\_postuninstall进行清理。若为环境变量分离的模式安装的数据库需要source环境变量分离文件ENVFILE。 + + ``` + [root@db01 script]# ./gs_postuninstall -U omm -X /opt/software/GaussDB_Kernel/clusterconfig.xml --delete-user --delete-group + Parsing the configuration file. + Successfully parsed the configuration file. + Check log file path. + Successfully checked log file path. + Checking unpreinstallation. + Successfully checked unpreinstallation. + Deleting Cgroup. + Successfully deleted Cgroup. + Deleting the instance's directory. + Successfully deleted the instance's directory. + Start to delete the installation directory. + Successfully deleted the installation directory. + Deleting the temporary directory. + Successfully deleted the temporary directory. + Deleting remote OS user. + Successfully deleted remote OS user. + Deleting software packages and environmental variables of other nodes. + Successfully deleted software packages and environmental variables of other nodes. + Deleting logs of other nodes. + Successfully deleted logs of other nodes. + Deleting software packages and environmental variables of the local node. + Successfully deleted software packages and environmental variables of the local nodes. + Deleting local OS user. + Successfully deleted local OS user. + Deleting local node's logs. + Successfully deleted local node's logs. + Successfully cleaned environment. + ``` + + omm为运行openGauss的操作系统用户名,/opt/software/GaussDB\_Kernel/clusterconfig.xml为openGauss配置文件路径。 + + 若为环境变量分离的模式安装的数据库需删除之前source的环境变量分离的env参数。 + + unset MPPDB\\\_ENV\\\_SEPARATE\\\_PATH + +5. 删除openGauss数据库各节点root用户的互信 + + ``` + [root@db01 ~]# \rm -rf /root/.ssh + ``` + +6. 
错误排查

    如果一键式环境清理失败请根据“$GAUSSLOG/om/gs\_postuninstall-YYYY-MM-DD\_HHMMSS.log”中的日志信息排查错误。

diff --git "a/content/zh/post/July/\345\246\202\344\275\225\345\234\250openGauss-2-1-0\344\270\255\344\275\277\347\224\250Job.md" "b/content/zh/post/July/\345\246\202\344\275\225\345\234\250openGauss-2-1-0\344\270\255\344\275\277\347\224\250Job.md"
new file mode 100644
index 0000000000000000000000000000000000000000..2447000cc29fe5ada34da3fcdc0d2e58bcbc050c
--- /dev/null
+++ "b/content/zh/post/July/\345\246\202\344\275\225\345\234\250openGauss-2-1-0\344\270\255\344\275\277\347\224\250Job.md"
@@ -0,0 +1,543 @@
++++
+
+title = "如何在openGauss 2.1.0中使用Job"
+
+date = "2021-10-31"
+
+tags = [ "如何在openGauss 2.1.0中使用Job"]
+
+archives = "2021-10"
+
+author = "刘旭"
+
+summary = "如何在openGauss 2.1.0中使用Job"
+
+img = "/zh/post/July/title/img11.png"
+
+times = "12:30"
+
++++
+
+# 如何在openGauss 2.1.0中使用Job
+
+Job is similar to crontab on UNIX: it runs tasks on a schedule, either at a specified point in time or at a given time every day. Systems in operation routinely need timed work, such as refreshing data on a schedule or generating statistical reports, and all of this can be handled with Job. openGauss 2.1.0 provides the following interfaces for managing jobs:
+
+## Interface description
+
+| Interface | Description |
+| --- | --- |
+| PKG\_SERVICE.JOB\_CANCEL | Deletes a scheduled job by job ID. |
+| PKG\_SERVICE.JOB\_FINISH | Disables or enables a scheduled job. |
+| PKG\_SERVICE.JOB\_SUBMIT | Submits a scheduled job; the job ID is generated by the system or specified by the user. |
+| PKG\_SERVICE.JOB\_UPDATE | Modifies the attributes of a scheduled job, including its content, next run time, and run interval. |
+| PKG\_SERVICE.SUBMIT\_ON\_NODES | Submits a job to all nodes; the job ID is generated automatically by the system. |
+| PKG\_SERVICE.ISUBMIT\_ON\_NODES | Submits a job to all nodes; the job ID is specified by the user. |
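+
+The detailed definitions below cover each interface in turn. As a compact, hedged preview of how they combine over a job's lifecycle (`pro_xxx` is a placeholder procedure and `1234` a placeholder job ID, neither taken from a real system):
+
+```
+-- submit a job that runs hourly (a NULL id lets the system assign one)
+SELECT PKG_SERVICE.JOB_SUBMIT(NULL, 'call pro_xxx();', sysdate, 'sysdate + 1.0/24');
+-- inspect scheduled jobs
+SELECT job_id, next_run_date, interval FROM pg_job;
+-- disable the job, then remove it entirely
+CALL PKG_SERVICE.JOB_FINISH(1234, true, NULL);
+CALL PKG_SERVICE.JOB_CANCEL(1234);
+```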
+
+## Interface definitions and usage examples
+
+- **PKG\_SERVICE.JOB\_CANCEL**
+
+    The stored procedure JOB\_CANCEL deletes the specified scheduled job.
+
+    The prototype of PKG\_SERVICE.JOB\_CANCEL is:
+
+    PKG\_SERVICE.JOB\_CANCEL\( job IN INTEGER\);
+

+    | Parameter | Type | IN/OUT | Nullable | Description |
+    | --- | --- | --- | --- | --- |
+    | id | integer | IN |  | ID of the job to delete. |
+
+    Example:
+
+    ```
+    CALL PKG_SERVICE.JOB_CANCEL(101);
+    ```
+
+- **PKG\_SERVICE.JOB\_FINISH**
+
+    The stored procedure JOB\_FINISH disables or enables a scheduled job.
+
+    The prototype of PKG\_SERVICE.JOB\_FINISH is:
+
+    ```
+    PKG_SERVICE.JOB_FINISH( id IN INTEGER, broken IN BOOLEAN, next_time IN TIMESTAMP DEFAULT sysdate);
+    ```
+

+    | Parameter | Type | IN/OUT | Nullable | Description |
+    | --- | --- | --- | --- | --- |
+    | id | integer | IN |  | ID of the job. |
+    | broken | Boolean | IN |  | Status flag: true disables the job, false enables it. The job is updated according to this value; if it is NULL, the job's current state is left unchanged. |
+    | next_time | timestamp | IN |  | Next run time, defaulting to the current system time. If broken is true, this is set to '4000-1-1'. If broken is false and next_time is not NULL, the job's next_time is updated to this value; if next_time is NULL, it is left unchanged. The parameter can be omitted to use the default. |
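+
+    The source gives no standalone example for JOB\_FINISH, so the following is a minimal sketch, reusing the placeholder job ID 101 from the JOB\_CANCEL example and the behavior described in the table above:
+
+    ```
+    -- disable job 101; its next_run_date is internally pushed out to 4000-01-01
+    CALL PKG_SERVICE.JOB_FINISH(101, true, NULL);
+    -- re-enable it and explicitly schedule the next run for the current time;
+    -- without an explicit next_time the job would stay parked at 4000-01-01
+    CALL PKG_SERVICE.JOB_FINISH(101, false, sysdate);
+    ```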
+
+- **PKG\_SERVICE.JOB\_SUBMIT**
+
+    The stored procedure JOB\_SUBMIT submits a system-provided scheduled job.
+
+    The prototype of PKG\_SERVICE.JOB\_SUBMIT is:
+
+    PKG\_SERVICE.JOB\_SUBMIT\( id IN BIGINT DEFAULT, content IN TEXT, next\_date IN TIMESTAMP DEFAULT sysdate, interval\_time IN TEXT DEFAULT 'null', job OUT INTEGER\);
+
+    When a scheduled job is created, the system by default binds the current database and user name to it. The interface can be invoked with CALL or SELECT; when invoked via SELECT, the output parameter may be omitted. Inside a stored procedure, invoke it with PERFORM. If the submitted SQL statement touches a schema other than public, qualify tables and functions with their schema, or prepend a "set current_schema = xxx;" statement to the SQL.
+

+    | Parameter | Type | IN/OUT | Nullable | Description |
+    | --- | --- | --- | --- | --- |
+    | id | bigint | IN |  | Job ID. If NULL is passed, an ID is generated internally. |
+    | context | text | IN |  | SQL to execute: one or more DML statements, anonymous blocks, stored-procedure calls, or any mix of the three. |
+    | next_time | timestamp | IN |  | Next run time, defaulting to the current system time (sysdate). A time in the past means the job runs immediately upon submission. |
+    | interval_time | text | IN |  | Time expression used to compute the next run time: either an interval expression or sysdate plus a numeric value (for example, sysdate+1.0/24). NULL or the string "null" means the job runs only once, after which its STATUS becomes 'd' and it never runs again. |
+    | job | integer | OUT |  | Job ID, in the range 1~32767. Can be omitted when pkg_service.job_submit is invoked via SELECT. |
+
+    Example:
+
+    ```
+    SELECT PKG_SERVICE.JOB_SUBMIT(NULL, 'call pro_xxx();', to_date('20180101','yyyymmdd'),'sysdate+1');
+    SELECT PKG_SERVICE.JOB_SUBMIT(NULL, 'call pro_xxx();', to_date('20180101','yyyymmdd'),'sysdate+1.0/24');
+    CALL PKG_SERVICE.JOB_SUBMIT(NULL, 'INSERT INTO T_JOB VALUES(1); call pro_1(); call pro_2();', add_months(to_date('201701','yyyymm'),1), 'date_trunc(''day'',SYSDATE) + 1 +(8*60+30.0)/(24*60)' ,:jobid);
+    SELECT PKG_SERVICE.JOB_SUBMIT (101, 'insert_msg_statistic1;', sysdate, 'sysdate+3.0/24');
+    ```
+
+- **PKG\_SERVICE.JOB\_UPDATE**
+
+    The stored procedure JOB\_UPDATE modifies the attributes of a scheduled job, including its content, next run time, and run interval.
+
+    The prototype of PKG\_SERVICE.JOB\_UPDATE is:
+
+    PKG\_SERVICE.JOB\_UPDATE\( id IN BIGINT, next\_time IN TIMESTAMP, interval\_time IN TEXT, content IN TEXT\);
+

+    | Parameter | Type | IN/OUT | Nullable | Description |
+    | --- | --- | --- | --- | --- |
+    | id | integer | IN |  | ID of the job. |
+    | next_time | timestamp | IN |  | Next run time. If NULL, the job's next_time is left unchanged; otherwise it is updated to this value. |
+    | interval_time | text | IN |  | Time expression used to compute the next run time. If NULL, the job's interval_time is left unchanged; if not NULL, it is validated as a time or interval type and the job's interval_time is updated. The string "null" means the job runs only once, after which its STATUS becomes 'd'. |
+    | content | text | IN |  | Name of the stored procedure or SQL block to execute. If NULL, the job's content is left unchanged; otherwise it is updated. |
+
+    Example:
+
+    ```
+    CALL PKG_SERVICE.JOB_UPDATE(101, 'call userproc();', sysdate, 'sysdate + 1.0/1440');
+    CALL PKG_SERVICE.JOB_UPDATE(101, 'insert into tbl_a values(sysdate);', sysdate, 'sysdate + 1.0/1440');
+    ```
+
+- **PKG\_SERVICE.SUBMIT\_ON\_NODES**
+
+    The stored procedure SUBMIT\_ON\_NODES creates a scheduled job on all CN/DN nodes; only sysadmin has this privilege.
+
+    The prototype of PKG\_SERVICE.SUBMIT\_ON\_NODES is:
+
+    ```
+    PKG_SERVICE.SUBMIT_ON_NODES( node_name IN TEXT, database IN TEXT, what IN TEXT, next_date IN TIMESTAMP DEFAULT sysdate, job_interval IN TEXT DEFAULT 'null', job OUT INTEGER);
+    ```
+

+    | Parameter | Type | IN/OUT | Nullable | Description |
+    | --- | --- | --- | --- | --- |
+    | node_name | text | IN |  | Node(s) on which the job runs. Currently only 'ALL_NODE' (run on all nodes) and 'CCN' (run on the central coordinator) are supported. |
+    | database | text | IN |  | Database the cluster job uses. When the node type is 'ALL_NODE', only 'postgres' is supported. |
+    | what | text | IN |  | SQL to execute: one or more DML statements, anonymous blocks, stored-procedure calls, or any mix of the three. |
+    | nextdate | timestamp | IN |  | Next run time, defaulting to the current system time (sysdate). A time in the past means the job runs immediately upon submission. |
+    | job_interval | text | IN |  | Time expression used to compute the next run time: either an interval expression or sysdate plus a numeric value (for example, sysdate+1.0/24). NULL or the string "null" means the job runs only once, after which its STATUS becomes 'd' and it never runs again. |
+    | job | integer | OUT |  | Job ID, in the range 1~32767. Can be omitted when dbms.submit_on_nodes is invoked via SELECT. |
+
+    Example:
+
+    ```
+    select pkg_service.submit_on_nodes('ALL_NODE', 'postgres', 'select capture_view_to_json(''dbe_perf.statement'', 0);', sysdate, 'interval ''60 second''');
+    select pkg_service.submit_on_nodes('CCN', 'postgres', 'select capture_view_to_json(''dbe_perf.statement'', 0);', sysdate, 'interval ''60 second''');
+    ```
+
+- **PKG\_SERVICE.ISUBMIT\_ON\_NODES**
+
+    ISUBMIT\_ON\_NODES has the same syntax and function as SUBMIT\_ON\_NODES, except that its first parameter is an input parameter, namely the job ID specified by the caller, whereas the last parameter of SUBMIT is an output parameter holding the system-generated job ID. Only sysadmin has this privilege.
+
+## Job usage example
+
+- Create a test table
+
+    ```
+    gaussdb@postgres> create table t_job (value TIMESTAMP);
+    CREATE TABLE
+
+    gaussdb@postgres> insert into t_job values(sysdate);
+    INSERT 0 1
+
+    gaussdb@postgres> select * from t_job;
+    +---------------------+
+    | value               |
+    |---------------------|
+    | 2021-10-09 04:36:20 |
+    +---------------------+
+    SELECT 1
+    ```
+
+- Create a job that inserts one row every minute
+
+    ```
+    gaussdb@postgres> select pkg_service.job_submit(null, 'insert into t_job values(sysdate);',sysdate,'sysdate + 1/1440');
+    +--------------+
+    | job_submit   |
+    |--------------|
+    | 15566        |
+    +--------------+
+    SELECT 1
+    ```
+
+- Check the job's results
+
+    ```
+    gaussdb@postgres> select * from t_job;
+    +---------------------+
+    | value               |
+    |---------------------|
+    | 2021-10-09 04:36:20 |
+    | 2021-10-09 04:40:54 |
+    | 2021-10-09 04:41:54 |
+    | 2021-10-09 04:42:54 |
+    +---------------------+
+    SELECT 4
+    ```
+
+- Check the job from the system views
+
+    ```
+    gaussdb@postgres> select job_id,dbname,start_date,next_run_date,interval,failure_count from pg_job;
+    +----------+----------+----------------------------+---------------------+------------------+-----------------+
+    | job_id   | dbname   | start_date                 | next_run_date       | interval         | failure_count   |
+    |----------+----------+----------------------------+---------------------+------------------+-----------------|
+    | 15566    | postgres | 2021-10-09 04:40:54.072363 | 2021-10-09 04:56:54 | sysdate + 1/1440 | 0               |
+    +----------+----------+----------------------------+---------------------+------------------+-----------------+
+    SELECT 1
+    Time: 0.089s
+    gaussdb@postgres> select * from pg_catalog.pg_job_proc pjp where job_id=15566;
+    +----------+------------------------------------+
+    | job_id   | what                               |
+    |----------+------------------------------------|
+    | 15566    | insert into t_job values(sysdate); |
+    +----------+------------------------------------+
+    SELECT 1
+    Time: 0.089s
+    ```
+
+- Change the job to run every two minutes
+
+    ```
+    gaussdb@postgres> select pkg_service.job_update(15566,null,'sysdate + 2/1440',null);
+    +--------------+
+    | job_update   |
+    |--------------|
+    |              |
+    +--------------+
+    SELECT 1
+    ```
+
+- Verify the change and the results
+
+    ```
+    gaussdb@postgres> select job_id,interval from pg_job where job_id=15566;
+    +----------+------------------+
+    | job_id   | interval         |
+    |----------+------------------|
+    | 15566    | sysdate + 2/1440 |
+    +----------+------------------+
+    SELECT 1
+    gaussdb@postgres> select job_id,interval,next_run_date from pg_job where job_id=15566;
+    +----------+------------------+---------------------+
+    | job_id   | interval         | next_run_date       |
+    |----------+------------------+---------------------|
+    | 15566    | sysdate + 2/1440 | 2021-10-09 05:05:57 |
+    +----------+------------------+---------------------+
+    SELECT 1
+    Time: 0.078s
+    ```
+
+- Disable and enable the job
+
+    Disabling and enabling both use the same function, pkg\_service.job\_finish; the parameter values determine whether the job is disabled or enabled.
+
+    ```
+    gaussdb@postgres> select pkg_service.job_finish(15566,true,null);
+    +--------------+
+    | job_finish   |
+    |--------------|
+    |              |
+    +--------------+
+    SELECT 1
+    Time: 0.089s
+    gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
+    +----------+---------------------+--------------+
+    | job_id   | next_run_date       | job_status   |
+    |----------+---------------------+--------------|
+    | 15566    | 4000-01-01 00:00:00 | d            |
+    +----------+---------------------+--------------+
+    SELECT 1
+    Time: 0.075s
+    gaussdb@postgres> select pkg_service.job_finish(15566,false,null);
+    +--------------+
+    | job_finish   |
+    |--------------|
+    |              |
+    +--------------+
+    SELECT 1
+    Time: 0.091s
+    gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
+    +----------+---------------------+--------------+
+    | job_id   | next_run_date       | job_status   |
+    |----------+---------------------+--------------|
+    | 15566    | 4000-01-01 00:00:00 | s            |
+    +----------+---------------------+--------------+
+    SELECT 1
+    Time: 0.080s
+    ```
+
+    Note that when the job is re-enabled without a next run time, next_run_date stays at year 4000, meaning the job still will not start. So when re-enabling a job after disabling it, explicitly specify the next run time:
+
+    ```
+    gaussdb@postgres> select pkg_service.job_finish(15566,false,sysdate);
+    +--------------+
+    | job_finish   |
+    |--------------|
+    |              |
+    +--------------+
+    SELECT 1
+    Time: 0.088s
+    gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
+    +----------+---------------------+--------------+
+    | job_id   | next_run_date       | job_status   |
+    |----------+---------------------+--------------|
+    | 15566    | 2021-10-09 05:16:22 | s            |
+    +----------+---------------------+--------------+
+    SELECT 1
+    Time: 0.086s
+    ```
+
+- Delete the job
+
+    ```
+    gaussdb@postgres> select pkg_service.job_cancel(15566);
+    +--------------+
+    | job_cancel   |
+    |--------------|
+    |              |
+    +--------------+
+    SELECT 1
+    Time: 0.082s
+    gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
+    +----------+-----------------+--------------+
+    | job_id   | next_run_date   | job_status   |
+    |----------+-----------------+--------------|
+    +----------+-----------------+--------------+
+    SELECT 0
+    Time: 0.086s
+    gaussdb@postgres> select * from pg_catalog.pg_job_proc pjp where job_id=15566;
+    +----------+--------+
+    | job_id   | what   |
+    |----------+--------|
+    +----------+--------+
+    SELECT 0
+    Time: 0.087s
+    ```
+
diff --git "a/content/zh/post/July/\345\276\252\345\272\217\346\270\220\350\277\233-openGauss-\345\210\235\345\247\213\345\214\226\345\217\202\346\225\260\347\232\204\350\256\276\347\275\256-\346\237\245\350\257\242\345\222\214\344\277\256\346\224\271.md" "b/content/zh/post/July/\345\276\252\345\272\217\346\270\220\350\277\233-openGauss-\345\210\235\345\247\213\345\214\226\345\217\202\346\225\260\347\232\204\350\256\276\347\275\256-\346\237\245\350\257\242\345\222\214\344\277\256\346\224\271.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f9052f2bca146d431983cd170dc561854e5762fe
--- /dev/null
+++ "b/content/zh/post/July/\345\276\252\345\272\217\346\270\220\350\277\233-openGauss-\345\210\235\345\247\213\345\214\226\345\217\202\346\225\260\347\232\204\350\256\276\347\275\256-\346\237\245\350\257\242\345\222\214\344\277\256\346\224\271.md"
@@ -0,0 +1,206 @@
++++
+
+title = "循序渐进 openGauss :初始化参数的设置、查询和修改"
+
+date = "2021-12-09"
+
+tags = [ "循序渐进 openGauss :初始化参数的设置、查询和修改"]
+
+archives = "2021-12"
+
+author = "eygle"
+
+summary = "循序渐进 openGauss :初始化参数的设置、查询和修改"
+
+img = "/zh/post/July/title/img12.png"
+
+times = "12:30"
+
++++
+
+# 循序渐进 openGauss :初始化参数的设置、查询和修改
+
+由于 openGauss 最早基于 PostgreSQL 创立,所以大多数接口完全兼容。在我们描述的文章中,以 openGauss 为入口,但是很多功能性实现是和 Pg 相一致的。
+
+在 openGauss 中,可配置参数被称为 GUC - Grand Unified Configuration,通常数据库安装后,会自动生成三个配置文件(postgresql.conf、pg\_hba.conf和pg\_ident.conf),并统一存放在数据目录(data)下。
+
+对于 openGauss 来说,参数同样可以通过 pg\_settings 视图访问,在 openGauss 2.0 中,初始提供了 601 个参数:
+
+```
+omm=# select * from version();
+ version
+ 
+---------------------------------------------------------------------------------------------------------------------------------------------- +PostgreSQL 9.2.4 (openGauss 2.0.0 build 78689da9) compiled at 2021-03-31 21:03:52 commit 0 last mr on aarch64-unknown-linux-gnu, compiled b +y g++ (GCC) 7.3.0, 64-bit +(1 row) + +omm=# select count(*) from pg_settings; + count +------- + 601 +(1 row) +``` + +展示具体参数值,可以通过 show 命令来实现: + +``` +omm=# show config_file; + config_file +----------------------------------------- + /var/lib/opengauss/data/postgresql.conf +(1 row) + +omm=# show hba_file; + hba_file +------------------------------------- + /var/lib/opengauss/data/pg_hba.conf +(1 row) + +omm=# show ident_file; + ident_file +--------------------------------------- + /var/lib/opengauss/data/pg_ident.conf +(1 row) +``` + +通过单记录方式显示参数值: + +``` +omm=# \x +Expanded display is on. +omm=# select * from pg_settings where name='work_mem'; +-[ RECORD 1 ]--------------------------------------------------------------------------------------------------------------------- +name | work_mem +setting | 65536 +unit | kB +category | Resource Usage / Memory +short_desc | Sets the maximum memory to be used for query workspaces. +extra_desc | This much memory can be used by each internal sort operation and hash table before switching to temporary disk files. +context | user +vartype | integer +source | default +min_val | 64 +max_val | 2147483647 +enumvals | +boot_val | 65536 +reset_val | 65536 +sourcefile | +sourceline | +``` + +注意,pg 的参数分为如下几个数据类型: + +``` +omm=# select distinct(vartype) from pg_settings; + vartype +--------- + bool + real + int64 + string + integer + enum +(6 rows) +``` + +这些参数,根据不同的修改级别,分为以下六个类别: + +``` +omm=# select distinct(context) from pg_settings; + context +------------ + internal + user + postmaster + backend + sighup + superuser +(6 rows) +``` + +其中: + +- internal:这类参数为只读参数,有的是postgres程序固定的,有的是在安装数据库时intdb时设置好的 +- postmaster:这类参数需要重启数据库才能生效。 +- sighup:不需要重启数据库,但要向postmaster进程发送sighup信号,即需要pg\_ctl reload命令。 +- backend:无需重启数据库,只需向postmaster进程发送sighup信号。但新的配置值只能在之后的新连接中生效,已有连接中这些参数值不会改变。 +- superuser:这类参数可以由超级用户使用set修改。参数设置后只会影响超级用户自身的session配置,不会影响其他用户。 +- user:普通用户使用set设置,这类参数修改后和superuser类参数一样,也是只影响自身session。 + +例如以下查询显示 wal\_level 是一个 postmaster 参数: + +``` +omm=# select name,context from pg_settings where name like 'wal_level'; + name | context +-----------+------------ + wal_level | postmaster +(1 row) +``` + +在 openGauss 2.0 中,支持了通过 alter system 进行参数修改,以下尝试修改 work\_mem 收到一个错误提示,目前 alter system 仅仅支持 POSTMASTER-level, SIGHUP-level 和 BACKEND-level 级别的 guc 参数修改: + +``` +omm=# select name,context from pg_settings where name like 'work_mem'; + name | context +----------+--------- + work_mem | user +(1 row) + +omm=# show work_mem; + work_mem +---------- + 64MB +(1 row) +``` + +``` +omm=# alter system set work_mem='16MB'; +ERROR: unsupport parameter: work_mem +ALTER SYSTEM SET only support POSTMASTER-level, SIGHUP-level and BACKEND-level guc variable, +and it must be allowed to set in postgresql.conf. +``` + +我们尝试修改: + +``` +omm=# select name,context from pg_settings where name='port'; + name | context +------+------------ + port | postmaster +(1 row) +omm=# show port; + port +------ + 5432 +(1 row) + +omm=# alter system set port=8888; +NOTICE: please restart the database for the POSTMASTER level parameter to take effect. 
+ALTER SYSTEM SET +omm=# show port; + port +------ + 5432 +(1 row) +``` + +检查日志文件,日志提示需要重启参数才能生效: + +``` +[BACKEND] LOG: received SIGHUP, reloading configuration files +[BACKEND] LOG: parameter "port" cannot be changed without restarting the server +[BACKEND] LOG: parameter "wal_level" cannot be changed without restarting the server +[BACKEND] LOG: parameter "alarm_component" cannot be changed without restarting the server +[BACKEND] LOG: parameter "pgxc_node_name" cannot be changed without restarting the server +[BACKEND] LOG: parameter "listen_addresses" cannot be changed without restarting the server +[BACKEND] LOG: configuration file "/var/lib/opengauss/data/postgresql.conf" contains errors; unaffected changes were applied +``` + +检查参数文件,可以发现该参数已经被写入: + +``` +omm@modb:pg_log$ grep 8888 /var/lib/opengauss/data/postgresql.conf +port = 8888 # (change requires restart) +``` + diff --git "a/content/zh/post/July/\346\242\246\345\271\273\350\201\224\345\212\250-MogDB-openGauss\344\270\216ShardingSphere\345\234\250tpcc\344\270\212\347\232\204\350\241\250\347\216\260.md" "b/content/zh/post/July/\346\242\246\345\271\273\350\201\224\345\212\250-MogDB-openGauss\344\270\216ShardingSphere\345\234\250tpcc\344\270\212\347\232\204\350\241\250\347\216\260.md" new file mode 100644 index 0000000000000000000000000000000000000000..0c58bb4d1569213c423e46f0440e1cf0a562a6d7 --- /dev/null +++ "b/content/zh/post/July/\346\242\246\345\271\273\350\201\224\345\212\250-MogDB-openGauss\344\270\216ShardingSphere\345\234\250tpcc\344\270\212\347\232\204\350\241\250\347\216\260.md" @@ -0,0 +1,476 @@ ++++ + +title = "梦幻联动-MogDB/openGauss与ShardingSphere在tpcc上的表现" + +date = "2021-10-20" + +tags = [ "梦幻联动-MogDB/openGauss与ShardingSphere在tpcc上的表现告"] + +archives = "2021-10" + +author = "李宏达" + +summary = "梦幻联动-MogDB/openGauss与ShardingSphere在tpcc上的表现" + +img = "/zh/post/July/title/img4.png" + +times = "12:30" + ++++ + + + +# 梦幻联动-MogDB/openGauss与ShardingSphere在tpcc上的表现 + + + +## 一、 ShardingSphere + +1. 什么是ShardingSphere + + Apache ShardingSphere 是一套开源的分布式数据库解决方案组成的生态圈,它由 JDBC、Proxy 和 Sidecar(规划中)这 3 款既能够独立部署,又支持混合部署配合使用的产品组成。 它们均提供标准化的数据水平扩展、分布式事务和分布式治理等功能,可适用于如 Java 同构、异构语言、云原生等各种多样化的应用场景。 + +2. 适用场景 + + Apache ShardingSphere 旨在充分合理地在分布式的场景下利用关系型数据库的计算和存储能力,而并非实现一个全新的关系型数据库。 关系型数据库当今依然占有巨大市场份额,是企业核心系统的基石,未来也难于撼动,我们更加注重在原有基础上提供增量,而非颠覆。 + + Apache ShardingSphere 5.x 版本开始致力于可插拔架构,项目的功能组件能够灵活的以可插拔的方式进行扩展。 目前,数据分片、读写分离、数据加密、影子库压测等功能,以及 MySQL、PostgreSQL、SQLServer、Oracle 等 SQL 与协议的支持,均通过插件的方式织入项目。 开发者能够像使用积木一样定制属于自己的独特系统。Apache ShardingSphere 目前已提供数十个 SPI 作为系统的扩展点,仍在不断增加中。 + +3. 开源数字 + + ShardingSphere 已于2020年4月16日成为 Apache 软件基金会的顶级项目。 + + - 星评增长时间 + + ![](figures/星评增长时间线.png) + + - 贡献者增长时间线 + - ![](figures/贡献者增长时间线.png) + +4. 架构图 + + ![](figures/架构图.png) + +5. ShardingSphere Proxy + + - 定位为透明化的数据库代理端,提供封装了数据库二进制协议的服务端版本,用于完成对异构语言的支持。 目前提供 MySQL 和 PostgreSQL(兼容 openGauss 等基于 PostgreSQL 的数据库)版本,它可以使用任何兼容 MySQL/PostgreSQL 协议的访问客户端(如:MySQL Command Client, MySQL Workbench, Navicat 等)操作数据,对 DBA 更加友好。 + - 向应用程序完全透明,可直接当做 MySQL/PostgreSQL 使用。 + - 适用于任何兼容 MySQL/PostgreSQL 协议的的客户端。 + + ![](figures/ShardingSphere-Proxy.png) + + +## 二、 MogDB + +1. 什么是MogDB + +MogDB是云和恩墨基于openGauss开源数据库的内核进行研发,推出的一款极致易用的企业级关系型数据库。MogDB具备金融级高可用和全密态计算的极致安全、面向多核处理器的极致性能、AI自诊断调优的极致智能能力,能够满足从核心交易到复杂计算的企业级业务需求。 + +云和恩墨致力于发挥全栈产品加服务的企业优势,优先支持鲲鹏算力,在 MogDB 的运行平台、管理工具、SQL审核和运维服务等方向推出整体解决方案,为用户提供可信赖的企业级产品和服务,为 openGauss 的开源生态持续贡献力量。 + +2. 
适用场景 + +大并发、大数据量、以联机事务处理为主的交易型应用场景,如电商、金融、O2O、电信CRM/计费等,应用可按需选择不同的主备部署模式。 + +在工业监控与远程控制、智慧城市能力延展、智能家居、车联网等物联网应用场景下,传感设备多、采样率高、数据存储为追加模型,满足操作和分析并重的要求。 + +## 三、如何联动 + +1. 概述 + +通过ShardingSphere的Proxy功能作为MogDB数据库的代理端进行数据的转发分片和流转。 + +本文通过TPCC标准程序BenchmarkSQL5.0作为模拟应用的压力发起程序。 + +通过ShardingSphere的功能可以具备分布式数据库的能力。 + +2. 架构图 + +![](figures/架构图3.png) + +## 四、安装数据库 + +1. 安装 + +安装两个单机数据库,数据库安装参考 https://www.modb.pro/db/70779 + +## 五、配置ShardingSphere + +1. 安装JAVA JDK + +``` +[root@db1 lee]# yum install java* -y +[root@db1 lee]# tail -3 ~/.bashrc +export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-1.h5.oe1.aarch64 +export PATH=$JAVA_HOME/bin:$PATH +export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME +``` + +2. 编译 + +``` +[root@db1 lee]# git clone https://github.com/apache/shardingsphere.git +[root@db1 lee]# cd shardingsphere-master +[root@db1 shardingsphere-master]# nohup ./mvnw clean install -DskipTests -Prelease -T1C -Djacoco.skip=true -Dcheckstyle.skip=true -DskipITs -Drat.skip=true -Dmaven.javadoc.skip=true -B & +[root@db1 shardingsphere-master]# tail -20 nohup.out +[INFO] shardingsphere-integration-agent-test-plugins ...... SUCCESS [ 0.492 s] +[INFO] shardingsphere-integration-agent-test-common ....... SUCCESS [ 1.173 s] +[INFO] shardingsphere-integration-agent-test-metrics ...... SUCCESS [ 2.401 s] +[INFO] shardingsphere-integration-agent-test-zipkin ....... SUCCESS [ 2.285 s] +[INFO] shardingsphere-integration-agent-test-jaeger ....... SUCCESS [ 2.285 s] +[INFO] shardingsphere-integration-agent-test-opentelemetry SUCCESS [ 2.425 s] +[INFO] shardingsphere-integration-scaling-test ............ SUCCESS [ 1.020 s] +[INFO] shardingsphere-integration-scaling-test-mysql ...... SUCCESS [ 2.379 s] +[INFO] shardingsphere-rewrite-test ........................ SUCCESS [ 1.112 s] +[INFO] shardingsphere-optimize-test ....................... SUCCESS [ 0.927 s] +[INFO] shardingsphere-distribution ........................ SUCCESS [ 0.421 s] +[INFO] shardingsphere-src-distribution .................... SUCCESS [ 5.154 s] +[INFO] shardingsphere-jdbc-distribution ................... SUCCESS [ 1.845 s] +[INFO] shardingsphere-proxy-distribution .................. SUCCESS [ 7.516 s] +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 01:28 min (Wall Clock) +[INFO] Finished at: 2021-10-29T11:36:59+08:00 +[INFO] ------------------------------------------------------------------------ +[root@db1 target]# pwd +/lee/shardingsphere-master/shardingsphere-distribution/shardingsphere-proxy-distribution/target +[root@db1 target]# ls +apache-shardingsphere-5.0.0-RC1-SNAPSHOT-shardingsphere-proxy-bin.tar.gz archive-tmp +apache-shardingsphere-5.0.0-RC1-SNAPSHOT-shardingsphere-proxy-bin.tar.gz.sha512 maven-shared-archive-resources +[root@db1 target]# mv apache-shardingsphere-5.0.0-RC1-SNAPSHOT-shardingsphere-proxy-bin.tar.gz proxy.tar.gz +[root@db1 target]# cp proxy.tar.gz /lee/ss/ +``` + +apache-shardingsphere-5.0.0-RC1-SNAPSHOT-shardingsphere-proxy-bin.tar.gz 为proxy程序。 + +3. 修改配置文件 + +文件过长注释部分已经省略 + +- 主配置文件 + + ``` + [root@db1 conf]# cat server.yaml + rules: + - !AUTHORITY + users: + - root@%:root + - sharding@:sharding + provider: + type: ALL_PRIVILEGES_PERMITTED + props: + max-connections-size-per-query: 2 + proxy-frontend-flush-threshold: 128 # The default value is 128. 
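+  # (added note, not in the original config) proxy-backend-query-fetch-size
+  # below caps how many rows the proxy fetches from MogDB per round trip;
+  # larger values trade proxy memory for fewer network round trips.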
+ proxy-backend-query-fetch-size: 1000 + ``` + + +- users部分为ShardingSphere的账号密码,属于ShardingSphere的对象和数据库对象无关。 +- 分片文件 + + ``` + [root@db1 conf]# cat config-sharding.yaml + schemaName: tpcc + dataSources: + ds_0: + connectionTimeoutMilliseconds: 30000 + idleTimeoutMilliseconds: 60000 + maxLifetimeMilliseconds: 1800000 + maxPoolSize: 3000 + minPoolSize: 1 + password: tpcc@123 + url: jdbc:postgresql://192.168.2.157:26000/tpcc?serverTimezone=UTC&useSSL=false&loggerLevel=OFF + username: tpcc + ds_1: + connectionTimeoutMilliseconds: 30000 + idleTimeoutMilliseconds: 60000 + maxLifetimeMilliseconds: 1800000 + maxPoolSize: 3000 + minPoolSize: 1 + password: tpcc@123 + url: jdbc:postgresql://192.168.2.158:26000/tpcc?serverTimezone=UTC&useSSL=false&loggerLevel=OFF + username: tpcc + + rules: + - !SHARDING + bindingTables: + - bmsql_warehouse, bmsql_customer + - bmsql_stock, bmsql_district, bmsql_order_line + defaultDatabaseStrategy: + none: + defaultTableStrategy: + none: + keyGenerators: + snowflake: + props: + worker-id: 123 + type: SNOWFLAKE + tables: + bmsql_config: + actualDataNodes: ds_0.bmsql_config + + bmsql_warehouse: + actualDataNodes: ds_${0..1}.bmsql_warehouse + databaseStrategy: + standard: + shardingColumn: w_id + shardingAlgorithmName: bmsql_warehouse_database_inline + + bmsql_district: + actualDataNodes: ds_${0..1}.bmsql_district + databaseStrategy: + standard: + shardingColumn: d_w_id + shardingAlgorithmName: bmsql_district_database_inline + + bmsql_customer: + actualDataNodes: ds_${0..1}.bmsql_customer + databaseStrategy: + standard: + shardingColumn: c_w_id + shardingAlgorithmName: bmsql_customer_database_inline + + bmsql_item: + actualDataNodes: ds_${0..1}.bmsql_item + databaseStrategy: + standard: + shardingColumn: i_id + shardingAlgorithmName: bmsql_item_database_inline + + bmsql_history: + actualDataNodes: ds_${0..1}.bmsql_history + databaseStrategy: + standard: + shardingColumn: h_w_id + shardingAlgorithmName: bmsql_history_database_inline + + bmsql_oorder: + actualDataNodes: ds_${0..1}.bmsql_oorder_${0..1} + databaseStrategy: + standard: + shardingColumn: o_w_id + shardingAlgorithmName: bmsql_oorder_database_inline + tableStrategy: + standard: + shardingColumn: o_c_id + shardingAlgorithmName: bmsql_oorder_table_inline + + bmsql_stock: + actualDataNodes: ds_${0..1}.bmsql_stock + databaseStrategy: + standard: + shardingColumn: s_w_id + shardingAlgorithmName: bmsql_stock_database_inline + + bmsql_new_order: + actualDataNodes: ds_${0..1}.bmsql_new_order + databaseStrategy: + standard: + shardingColumn: no_w_id + shardingAlgorithmName: bmsql_new_order_database_inline + + bmsql_order_line: + actualDataNodes: ds_${0..1}.bmsql_order_line + databaseStrategy: + standard: + shardingColumn: ol_w_id + shardingAlgorithmName: bmsql_order_line_database_inline + + shardingAlgorithms: + bmsql_warehouse_database_inline: + type: INLINE + props: + algorithm-expression: ds_${w_id % 2} + + bmsql_district_database_inline: + type: INLINE + props: + algorithm-expression: ds_${d_w_id % 2} + + bmsql_customer_database_inline: + type: INLINE + props: + algorithm-expression: ds_${c_w_id % 2} + + bmsql_item_database_inline: + type: INLINE + props: + algorithm-expression: ds_${i_id % 2} + + bmsql_history_database_inline: + type: INLINE + props: + algorithm-expression: ds_${h_w_id % 2} + + bmsql_oorder_database_inline: + type: INLINE + props: + algorithm-expression: ds_${o_w_id % 2} + + bmsql_oorder_table_inline: + type: INLINE + props: + algorithm-expression: bmsql_oorder_${o_c_id % 2} + + 
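+      # (added note, not in the original config) every INLINE expression in this
+      # section is the same modulo-2 split: ds_${<column> % 2} routes rows with an
+      # even sharding-column value to ds_0 and odd values to ds_1, so the two
+      # MogDB instances each hold half of every sharded table.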
+      bmsql_stock_database_inline:
+        type: INLINE
+        props:
+          algorithm-expression: ds_${s_w_id % 2}
+
+      bmsql_new_order_database_inline:
+        type: INLINE
+        props:
+          algorithm-expression: ds_${no_w_id % 2}
+
+      bmsql_order_line_database_inline:
+        type: INLINE
+        props:
+          algorithm-expression: ds_${ol_w_id % 2}
+    ```
+
+- schemaName 为proxy对外暴露的逻辑库名(本例中恰好与数据源用户名同为tpcc,二者并非同一概念)
+- dataSources 为数据源,可配置1至多个
+- rules 为分片规则,表达式中的 % 2 表示按分片列取模,将数据分成2份落到两个数据库。
+
+4. 启动proxy
+
+```
+[root@db1 bin]# pwd
+/lee/ss/proxy/bin
+[root@db1 bin]# ./start.sh 3307
+Starting the ShardingSphere-Proxy ...
+The classpath is /lee/ss/proxy/conf:.:/lee/ss/proxy/lib/*:/lee/ss/proxy/ext-lib/*
+Please check the STDOUT file: /lee/ss/proxy/logs/stdout.log
+[root@db1 bin]# cat /lee/ss/proxy/logs/stdout.log
+Thanks for using Atomikos! Evaluate http://www.atomikos.com/Main/ExtremeTransactions for advanced features and professional support
+or register at http://www.atomikos.com/Main/RegisterYourDownload to disable this message and receive FREE tips & advice
+[INFO ] 2021-11-01 15:53:05.643 [main] o.a.s.p.i.BootstrapInitializer - Database name is `PostgreSQL`, version is `9.2.4`
+[INFO ] 2021-11-01 15:53:05.837 [main] o.a.s.p.frontend.ShardingSphereProxy - ShardingSphere-Proxy start success
+```
+
+可以在启动脚本后附加参数指定proxy的监听端口(本例为3307)
+
+5. 测试连接
+
+```
+[lee@node157 ~]$ gsql -d tpcc -Usharding -h 192.168.2.136 -p3307 -Wsharding
+gsql ((MogDB 2.1.0 build ) compiled at 2021-10-26 19:07:06 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+tpcc=>
+```
+
+## 六、调试BenchmarkSQL
+
+1. 查看配置文件
+
+```
+[root@db1 run]# cat props.mogdb.ss
+db=postgres
+driver=org.postgresql.Driver
+conn=jdbc:postgresql://192.168.2.136:3307/tpcc?prepareThreshold=1&batchMode=on&fetchsize=10&loggerLevel=off
+user=sharding
+password=sharding
+
+warehouses=100
+loadWorkers=50
+
+terminals=500
+//To run specified transactions per terminal- runMins must equal zero
+runTxnsPerTerminal=0
+//To run for specified minutes- runTxnsPerTerminal must equal zero
+runMins=10
+//Number of total transactions per minute
+limitTxnsPerMin=0
+
+//Set to true to run in 4.x compatible mode. Set to false to use the
+//entire configured database evenly.
+terminalWarehouseFixed=true
+
+//The following five values must add up to 100
+//The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec
+newOrderWeight=45
+paymentWeight=43
+orderStatusWeight=4
+deliveryWeight=4
+stockLevelWeight=4
+
+// Directory name to create for collecting detailed result data.
+// Comment this out to suppress.
+resultDirectory=ss_result_%tY-%tm-%td_%tH%tM%tS
+//osCollectorScript=./misc/os_collector_linux.py
+//osCollectorInterval=1
+//osCollectorSSHAddr=user@dbhost
+//osCollectorDevices=net_eth0 blk_sda
+```
+
+2. 生成原始数据
+
+```
+[root@db1 run]# ./runDatabaseBuild.sh props.mogdb.ss
+部分日志:
+# ------------------------------------------------------------
+# Loading SQL file ./sql.postgres/buildFinish.sql
+------------------------------------------------------------
+-- ----
+-- Extra commands to run after the tables are created, loaded,
+-- indexes built and extra's created.
+-- PostgreSQL version.
+-- ----
+```
+
+3. 
运行TPCC程序 + +``` +[root@db1 data]# numactl -C 0-25,30-55 ./runBenchmark.sh props.mogdb.ss +13:55:30,137 [main] INFO jTPCC : Term-00, +13:55:30,140 [main] INFO jTPCC : Term-00, +-------------------------------------------------------------+ +13:55:30,140 [main] INFO jTPCC : Term-00, BenchmarkSQL v5.0 +13:55:30,140 [main] INFO jTPCC : Term-00, +-------------------------------------------------------------+ +13:55:30,140 [main] INFO jTPCC : Term-00, (c) 2003, Raul Barbosa +13:55:30,140 [main] INFO jTPCC : Term-00, (c) 2004-2016, Denis Lussier +13:55:30,142 [main] INFO jTPCC : Term-00, (c) 2016, Jan Wieck +13:55:30,142 [main] INFO jTPCC : Term-00, +-------------------------------------------------------------+ +13:55:30,142 [main] INFO jTPCC : Term-00, +13:55:30,142 [main] INFO jTPCC : Term-00, db=postgres +13:55:30,142 [main] INFO jTPCC : Term-00, driver=org.postgresql.Driver +13:55:30,143 [main] INFO jTPCC : Term-00, conn=jdbc:postgresql://192.168.2.136:3307/tpcc?prepareThreshold=1&batchMode=on&fetchsize=10&loggerLevel=off +13:55:30,143 [main] INFO jTPCC : Term-00, user=sharding +13:55:30,143 [main] INFO jTPCC : Term-00, +13:55:30,143 [main] INFO jTPCC : Term-00, warehouses=100 +13:55:30,143 [main] INFO jTPCC : Term-00, terminals=500 +13:55:30,144 [main] INFO jTPCC : Term-00, runMins=10 +13:55:30,144 [main] INFO jTPCC : Term-00, limitTxnsPerMin=0 +13:55:30,145 [main] INFO jTPCC : Term-00, terminalWarehouseFixed=true +13:55:30,145 [main] INFO jTPCC : Term-00, +13:55:30,145 [main] INFO jTPCC : Term-00, newOrderWeight=45 +13:55:30,145 [main] INFO jTPCC : Term-00, paymentWeight=43 +13:55:30,145 [main] INFO jTPCC : Term-00, orderStatusWeight=4 +13:55:30,145 [main] INFO jTPCC : Term-00, deliveryWeight=4 +13:55:30,145 [main] INFO jTPCC : Term-00, stockLevelWeight=4 +13:55:30,145 [main] INFO jTPCC : Term-00, +13:55:30,145 [main] INFO jTPCC : Term-00, resultDirectory=ss_result_%tY-%tm-%td_%tH%tM%tS +13:55:30,145 [main] INFO jTPCC : Term-00, osCollectorScript=null +13:55:30,145 [main] INFO jTPCC : Term-00, +13:55:30,157 [main] INFO jTPCC : Term-00, copied props.mogdb.ss to ss_result_2021-11-01_135530/run.properties +13:55:30,157 [main] INFO jTPCC : Term-00, created ss_result_2021-11-01_135530/data/runInfo.csv for runID 835 +13:55:30,157 [main] INFO jTPCC : Term-00, writing per transaction results to ss_result_2021-11-01_135530/data/result.csv +13:55:30,158 [main] INFO jTPCC : Term-00, +13:55:30,237 [main] INFO jTPCC : Term-00, C value for C_LAST during load: 165 +13:55:30,237 [main] INFO jTPCC : Term-00, C value for C_LAST this run: 92 +13:55:30,237 [main] INFO jTPCC : Term-00, Running Average tpmTOTAL: 626491.25 Current tpmTOTAL: 41415216 Memory Usage: 858MB / 1001MB +14:05:31,197 [Thread-158] INFO jTPCC : Term-00, +14:05:31,197 [Thread-158] INFO jTPCC : Term-00, +14:05:31,197 [Thread-158] INFO jTPCC : Term-00, Measured tpmC (NewOrders) = 281818.72 +14:05:31,198 [Thread-158] INFO jTPCC : Term-00, Measured tpmTOTAL = 626481.78 +14:05:31,198 [Thread-158] INFO jTPCC : Term-00, Session Start = 2021-11-01 13:55:31 +14:05:31,198 [Thread-158] INFO jTPCC : Term-00, Session End = 2021-11-01 14:05:31 +14:05:31,198 [Thread-158] INFO jTPCC : Term-00, Transaction Count = 6265412 +14:05:31,198 [Thread-158] INFO jTPCC : executeTime[Payment]=87346178 +14:05:31,198 [Thread-158] INFO jTPCC : executeTime[Order-Status]=3979084 +14:05:31,198 [Thread-158] INFO jTPCC : executeTime[Delivery]=24407579 +14:05:31,198 [Thread-158] INFO jTPCC : executeTime[Stock-Level]=3583178 +14:05:31,198 [Thread-158] INFO jTPCC : 
executeTime[New-Order]=180651188 +``` + diff --git "a/content/zh/post/July/\347\272\257SQL\347\224\237\346\210\220openGauss\346\225\260\346\215\256\345\272\223\347\232\204html\345\267\241\346\243\200\346\212\245\345\221\212.md" "b/content/zh/post/July/\347\272\257SQL\347\224\237\346\210\220openGauss\346\225\260\346\215\256\345\272\223\347\232\204html\345\267\241\346\243\200\346\212\245\345\221\212.md" new file mode 100644 index 0000000000000000000000000000000000000000..a4fbebaf5e0ae65347cd9d3cac015a3d43a15f02 --- /dev/null +++ "b/content/zh/post/July/\347\272\257SQL\347\224\237\346\210\220openGauss\346\225\260\346\215\256\345\272\223\347\232\204html\345\267\241\346\243\200\346\212\245\345\221\212.md" @@ -0,0 +1,108 @@ ++++ + +title = "纯SQL生成openGauss数据库的html巡检报告" + +date = "2021-10-26" + +tags = [ "纯SQL生成openGauss数据库的html巡检报告"] + +archives = "2021-10" + +author = "小麦苗" + +summary = "纯SQL生成openGauss数据库的html巡检报告" + +img = "/zh/post/July/title/img4.png" + +times = "12:30" + ++++ + + + +# 纯SQL生成openGauss数据库的html巡检报告 + + + +## 1、巡检脚本简介 + +该套巡检脚本为纯SQL脚本开发,如下所示: + +![](figures/巡检脚本为纯SQL脚本开发.png) + +目前一共包含13个脚本,若脚本的扩展名为“.sql”则表示该脚本为sql脚本;若脚本的扩展名为“.pl”则表示该脚本为perl脚本;若脚本的扩展名为“.sh”则表示该脚本为shell脚本。 + +对于Oracle的SQL脚本而言,脚本DB\_Oracle\_HC\_lhr\_v7.0.0\_10g.sql适用于Oracle 10g数据库,脚本DB\_Oracle\_HC\_lhr\_v7.0.0\_11g.sql适用于Oracle 11g的数据库,脚本DB\_Oracle\_HC\_lhr\_v7.0.0\_12c.sql适用于Oracle 12c及其以上版本,这3个脚本都是只读版本,这3个脚本只会对数据库做查询操作,不会做DML和DDL操作,这也是很多朋友所期待的功能。 + +脚本DB\_OS\_HC\_lhr\_v7.0.0.pl是perl脚本,执行后会对OS的信息进行收集,并且输出到html中。脚本DB\_OS\_HC\_lhr\_v7.0.0.sh是shell脚本,执行后会对OS的信息进行收集。 + +脚本DB\_MySQL\_HC\_lhr\_v7.0.0.sql是MySQL脚本,执行后会产生MySQL的健康检查html报告,该脚本为只读脚本。 + +脚本DB\_MSSQL\_HC\_lhr\_v7.0.0\_2005.sql和DB\_MSSQL\_HC\_lhr\_v7.0.0\_2008R2.sql是SQL Server脚本,存在部分DDL和DML操作,执行后会产生SQL Server的健康检查html报告。脚本DB\_MSSQL\_HC\_lhr\_v7.0.0\_2005.sql最低支持2005版本,而脚本DB\_MSSQL\_HC\_lhr\_v7.0.0\_2008R2.sql最低支持2008R2版本。 + +脚本DB\_PG\_HC\_lhr\_v7.0.0.sql是PG数据库脚本,执行后会产生PostgreSQL数据库的健康检查html报告。 + +脚本DB\_DM\_HC\_lhr\_v7.0.0是达梦数据库脚本,执行后会产生达梦数据库的健康检查html报告。 + +脚本DB\_TiDB\_HC\_lhr\_v7.0.0.sql是TiDB数据库脚本,执行后会产生TiDB数据库的健康检查html报告。 + +脚本DB\_openGauss\_HC\_lhr\_v7.0.0.sql是openGauss数据库脚本,执行后会产生openGauss数据库的健康检查html报告。 + +## 2、巡检脚本特点 + +1、可以巡检Oracle、MySQL、SQL Server、PostgreSQL、TiDB、openGauss和国产达梦等7个数据库,也可以巡检Linux操作系统(后续会免费逐步增加MongoDB、db2、OceanBase、PolarDB、TDSQL、GBase、人大金仓等数据库) + +2、脚本为绿色版、免安装、纯SQL文本 + +3、跨平台,只要有SQL\*Plus(Oracle)、mysql(MySQL、TiDB)、MSSQL客户端(SSMS、Navicat皆可)、psql(PG、openGauss)、gisql(国产达梦)、gsql(openGauss)环境即可运行脚本 + +4、脚本内容可视化,可以看到脚本内容,因此可供学习数据库使用 + +5、兼容Oracle 10g、11g、12c、18c、19c、20c、21c等高版本Oracle数据库 + +6、对Oracle 10g、11g、12c、18c、19c、20c、21c等版本分别提供了只读版(只对数据库查询,不做DDL和DML操作) + +7、MySQL最低支持5.5版本 + +8、SQL Server最低支持2005版本 + +9、增删监控项非常方便,只需要提供相关SQL即可 + +10、一次购买,所有脚本终身免费升级 + +11、检查内容非常全面 + +12、针对每种数据库,只有1个SQL脚本,不存在嵌套调用脚本等其它问题 + +13、最终生成html文件格式的健康检查结果 + +14、对结果进行过滤,列出了数据库有问题的内容 + +15、对OS的信息提供了收集(单独脚本) + +## 3、openGauss数据库运行方式 + +需要有华为的gsql客户端,或PostgreSQL数据库的psql客户端都可以,运行方式如下: + +若是openGauss或华为的GaussDB数据库的gsql客户端,则执行: + +``` +gsql -U gaussdb -h 192.168.66.35 -p 15432 -d postgres -W'lhr@123XXT' -H -f D:\DB_openGauss_HC_lhr_v7.0.0.sql > d:\a.html +``` + +若是PostgreSQL数据库的psql客户端,则执行: + +``` +psql -U gaussdb -h 192.168.66.35 -p 54324 -d postgres -W -H -f D:\DB_openGauss_HC_lhr_v7.0.0.sql > d:\a.html +``` + +## 4、html巡检结果 + +这里只列出部分结果,其它的详细内容可以参考:https://share.weiyun.com/5lb2U2M + +![](figures/html巡检结果.png) + +![](figures/html巡检结果1.jpg) + +其它不再列举。 + diff --git "a/content/zh/post/July/\351\200\232\347\224\250\346\220\234\347\264\242\346\240\221.md" 
"b/content/zh/post/July/\351\200\232\347\224\250\346\220\234\347\264\242\346\240\221.md" new file mode 100644 index 0000000000000000000000000000000000000000..b581e433c6803c90035d74f086f8115ef0686220 --- /dev/null +++ "b/content/zh/post/July/\351\200\232\347\224\250\346\220\234\347\264\242\346\240\221.md" @@ -0,0 +1,409 @@ ++++ + +title = "通用搜索树" + +date = "2021-10-21" + +tags = [ "通用搜索树"] + +archives = "2021-10" + +author = "吴松" + +summary = "通用搜索树" + +img = "/zh/post/July/title/img2.png" + +times = "12:30" + ++++ + + + +# 通用搜索树 + + + +## 概述 + +本文翻译了 Generalized Search Trees for Database Systems 这篇论文的内容,并加入了自己的理解,旨在更好地理解 Gist 索引。 + +## 介绍 + +高效的查找树的实现对数据库十分重要。对于传统的数据库系统,B+-Tree 对于数字类型的数据而言可能已经足够了。但是,现在数据库系统需要处理各种新的数据类型,以支持不断涌现的新型应用,如定位系统、多媒体系统、CAD 工具、时序数据库、指纹识别系统、生物数据库等等。为了能够支持这些应用,查找树的实现必须灵活可扩展。 + +为了实现搜索树的灵活可扩展,主要有两种思路: + +- 1. Specialized Search Tree : 为解决特定的问题实现专用的搜索树。目前已经开发了很多各种各样的搜索树,例如大家熟知的 R-tree 是用来解决空间搜索问题。虽然其中很多搜索树在解决特定领域的问题非常重要,但是这种思路存在一些问题。实现和维护一种搜索树工作量巨大。产生新的应用后,需要重新开发一种搜索树,实现树的搜索、并发控制、插入、分裂等相关的逻辑。 +- 2. Search Trees For Extensible Data Types :为避免开发新的搜索树类型,已有的一些搜索树,如 B+-Tree、R-tree 可以扩展它们支持的数据类型。例如, B+-Tree 可用于检索任何具有线性排序的数据,支持对该数据类型的相等或者范围查询。这种方式虽然扩展了支持的数据类型,但是并没有扩展搜索树上可支持的查询的种类,依然只能支持相等或者范围查询。类似的,R-tree 中,唯一可用的谓词类型是相等、重叠和包含这几种。这种不灵活性为支持新的应用带来了非常多的问题,因为传统的一些查询类型,如 线性排序、范围查询等对新的数据类型而言可能并不适合。 + + Gist 是解决搜索树可扩展性的第三种思路,它在支持的数据类型和查询种类方面都很容易扩展。尤其是查询类型的可扩展尤其重要,它允许以一种接近自然的查询方式去支持新的数据类型上的索引。除了为新的数据类型提供可扩展性, Gist 还统一了之前用于常见的数据类型的不同的数据结构。例如,B+-tree 和 R-tree 都可以基于 Gist 实现,从而可以实现基于同一个代码库实现不同的索引,支持不同的应用。 + + Gist 很容易配置,为了实现不同用途的搜索树,只需要在数据库中注册 6 种方法,这 6 种方法中封装了作为搜索树中 key 的对象的结构和行为。例如 PG 中实现了基于 Gist 的 R-tree,实现了 R-tree 中支持的数据类型对应的 6 个接口(每一种类型都需要实现)。 实现一组新的方法,比完全从头开始实现一棵新的搜索树要简单得多。例如,R-trees 的 POSTGRES 实现大概 3000 行代码,而 GiST 方法实现大约 500 行代码。 + + 除了提供统一的、可扩展的数据结构外,对搜索树的处理面临一个最基本的问题:Gist 是否可以用于支持任意的数据集的索引,Gist 产生的搜索树是否总是可以支持有效地查询。对于这个问题,可能要回答“NO”了,本文将说明一些可能影响搜索树效率的问题。 + + 这引发了一个有趣的问题,即何时以何种方式在一些非标准化的问题上构建一棵高效的查询树--Gist 需要进一步通过试验进行探索。 + + +## 1.1 本文的结构 + +第 2 节,说明并概括了数据库搜索树的基本特性。 + +第 3 节,介绍了广义搜索树对象及其结构、属性和行为。 + +第 4 节,提供了三种不同类型的搜索树的 GiST 实现。 + +第 5 节,介绍了一些性能数据,探讨了构建有效搜索树所涉及的问题。 + +第 6 节,检查了在成熟的 DBMS 中实现 GiST 时需要考虑的一些细节。 + +第 7 节,最后讨论了这项工作的重要性,以及进一步研究的方向。 + +## 1.2 相关工作 + +Knuth 提供了一个很好的关于搜索树的调查研究,而 Comer 更详细地介绍了 B-tree 及其变体。现在已经有很多多维搜索树,例如 R-tree 及其变体 R\*-tree 和 R+-tree。其他的多维搜索树,如 quad-tree、k-D-B-tree、hB-tree 等等。也可以用空间填充曲线,将多维数据转换成一维数据,转换后,可以用B+-tree 索引生成的数据。 + +PG 中引入了可扩展索引,提供了可扩展的 B+-tree 和 R-tree 的实现。这些可扩展的索引允许多多种数据类型进行索引,但是只支持一组固定的查询谓词。例如,PG B+-tree 索引支持常用的排序谓词\(\>, \>=, =, <=, <\),而 PG 中 R-tree 仅支持谓词 Left、Right、OverLeft、Overlap、OverRight、Right、Contained、Contained 和 Equal。 + +可扩展的 R-tree 提供了 Gist 功能相当强大的一个子集,本文首次证明了 R-tree 可以索引尚未映射到空间域的数据。然而,除了有限的可扩展性之外,R 树还缺乏 Gist 支持的许多其他功能。R-trees 只提供一种关键谓词(Contains),它们不允许用户指定后面将会提到的 PickSplit 和 Penalty 算法,同时它缺乏对来自线性排序域的数据的优化。尽管有这些限制,可扩展 R-tree 与 Gist 已经十分接近了,我们在第 5 节中描述原始实现和性能验证。 + +## 2 The Gist of Database Search Trees + +大多数有数据库经验的人对搜索树的工作方式都有直观的认识,所以在这里我们以简化的方式讨论搜索树,很多内容可能是模糊的,目的只是为了说明某个概念而不是细节。 + +搜索树的粗略示意图如图-1所示,它是一棵平衡树,有高扇出。内部节点用作目录,叶子节点包含指向实际数据的指针,并且以链表的形式组织,以便索引扫描。 + +![](figures/数据库中搜索树示意图.png) + +图-1 数据库中搜索树示意图 + +每个内部节点都包含一系列 key 和 pointer 。要搜索与查询谓词 q 匹配的元组,需要从根节点开始查找。对于节点上的每个指针,如果关联的 key 与 q 一致,即 key 不排除指针下方存储的数据可能匹配 q 的可能性,则遍历指针下方的子树,直到找到所有匹配的数据。 + +我们回顾了一些熟悉的树型结构中的一致性概念。在 B+ 树中,查询采用范围谓词的形式(例如“找到所有 i 使得 c1 < i < c2”),key 在逻辑上划定了一个范围,其中包含指针下方的数据。如果查找到的节点内的一个 item 的 key 满足这个查询范围,则需要继续查找子节点;否则不需要继续查找。 + +在上面的描述中,对 key 的唯一限制是它必须在逻辑上匹配存储在它下面的每一个数据,以便一致性检查不会遗漏任何有效数据。在 B+-tree 和 R-tree 中,key 的本质是一个“包含”谓词:它描述了一个连续的区域,其中包含 pointer 
下的所有数据。但是,“包含”谓词并不是唯一可能出现的关系。就像在 R-tree 中,节点上的 key 可能出现“重叠”这样的关系,即一个节点上的两个 key 下面可能保存着某些相同的元组。 + +概括一下搜索关键字的概念:搜索关键字 key 是可以对其下的每一个数据都成立的任意谓词。如果给定这样一个灵活的数据结构,用户可以通过将数据组织成任意嵌套的子类别来自由地形成树,每一个都可以用一些特殊的谓词来标记。反过来,抓住数据库搜索树的本质:它是数据集分区的层次结构,每一个分区都有一个分类,用以保存分区中的所有数据。可以基于分类进行任意谓词的搜索。 + +为了支持对给定谓词 q 的搜索,用户必须提供一个返回布尔类型的方法来判断 q 是否与给定的 key 一致,搜索通过遍历与 key 关联的指针来进行。当节点数据过多或者占用空间过大时,通过用户提供的分裂算法来控制数据的分组,并且可以使用用户提供的 key 完成对分组的表征。因此,通过向用户公开关键方法和拆分方法,可以构建任意的搜索树,支持可扩展的查询集。这些是 Gist 的基础,后面详细对其进行描述。 + +## 3 The Generalized Search Tree + +本节描述 Gist 的结构、它的不变属性、可扩展的方法以及内置的算法。按照惯例,将每个索引数据称为“元组”。 + +## 3.1 Structure + +Gist 是一棵平衡树,它的每个节点的扇出在 kM 和 M 之间,其中 2/M <= k <= 1/2,根节点的扇出可以在 2 和 M 之间。其中常数 k 被称为最小填充因子。叶子节点包含(p, ptr)这样的数据,其中 p 是被用作搜索关键字的谓词,而 ptr 是数据库中某个元组的标识符。 + +非叶子节点中的(p; ptr)中 p 是用作搜索关键字的谓词,而 ptr 指向是指向另一个树中节点的指针。谓词可以包含任意数量的自由变量,只要树的叶子节点引用的任意单个元组能够装下。为了方便说明,假设树中的项占用的空间大小是一样的,可变大小的项在第 6 节介绍。 + +假设给定项 E = \(p; ptr\)的实现中,可以访问 E 当前所在的节点。 + +## 3.2 Properties + +以下属性在 Gist 中是不变的: + +1. 除了根节点外每个节点包含 kM 到 M 个项。 + +2. 每个叶子节点中的索引项(p, ptr)在用指定的元组实例化时,p 为 true \(即 p 对元组成立\)。 + +3. 每个非叶子节点中的索引项(p, ptr),当使用从 ptr 可达的任意元组对其实例化时, p 为 true。和 R-tree 不同的是,对于某个从 ptr 可达的索引项 (p', ptr'),不要求 p' -\> p,只是 p 和 p' 都适用于从 ptr' 可达的元组。 + +4. 根节点至少有两个子节点,除非它本身也是叶子节点。 + +5. 所有叶子节点出现在同一层。 + +其中属性 3 尤其特别,对于 R-tree 而言要求 p' -\> p ,因为 R-tree 的边界排列在一个包含层次的结构中。然而,R-tree 有一些不必要的限制:节点 N 上方的 key 的谓词必须适用于 N 下的数据,因此不需要 N 上的 key 以更细的粒度重新描述这些谓词。 + +相反,有时我们需要节点 N 中的 key 基于某些不完全一样的规则对其下的数据进行分类。 + +## 3.3 Key Methods + +原则上, Gist 中的 key 可以是任意的谓词。实际使用中, key 来自用户定义的对象, 同时提供 Gist 需要的一些方法的实现。 例如, B+-tree 中 key 是数字类型,标识数据的范围;R-tree 中 key 的类型是外接矩阵,标识区域等等。 + +以下关键的方法,是预定义的需要用户实现的方法: + +- Consistent\(E, q\): 给定一个索引项 E = \(p, ptr\) 以及 一个 查询谓词 q,如果 p ^ q 一定不满足,返回 FALSE , 否则返回 true 。需要注意的是,这里不是精确查找, Consistent 有可能产生误报,但不影响算法的正确性,误报可能导致性能的问题,因为会导致不必要的子树的查询。举例,如图-2 所示情况,谓词 q 是矩形是否在矩形 D 的左侧;左下方蓝色矩形是父节点所描述的矩形区域,如果用父节点的矩形区域判断是否在 D 的左侧,可能不满足条件;但这里只需要排除一定不在左侧的情况,即图中右侧灰色矩形区域描述的情况,父节点矩形区域的左边界在 D 的右侧,这种情况一定是不满足条件的,此时 Consistent 应该返回 FALSE;蓝色区域不返回 FALSE,则应该返回 true。 只有返回 true 后,才有可能继续查询 A B C 所描述的区域是否满足谓词在 D 的左侧,这样最终才能返回正确结果。 + +- Union\(P\): 给定一个索引项的集合 P ,包含索引项 (p1, ptr1) ... \(pn, ptrn\),返回谓词 r ,能够满足 ptr1 到 ptrn 下所有元组。可以通过(p1 V p2 V pn) 的方式返回一个满足条件的 r 。 + + Compress\(E\): 给定索引项 E = \(p, ptr\),返回索引项 (x, ptr),其中 x 是 p 压缩后的数据形式。 + + +- Decompress\(E\): 给定一个压缩后的索引项 E = \(x, ptr\),其中 x = Compress\(p\),返回一个索引项(r, ptr) 满足 p -\> r 。注意这可能是一种有损的压缩,因为不需要满足 p <-\> r 。 + +- Penalty\(E1, E2\): 给定两个索引项 E1 = \(p1, ptr1\),E2 = \(p2, ptr2\) ,返回一个将 E2 插入以 E1 为根的子树的代价。该方法用于辅助插入和分裂算法给(在下面描述)。通常,代价值是从 E1.p1 增加到 Union \{E1, E2\} 增加的区域的面积的大小 (对 R-tree 而言)。 +- PickSplit\(P\):给定一个包含 M + 1 个索引项的节点 P ,将 P 分裂为 两个集合 P1 和 P2,每一个至少包含 kM 个索引项。通常希望以一种最优的方式进行拆分,但对最终取决于用户。 + + ![](figures/Consistent-Example.png) + + 图-2 Consistent Example + + +以上是 Gist 中唯一需要用户实现的方法。需要注意的是,Consistent、Union、Compress 和 Penalty 必须能够处理输入的谓词。这些方法实现起来可能会很困难,尤其是 Consistent。但是通常一棵树中的谓词是有限的,而且在实现中可以对这些谓词做一些限制。 + +对于 key 的压缩有很多选择,一种简单的实现是让 Compress 和 Decompress 成为恒等函数。复杂一点的实现可以让 Compress\(p, ptr\) 生成有效但更紧凑的谓词 r ,p -\> r,让 Decompress 为恒等函数。 + +以 SHORE's R-tree 中的技术为例,它在插入时获取一个多边形,将多边形压缩为外接矩形,其外接矩形本身也是一个多边形。当然,对 Compress 和 Decompress 而言可以实现更复杂的算法。 + +## 3.4 Tree Methods + +上一小节提到的方法需要开发者提供,本小节的方法是由 Gist 提供。需要注意 key 在节点上时压缩的,从节点读取时需要解压缩,这一点后续不在赘述。 + +- 3.4.1 Search + + 所示有两种形式,本节介绍的时第一种,可以用于搜索任何查询谓词的任何数据集,方法是为查找到满足查询条件的数据,不断地遍历树。 + + ``` + Algorithm Search(R, q) + + Input: GiST rooted at R, predicate q + + Output: all tuples that satisfy q + + Sketch: Recursively descend all paths in tree whose + keys are consistent with q. 
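+
+    (补充说明,非论文原文:由于 3.3 节中 Consistent 允许误报、不允许漏报,
+    Search 可能多访问若干子树,但不会遗漏任何满足 q 的元组。)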
+ + S1: [Search subtrees] If R is not a leaf, check + each entry E on R to determine whether + Consistent(E, q). For all entries that are Consistent, invoke Search on the subtree whose + root node is referenced by E.ptr. + + S2: [Search leaf node] If R is a leaf, + check each entry E on R to determine whether + Consistent(E, q). If E is Consistent, it is a + qualifying entry. At this point E.ptr could + be fetched to check q accurately, or this check + could be left to the calling process. + ``` + + 注意,查询谓词 q 可以是精确匹配(相等)谓词,也可以是同时有多个值满足的谓词。后一类包括”范围“ 或 ”窗口“谓词,如在 B+-tree 或 R-tree 中,还有更一般的不基于连续区域的谓词(例如集合包含谓词,等) + +- 3.4.2 Search In Linearly Ordered Domains + + 如果被索引的数据线性有序,且查询通常是相等或者范围这样的谓词,那么本小节中定义的 FindMin 和 Next 方法可以实现更高效地搜索。要使此选项可用,用户需要在创建搜索树的时候执行一些额外的步骤: + + - 1. IsOrdered 这个 flag 要设置为 true。IsOrdered 是在创建树的时候设置的一个静态属性,默认值是 false。 + - 2. 需要注册一个额外的方法 Compare\(E1, E2\)。给定两个索引项 E1 = \(p1, ptr1\) 和 E2 = \(p2, ptr2\) , Compare 方法返回 p1 是否在 p2 之前,或者 p1 在 p2 之后,或者 p1 和 p2 相等。Comapare 用于在每个节点内插入数据。 + - 3. PickSplit 方法必须保证 P 分裂为 P1 和 P2 节点后,对于任意 P1 上的索引项 E1、P2 上的索引项 E2, Compare\(E1, E2\) 返回 E1 在 E2 前。 + - 4. 要保证一个节点内没有两个重叠的 key ,即一个节点内的任意 E1 和 E2, Consistent\(E1, E2.p\) = false。 + + 如果执行了上面 4 个步骤,则可以通过调用 FindMin 并重复调用 Next 来处理相等和范围查询。而其他类型的谓词仍然可以通过通用的搜索方法来处理,FindMin/Next 比使用 Serach 遍历更高效,因为 FindMin 和 Next 只沿着一个根到叶子的路径访问非叶子节点。 + + ``` + Algorithm FindMin(R, q) + + Input: GiST rooted at R, predicate q + + Output: minimum tuple in linear order that satisfies q + + Sketch: descend leftmost branch of tree whose keys + are Consistent with q. When a leaf node is + reached, return the first key that is Consistent + with q. + + FM1: [Search subtrees] If R is not a leaf, find the + first entry E in order such that + Consistent(E, q) . If such an E can be found, + invoke FindMin on the subtree whose root + node is referenced by E :ptr. If no such entry is found, return NULL. + + FM2: [Search leaf node] If R is a leaf, find the + first entry E on R such that Consistent(E, q), + and return E. If no such entry exists, return + NULL. + ``` + + 给定一个满足谓词 q 的索引项 E, Next 方法返回下一个满足 q 的索引项,如果不存在则返回 NULL。如果是为了查找 Next 只会在叶子节点上被调用。 + + ``` + Algorithm Next(R, q, E) + + Input: GiST rooted at R, predicate q, current entry E + + Output: next entry in linear order that satisfies q + + Sketch: return next entry on the same level of the tree + if it satisfies q. Else return NULL. + + N1: [next on node] If E is not the rightmost entry + on its node, and N is the next entry to the right + of E in order, and Consistent(N, q), then return N. If :Consistent(N, q), return NULL. + + N2: [next on neighboring node] If E is the righmost entry on its node, let P be the next node + to the right of R on the same level of the tree + (this can be found via tree traversal, or via + sideways pointers in the tree, when available + [LY81].) If P is non-existent, return NULL. + Otherwise, let N be the leftmost entry on P . + If Consistent(N, q), then return N, else return + NULL. + ``` + + +- 3.4.3 Insert + + 插入流程保证 Gist tree 的平衡,它与 R-tree 的插入非常相似,它是 B+-tree 更简单的插入流程的泛化。插入允许指定插入 level,这允许后续方法使用 Insert 从树的内部节点重新插入数据。假设叶子节点是 0 层,层号从叶子节点向上不断增加,新插入的项目出现在 level = 0 层。 + + ``` + Algorithm Insert(R, E, l) + + Input: GiST rooted at R, entry E = (p, ptr), and + level l, where p is a predicate such that p holds + for all tuples reachable from ptr. + + Output: new GiST resulting from insert of E at level l. + + Sketch: find where E should go, and add it there, splitting if necessary to make room. + + I1. 
[invoke ChooseSubtree to find where E + should go] Let L = ChooseSubtree(R, E, l) + + I2. If there is room for E on L, install E on L + (in order according to Compare, if IsOrdered.) + Otherwise invoke Split(R, L, E). + + I3. [propagate changes upward] + AdjustKeys(R, L). + ``` + + ChooseSubtree 可用于在树的任何 level 找到插入的最佳节点。当 IsOrdered 属性是 true 时,必须仔细编写 Penalty 方法以确保 ChooseSubtree 按顺序到达正确的叶子节点。4.1 节将给出一个例子。 + + ``` + Algorithm ChooseSubtree(R, E, l) + + Input: subtree rooted at R, entry E = (p, ptr), level + l + + Output: node at level l best suited to hold entry with + characteristic predicate E.p + + Sketch: Recursively descend tree minimizing Penalty + + CS1. If R is at level l, return R; + + CS2. Else among all entries F = (q, ptr') on R + find the one such that Penalty(F, E) is minimal. Return ChooseSubtree(F.ptr', E, l). + ``` + + split 算法利用用户自定义的 PickSplit 方法来决定如何拆分节点,包括处理正在进行插入的新元组。一旦数据分成两份,Split 就会为其中一份生成新的节点,将其插入树中,并更新树中新节点之上的 key。 + + ``` + Algorithm Split(R, N, E) + + Input: GiST R with node N, and a new entry E = + (p, ptr). + + Output: the GiST with N split in two and E inserted. + + Sketch: split keys of N along with E into two groups + according to PickSplit. Put one group onto a + new node, and Insert the new node into the + parent of N. + + SP1: Invoke PickSplit on the union of the elements + of N and fEg, put one of the two partitions on + node N, and put the remaining partition on a + new node N0 + . + + SP2: [Insert entry for N0 + in parent] Let EN' = + (q, ptr'), where q is the Union of all entries + on N', and ptr' is a pointer to N'. If there + is room for EN' on Parent(N), install EN' on + Parent(N) (in order if IsOrdered.) Otherwise + invoke Split(R, Parent(N), EN'). + + SP3: Modify the entry F which points to N, so that + F.p is the Union of all entries on N. + ``` + + 步骤 SP3 修改父节点信息,以显示节点 N 的修改。 这些修改通过插入流程中的步骤 I3 向上传播到树中的其他部分,同时传播了由于插入 N' 引起的树结构的变化。 + + AdjustKeys 方法确保一组谓词之上的 key 适用于下面的所有元组。 + + ``` + Algorithm AdjustKeys(R, N) + + Input: GiST rooted at R, tree node N + + Output: the GiST with ancestors of N containing correct and specific keys + + Sketch: ascend parents from N in the tree, making the + predicates be accurate characterizations of the + subtrees. Stop after root, or when a predicate + is found that is already accurate. + + PR1: If N is the root, or the entry which points to N + has an already-accurate representation of the + Union of the entries on N, then return. + + PR2: Otherwise, modify the entry E which points to + N so that E.p is the Union of all entries on N. + Then AdjustKeys(R, Parent(N).) 
+    ```
+
+    注意,当 IsOrdered = true 时,AdjustKeys 通常不执行任何工作,因为这种情况下,节点上的谓词通常将数据分为几个范围,不需要因为简单的插入或者删除而进行修改。
+
+    AdjustKeys 会在步骤 PR1 中检测到这种情况,从而避免在树中更高的 level 上调用 AdjustKeys。这种情况下,如果有需要可以完全绕过 AdjustKeys。
+
+
+- 3.4.4 Delete
+
+    删除算法保持树的平衡,同时尽可能保留树中 key 的信息。当树中的 key 存在线性顺序时,使用 B+-tree 风格的“借用或者合并”技术;否则,使用 R-tree 风格的重新插入技术。篇幅原因,在此省略删除算法。
+
+
+## 4 The Gist for Three Applications
+
+这一章给出了三个基于 Gist 实现的具体索引类型,包括 B+-tree、R-tree 和 RD-tree,主要描述实现这几种索引类型时,Gist 定义的需要用户实现的接口是如何实现的,本文对此不展开介绍,感兴趣的可以参考原文。
+
+## 5 Gist Performance Issues
+
+对于没有重叠 key 的平衡树,如 B+-tree,需要检查的最大节点数(I/O 次数)很容易给出上界:对于无重叠数据的点查,访问次数就是树的高度,对一个有 n 个元组的数据库而言是 O(log n)。但是,对于有重叠 key 的树而言,这个上界无法保证:相同的数据可能出现在不同的节点上,这会导致对树中的多条路径进行查询。Gist 的性能随节点上 key 重叠程度的增加而变化。
+
+key 重叠的主要原因有 2 个:数据重叠和 key 压缩导致的数据有损。
+
+第一个原因很简单,如果树中的数据本身重叠,那么 key 出现重叠显而易见。例如,如果数据集中的数据完全相同,这种情况下会生成低效的索引,利用顺序扫描可能更合适。
+
+第二个原因,key 压缩造成的数据有损会以一种更微妙的方式导致问题:即使两个原始数据并不重叠,但如果 Compress/Decompress 方法不能产生精确的 key,也可能导致 key 重叠。例如,对于 R-tree 而言,Compress 方法生成外接矩形,如果源数据不是矩形的,就会产生有损压缩。
+
+![](figures/上述两个因素对性能的影响如上图所示.png)
+
+上述两个因素对性能的影响如上图所示:初始时没有数据重叠或压缩导致的精度丢失问题,此时性能最佳;随着数据重叠的增加或压缩精度问题的出现,性能开始下降。
+
+最坏情况下,任意的查询语句都会命中所有的 key,这种情况下需要遍历整棵树。
+
+## 6 Implementation Issues
+
+本节讨论实现 Gist 需要考虑的问题,并对此做一个概述。
+
+- In-memory Efficiency: 上面讨论了 Gist 在磁盘访问方面的效率。为简化内存管理方面的开销,将 node 对象的实现开放为可扩展的。例如,可以重载线性排序的 GiST 树的 node 实现,以支持二分查找;可以重载支持 hB-tree 的 node 实现,以支持 hB-tree 所需的专用内部结构。
+- Concurrency-control, Recovery and Consistency: 高并发、可恢复性、一致性是成熟数据库中的关键点。
+- Variable-Length Keys: 允许变长 key 通常是一个非常有用的特性,特别是 Gist 中允许使用压缩算法。但这需要在实现树操作方法(例如 Insert 和 Split)的时候格外小心。
+- Bulk Loading: 在无序的数据上,如何对一个已经存在的大数据集高效构建索引,目前并不十分清晰。应该为 Gist 扩展 BulkLoad 方法,来实现不同类型数据集的批量加载。
+- Optimizer Integration: 要将 Gist 和查询优化器集成,必须让优化器知道哪些查询谓词与哪些 Gist 是匹配的。而估算 Gist 的代价更困难,需要进一步研究。
+- Coding Detail: 建议两种实现 Gist 的方式。一种是可扩展的方式,像 PG 或者 Illustra 一样,在运行时可扩展,这样使用非常方便;另一种是模板的方式,像 SHORE 一样,在编译时可扩展,这样可以获得更高的效率。以上两种方式基于相同的代码库构建,不需要复制逻辑。
+
+## 7 Summary and Future Work
+
+数据库系统需要能够支持新的数据类型,这要求索引能够支持可扩展的查询集合。
+
+为了实现这一点,Gist 对搜索树进行了抽象,提取了它们的一些共同特征,对各种搜索树进行了统一。
+
+Gist 具有非常好的扩展性,允许对任意的数据集进行索引和查询,这也引出了何时、以何种方式生成搜索树的问题。
+
+由于 Gist 对 B+-tree 以及 R-tree 进行了结构上的统一,对于需要这两种数据结构的系统而言这非常有用。
+
+此外,Gist 的可扩展性也引发了许多有趣的研究问题:
+
+- 可索引性:Gist 虽然提供了一种为任意类型建立索引的方法,但是对于“可索引性”还缺乏理论来描述:对于一个给定的数据集,针对给定的查询是否能够使用索引。
+- 索引非标准数据:作为实际问题,对一些非标准数据,例如集合、图形、序列、图片、音频、视频等,探索这些数据类型,会对理论探索提供一些有趣的反馈。对集合数据的 RD-tree 的研究已经开始:已经在 SHORE 和 Illustra 中实现了 RD-tree,只是用的是 R-tree 而不是 Gist。一旦从 R-tree 转到 Gist 上,也可以通过实现新的 PickSplit 方法和新的谓词实现这一点。
+- 查询优化和代价评估:查询优化和代价评估需要能够处理 Gist。当前代价评估对 B+-tree 而言是准确且合理的,对 R-tree 而言可能相对差一些。R-tree 上的代价评估已经有一些工作完成,但是对更通用的 Gist 而言还有很多工作要做。另外,需要由用户实现的接口可能是非常耗时的操作,这些方法的 CPU cost 要注册给优化器,然后优化器在做代价评估时,将这些 CPU cost 正确地纳入计算中。
+- 有损 key 压缩技术:随着新的数据类型被索引,可能有必要找到新的有损压缩算法,来保留 Gist 的属性。
+
+- 算法提升:Gist 的插入算法基于 R-tree 的插入算法。R\*-tree 使用了某种修改过的算法,对于空间数据似乎有一些性能提升。特别是,R\*-tree 在分裂期间的“强制重新插入”能获得很好的收益。如果这些技术被证明是有益的,它们会被纳入到 Gist 中,作为可选项或者默认选项。要在并发控制和恢复方面统一 R\*-tree 对 R-tree 的这些修改,还需要一些工作。
+
+未来针对特定领域的搜索树的改进,应该将 Gist 提供的通用性考虑在内。如果统一的框架有不错的性能,那么没有理由再从头开发新的、解决特定问题的搜索树。Gist 提供了这样的一个框架,可以在现有的系统中实现它,也可以作为独立的 C++ 包来实现,以便被其他的系统使用。
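+
+作为补充示例(非论文原文):下面用 Python 勾勒 3.3 节六个 key 方法在一维区间(即 B+-tree 式线性域)上的一种最小实现,方法名沿用论文,签名仅为示意:
+
+```python
+# 示意实现:key 与查询谓词 q 都是一维闭区间 (lo, hi),“一致”即区间可能有交集
+from typing import List, Tuple
+
+Interval = Tuple[float, float]
+
+def consistent(p: Interval, q: Interval) -> bool:
+    # 只要 p ^ q 可能同时成立就返回 True;允许误报,不允许漏报
+    return p[0] <= q[1] and q[0] <= p[1]
+
+def union(ps: List[Interval]) -> Interval:
+    # 返回能覆盖全部子项的谓词:取区间并的外包
+    return (min(p[0] for p in ps), max(p[1] for p in ps))
+
+def compress(p: Interval) -> Interval:    # 论文允许恒等压缩
+    return p
+
+def decompress(x: Interval) -> Interval:  # 与 compress 配对,同样取恒等
+    return x
+
+def penalty(p1: Interval, p2: Interval) -> float:
+    # 插入代价:把 p2 并入 p1 后,区间长度的“扩张量”
+    u = union([p1, p2])
+    return (u[1] - u[0]) - (p1[1] - p1[0])
+
+def pick_split(entries: List[Interval]):
+    # 线性域:按左端点排序后对半切,天然满足 3.4.2 节对有序域的要求
+    s = sorted(entries)
+    return s[:len(s) // 2], s[len(s) // 2:]
+```
+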
diff --git "a/content/zh/post/PARKERljc/2021-12-4-CentOs\350\231\232\346\213\237\346\234\272\344\270\213opengauss\347\232\204\351\205\215\347\275\256\344\275\277\347\224\250.md" "b/content/zh/post/PARKERljc/2021-12-4-CentOs\350\231\232\346\213\237\346\234\272\344\270\213opengauss\347\232\204\351\205\215\347\275\256\344\275\277\347\224\250.md"
new file mode 100644
index 0000000000000000000000000000000000000000..8e12ec71a150a5df082841166b5bf8ae7fed688b
--- /dev/null
+++ "b/content/zh/post/PARKERljc/2021-12-4-CentOs\350\231\232\346\213\237\346\234\272\344\270\213opengauss\347\232\204\351\205\215\347\275\256\344\275\277\347\224\250.md"
@@ -0,0 +1,446 @@
++++
+
+title = "CentOS虚拟机下openGauss的配置使用"
+
+date = "2021-12-04"
+
+tags = ["CentOS", "openGauss", "配置"]
+
+archives = "2021-12"
+
+author = "parker"
+
+summary = "个人配置openGauss的过程总结"
+
++++
+
+# 实验环境说明
+### 虚拟机平台 `VMware`
+### 服务器端 `CentOS 7.9`
+### 本机系统 `Windows 10`
+### 部署版本 `openGauss 1.1.0`
+# 安装详细步骤
+### 虚拟机`VMware`
+本机已配置,该部分省略
+
+### `CentOS 7.9` 安装
+
+下载镜像源`CentOS-7-x86_64-DVD-2009.iso`
+
+![avatar](pic/openGauss安装/1.PNG)
+
+虚拟机中选中镜像进行安装
+
+![avatar](pic/openGauss安装/2.PNG)
+
+![avatar](pic/openGauss安装/3.PNG)
+
+设置
+
+    内存设置为2GB
+    处理器设置为2
+    网络默认即可
+    声卡和打印机不使用,直接进行了移除
+
+启动后进入系统安装,注意的点如下:
+
+#### 分区
+
+选择`系统`-`安装位置`-`手动分区`进行分区如下:
+
+![avatar](pic/openGauss安装/分区.PNG)
+
+![avatar](pic/openGauss安装/分区2.PNG)
+
+#### 网络和主机名
+
+选择`系统`-`网络和主机名`进行设置如下:
+
+![avatar](pic/openGauss安装/网络和主机名.PNG)
+
+![avatar](pic/openGauss安装/网络和主机名2.PNG)
+
+记录IP和主机名,之后的配置需要用到:
+
+`ip` `192.168.201.131`
+
+`主机名` `db1`
+
+#### 软件选择
+
+选择`软件`-`软件选择`设置如下:
+
+![avatar](pic/openGauss安装/软件选择.PNG)
+
+#### 用户设置
+
+上述设置完成后点击`开始安装`,该期间根据提示完成用户设置即可
+
+![avatar](pic/openGauss安装/5.PNG)
+
+
+#### 安装完成进行重启,登录系统完成安装
+
+![avatar](pic/openGauss安装/8.PNG)
+
+##### 上网测试
+
+![avatar](pic/openGauss安装/上网测试.PNG)
+
+##### 修改操作系统版本(CentOS 7.6可省略)
+通过```vi /etc/redhat-release```打开编辑文件,修改内容如下(请使用`su root`切换至root用户进行操作)
+
+![avatar](pic/openGauss安装/修改操作系统版本.PNG)
+
+#### 关闭防火墙
+
+执行以下命令关闭防火墙
+```systemctl stop firewalld.service```
+
+```systemctl disable firewalld.service```
+
+![avatar](pic/openGauss安装/关闭防火墙.PNG)
+
+#### 设置字符集及环境变量
+
+![avatar](pic/openGauss安装/设置字符集及环境变量.PNG)
+
+#### 关闭swap交换内存
+
+![avatar](pic/openGauss安装/关闭swap交换内存.PNG)
+
+#### yum环境配置
+
+备份yum配置文件
+
+![avatar](pic/openGauss安装/备份yum配置文件.PNG)
+
+下载可用源的repo文件
+
+![avatar](pic/openGauss安装/下载可用源的repo文件.PNG)
+
+查看repo文件是否正确
+
+![avatar](pic/openGauss安装/查看repo文件是否正确.PNG)
+
+yum安装相关包
+
+```yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch lsb_release wget python3```
+
+![avatar](pic/openGauss安装/yum安装相应的包.PNG)
+
+设置python版本为3.x
+
+![avatar](pic/openGauss安装/python版本.PNG)
+
+修改完成后,确认yum是否可用;若不可用(如本例),修改`/usr/bin/yum`文件,将`#!/usr/bin/python`改为`#!/usr/bin/python2.7`
+
+![avatar](pic/openGauss安装/yum使用验证并修改为python2.7.PNG)
+
+### 数据库安装
+
+创建存放数据库安装目录
+
+![avatar](pic/openGauss安装/创建存放数据库安装目录.PNG)
+
+下载数据库安装包
+
+![avatar](pic/openGauss安装/下载数据库安装包.PNG)
+
+创建xml配置文件,用于数据库安装
+在`openGauss`文件夹下
+`vi clusterconfig.xml`编辑以下内容
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 按 openGauss 官方单机部署模板补全的示例配置:IP/主机名与上文一致,安装路径可按需调整 -->
+<ROOT>
+    <CLUSTER>
+        <PARAM name="clusterName" value="dbCluster" />
+        <PARAM name="nodeNames" value="db1" />
+        <PARAM name="backIp1s" value="192.168.201.131"/>
+        <PARAM name="gaussdbAppPath" value="/opt/gaussdb/app" />
+        <PARAM name="gaussdbLogPath" value="/var/log/omm" />
+        <PARAM name="gaussdbToolPath" value="/opt/huawei/wisequery" />
+        <PARAM name="corePath" value="/opt/opengauss/corefile"/>
+        <PARAM name="clusterType" value="single-inst"/>
+    </CLUSTER>
+    <DEVICELIST>
+        <DEVICE sn="1000001">
+            <PARAM name="name" value="db1"/>
+            <PARAM name="azName" value="AZ1"/>
+            <PARAM name="azPriority" value="1"/>
+            <PARAM name="backIp1" value="192.168.201.131"/>
+            <PARAM name="sshIp1" value="192.168.201.131"/>
+            <PARAM name="dataNum" value="1"/>
+            <PARAM name="dataPortBase" value="26000"/>
+            <PARAM name="dataNode1" value="/opt/gaussdb/data/db1"/>
+        </DEVICE>
+    </DEVICELIST>
+</ROOT>
+```
+其中ip设置为之前的`192.168.201.131`,主机名为`db1`,如下:
+![avatar](pic/openGauss安装/openGauss配置文件.PNG)
+
+解压安装包
+
+![avatar](pic/openGauss安装/解压安装包.PNG)
+
+解压后查看并修改文件权限
+
+![avatar](pic/openGauss安装/更改权限.PNG)
+
+执行初始化脚本
+``` cd /opt/software/openGauss/script```
+
+``` python gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml ```
+返回`Preinstallation succeeded`内容时,初始化完成
+
+![avatar](pic/openGauss安装/初始化脚本执行完成.PNG)
+
+初始化数据库
+
+重启虚拟机后使用omm用户进行数据库初始化
+``` gs_install -X /opt/software/openGauss/clusterconfig.xml --gsinit-parameter="--encoding=UTF8" --dn-guc="max_process_memory=2GB" --dn-guc="shared_buffers=128MB" --dn-guc="bulk_write_ring_size=128MB" --dn-guc="cstore_buffers=16MB"```
+
+其中对应的内存参数大小须根据虚拟机情况进行设置
+
+![avatar](pic/openGauss安装/数据库初始化.PNG)
+
+安装完成后清理软件安装包
+
+![avatar](pic/openGauss安装/数据库安装结束.PNG) + +#### 连接数据库 + +![avatar](pic/openGauss安装/数据库使用步骤1-3.PNG) + +### JDBC配置 +从官方网站选取对应版本的jar包并解压,在eclipse上配置加载驱动类。 + +![avatar](pic/jdbc连接/windows配置jdbc包.PNG) + +第一次连接后操作数据库需要修改omm用户密码 + +![avatar](pic/jdbc连接/第一次连接后操作需要修改omm用户密码.PNG) + +根据官方文档提供的demo程序修改后进行连接测试,连接成功如下: + +![avatar](pic/jdbc连接/连接成功.PNG) + +demo程序: +```java +package gaussjdbc; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.sql.CallableStatement; + +public class Gaussjdbc { + + //创建数据库连接。 + public static Connection GetConnection(String username, String passwd) { + String driver = "org.postgresql.Driver"; + String sourceURL = "jdbc:postgresql://192.168.201.131:26000/postgres"; + Connection conn = null; + try { + //加载数据库驱动。 + Class.forName(driver).newInstance(); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + try { + //创建数据库连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + return conn; + }; + + //执行普通SQL语句,创建customer_t1表。 + public static void CreateTable(Connection conn) { + Statement stmt = null; + try { + stmt = conn.createStatement(); + + //执行普通SQL语句。 + int rc = stmt + .executeUpdate("CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));"); + + stmt.close(); + } catch (SQLException e) { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + //执行预处理语句,批量插入数据。 + public static void BatchInsertData(Connection conn) { + PreparedStatement pst = null; + + try { + //生成预处理语句。 + pst = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?,?)"); + for (int i = 0; i < 3; i++) { + //添加参数。 + pst.setInt(1, i); + pst.setString(2, "data " + i); + pst.addBatch(); + } + //执行批处理。 + pst.executeBatch(); + pst.close(); + } catch (SQLException e) { + if (pst != null) { + try { + pst.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + //执行预编译语句,更新数据。 + public static void ExecPreparedSQL(Connection conn) { + PreparedStatement pstmt = null; + try { + pstmt = conn + .prepareStatement("UPDATE customer_t1 SET c_customer_name = ? WHERE c_customer_sk = 1"); + pstmt.setString(1, "new Data"); + int rowcount = pstmt.executeUpdate(); + pstmt.close(); + } catch (SQLException e) { + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + + //执行存储过程。 + public static void ExecCallableSQL(Connection conn) { + CallableStatement cstmt = null; + try { + + cstmt=conn.prepareCall("{? 
= CALL TESTPROC(?,?,?)}"); + cstmt.setInt(2, 50); + cstmt.setInt(1, 20); + cstmt.setInt(3, 90); + cstmt.registerOutParameter(4, Types.INTEGER); //注册out类型的参数,类型为整型。 + cstmt.execute(); + int out = cstmt.getInt(4); //获取out参数 + System.out.println("The CallableStatment TESTPROC returns:"+out); + cstmt.close(); + } catch (SQLException e) { + if (cstmt != null) { + try { + cstmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + + /** + * 主程序,逐步调用各静态方法。 + * @param args + */ + public static void main(String[] args) { + //创建数据库连接。 + Connection conn = GetConnection("parker", "parker@123"); + + //创建表。 + CreateTable(conn); + + //批插数据。 + BatchInsertData(conn); + + //执行预编译语句,更新数据。 + ExecPreparedSQL(conn); + + //执行存储过程。 + //ExecCallableSQL(conn);//这部分在运行时有问题,直接注释掉了 + + //关闭数据库连接。 + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + + } + +} + +``` + +# 安装中遇到的问题与解决过程 +### 初始化脚本失败报错 +![avatar](pic/openGauss安装/脚本报错.PNG) + +![avatar](pic/openGauss安装/报错解决.PNG) + +### `CentOS`上配置`JAVA` + +自带的java路径寻找: + +![avatar](pic/jdbc连接/自带的java路径寻找.PNG) + +配置`CentOS`环境变量: + +![avatar](pic/jdbc连接/java环境变量配置.PNG) + +而后期验证`javac`时发现`CentOS`其自带的java仅有运行环境,改用windows作为客户端。 + +也可以自行下载java环境配置进行解决配置: + +![avatar](pic/jdbc连接/java开发环境配置结束.PNG) + +### 数据库连接问题 + +地址错误 + +![avatar](pic/jdbc连接/运行demo报地址错误.PNG) + +修改后ip未放行错误 + +![avatar](pic/jdbc连接/修改ip后未放行错误.PNG) + +放行ip命令(在官方文档`客户端接入验证`处可以查询)如下 + +``` gs_guc set -N all -I all -h "host all parker 192.168.201.1/32 sha256"``` + +具体的接入ip若不清楚可以通过报错信息或本地的ipconfig进行查看 + +![avatar](pic/jdbc连接/放行ip.PNG) diff --git "a/content/zh/post/PARKERljc/2021-12-4-openGauss\346\225\260\346\215\256\345\272\223\346\272\220\347\240\201\345\255\246\344\271\240-\346\214\207\346\240\207\351\207\207\351\233\206\343\200\201\351\242\204\346\265\213\344\270\216\345\274\202\345\270\270\346\243\200\346\265\213.md" "b/content/zh/post/PARKERljc/2021-12-4-openGauss\346\225\260\346\215\256\345\272\223\346\272\220\347\240\201\345\255\246\344\271\240-\346\214\207\346\240\207\351\207\207\351\233\206\343\200\201\351\242\204\346\265\213\344\270\216\345\274\202\345\270\270\346\243\200\346\265\213.md" new file mode 100644 index 0000000000000000000000000000000000000000..4a1821dd47a2425061aa7d65b4b750117c88cb59 --- /dev/null +++ "b/content/zh/post/PARKERljc/2021-12-4-openGauss\346\225\260\346\215\256\345\272\223\346\272\220\347\240\201\345\255\246\344\271\240-\346\214\207\346\240\207\351\207\207\351\233\206\343\200\201\351\242\204\346\265\213\344\270\216\345\274\202\345\270\270\346\243\200\346\265\213.md" @@ -0,0 +1,171 @@ ++++ + +title = "openGauss数据库源码学习-指标采集、预测与异常检测" + +date = "2021-12-04" + +tags = ["openGauss", "性能数据采集","指标预测","异常检测"] + +archives = "2021-12" + +author = "parker" + +summary = "openGauss数据库源码学习-指标采集、预测与异常检测" + ++++ +# opengauss源码解析 +## 指标采集、预测与异常检测 +代码位置:/gausskernel/dbmind/tools/anomaly_detection + +各模组在整体结构上的组合在摩天轮论坛上官方解析文章已经叙述的相对完整详尽。该报告对应为具体模块内部的实现。 + +### Agent模块 +代码位置: ~/agent +`Agent`模块负责数据库指标数据采集与发送,从代码的结构上看,可以分为3个子模块,即`DBSource(/db_source.py)`,`MemoryChannel(/channel.py)`,`HttpSink(/sink.py)`,负责整合组织各个模块进行协作部分的代码位于~/metric_agent.py的agent_main()方法中。 + +![avatar](/pic/xmind/agent.png) + +#### DBSource +代码位置:~/agent/db_source.py +`DBSource`承担数据采集的功能,其承担的三个task可以在agent_main()部分直观的看到,分别为`database_exporter`,`os_exporter`,`wdr`。该三个task的代码位于anomaly_detection/task中. 
+ +![avatar](pic/xmind/dbsource.png) + +![avatar](pic/xmind/addtask.png) + +在metric_agent()中的使用 + +![avatar](pic/xmind/tasks.png) + +##### os_exporter +负责收集部署opengauss的设备上系统的部分性能数据。 + +| 对象方法 | 对应实现 | +|:---:|:---:| +|cpu_usage()/memory_usage()|linux命令ps -ux获取| +|io_read()/io_write()|linux命令 pidstat获取| +|io_wait()|iostat获取| +|disk_space()|通过sql获取路径,再通过命令du -sh获取| + +可以看到该task收集的数据包括cpu使用率,io读写,io等待,内存使用以及硬盘空间存储部分。 +##### DatabaseExporter +负责收集数据库方面的性能数据。 +| 对象方法 | 对应实现 | +|:---:|:---:| +|guc_parameter()|通过sql语句查询pg_setting中的数据,包括工作内存,共享缓冲区的大小以及最大的连接数量| +|current_connections()|select count(1) from pg_stat_activity| +|gps()每秒事务量|通过sql语句从gs_sql_count中获取select/update/insert/delete计数,间隔0.1s采样,乘以10作为结果| +|process()|linux命令ps -aux获取进程信息,通过分割筛选等处理获取进程数据,返回key为`(pid)_(process)`,value为`(cpu_usage:memory_usage)`的字典| +|temp_file()|sql获取路径后检查临时数据文件夹pgsql_tmp是否存在,返回`t`/`f` +##### WDR(`Workload Diagnosis Report`) +WDR基于两次不同时间点系统的性能快照数据,生成两个时间点之间的性能表现报表,用于诊断数据库内核的性能故障。而该部分的wdr相关的仅仅是一小部分,`wdr`中`summary`级和`detail`级别的性能数据比起内置的这部分数据要丰富的多。该task模块中主要通过sql语句进行计数器的查询 + +![avatar](/pic/xmind/图片1.png) + +#### MemoryChannel +代码位置:~/agent/channel.py +整体为一个存放数据的队列,结构比较简单 + +![avatar](/pic/xmind/memorychannel.png) + +|对象方法|对应实现| +|:---:|:---:| +|put()|尝试向队列中放置数据,超过最大限度时log提醒并舍去新数据| +|take()|数据出列并返回其值| +|size()|返回队列数据量| + +#### HttpSink +代码位置:~/agent/sink.py +其从`MemoryChannel`获取数据,并根据metric_agent.py中提供的协议`ssl`/`http`进行转发,重试次数为5次,间隔1s +```python +def process(self): + agent_logger.info('Begin send data to {url}.'.format(url=self._url)) + while self.running: + contents = self._channel.take() + if not contents: + time.sleep(0.5) + continue + + contents.update(**{'flag': {'host': self.db_host, 'port': self.db_port, 'type': self.db_type}}) + retry_times = 5 + while retry_times: + try: + req = request.Request(self._url, headers=_JSON_HEADER, + data=json.dumps(contents).encode('utf-8'), + method='POST') + request.urlopen(req, context=self.context) + break + except Exception as e: + agent_logger.error("{error}, retry...".format(error=str(e))) + retry_times -= 1 + if not retry_times: + raise + time.sleep(1.0) + time.sleep(self._interval) +``` +### Detector + +代码位置: ~/detector + +detector模块负责数据预测与异常检测,从代码的结构上看,可以分为3个部分,即`algorithm`,`service`和`tools`,负责整合组织各个模块进行协作部分的代码位于~/metric_detector.py中。 + +![avatar](/pic/xmind/agent.png) + +#### receiver +代码位置:~/detector/service/resource/receiver.py + +`receiver`为service部分中,其功能为接受和存储获取到的性能数据。其中agent收集到的三部分tasks数据`os_exporter`,`database_exporter`以及`wdr`通过sqlite存储在本地。 +数据接收: + +![avatar](/pic/xmind/数据接收.png) + +数据存储: + +![avatar](/pic/xmind/数据存储.png) + +#### SQLiteStorage +代码位置:~/detector/service/storage/sqlite_storage.py +`SQLiteStorage`实现了一种通过SQLite进行本地化存储数据的方式,该类主要是加载sql配置,并封装了一些需要的各种sql操作以使用,如下: +|方法|对应功能| +|:---:|:---:| +|select_timeseries_by_timestamp|按照时间戳获取最近一段时间的数据| +|select_timeseries_by_number|按照编号获取最近一段时间的数据| +|load_sql_operation|加载本地table.json的sql操作| +|get_latest_timestamp|获取最新的时间戳| +##### table.json +位于/tools/anomaly_detection下,为该部分实现时加载的sql配置文件 + +![avatar](pic/xmind/table_json.png) + +#### algorithm +代码位置:~/detector/algorithm +`algorithm`为时序预测算法部分,其包括`arima`和`prophet`两套时间序列预测算法框架,`model`中的`AlgModel`为算法的父类,也提供了使用自定义算法时的模板,使用时若未指定算法会默认使用`auto_arima`. 
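+
+按上述接口约定,可以大致这样接入一个自定义算法(示意代码,非原文;类与方法签名按本文描述假设,实际以仓库中 detector/algorithm 下的 model.py 为准):
+
+```python
+import pickle
+
+class SimpleMeanModel:
+    """示意:用历史均值做预测的最小“自定义算法”,接口模仿本文描述的 AlgModel。"""
+
+    def __init__(self):
+        self.mean = None
+
+    def fit(self, timeseries):
+        # timeseries 形如 [[timestamp, ...], [value, ...]],这里只用 value 序列
+        values = timeseries[1]
+        self.mean = sum(values) / len(values)
+
+    def forecast(self, period):
+        # 返回 period 个预测值;真实实现还需按 freq 生成对应的未来时间戳
+        return [self.mean] * period
+
+    def save(self, model_path):
+        with open(model_path, 'wb') as f:
+            pickle.dump(self, f)
+
+    @classmethod
+    def load(cls, model_path):
+        with open(model_path, 'rb') as f:
+            return pickle.load(f)
+```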
+
+![avatar](/pic/xmind/algorithm.png)
+
+|对象方法|实现功能|
+|:---:|:---:|
+|fit()|输入时序数据 list(形如 \[\[timestamp...\], \[value...\]\])进行模型训练|
+|forecast()|给定预测期间 period 与频率 freq 进行数据预测,返回时序数据|
+|save()/load()|通过`pickle`库来存放、加载模型|
+
+##### fit():
+
+![avatar](pic/xmind/fit.png)
+
+##### forecast():
+![avatar](pic/xmind/forecast1.png)
+
+#### trend
+包括`forecast`和`detect`模块,提供时序数据预测与基于阈值的异常检测报警功能
+##### forecast
+代码位置:~/detector/tools/trend/forecast.py
+
+![avatar](pic/xmind/forecast.png)
+
+该模块负责读取时序数据(数量不得低于设定值 minimum_timeseries_length),通过选定的预测算法进行拟合预测,并生成数据`future_reselt`(包含 status、metric_name、detect_basis(minimum、maximum)、future_date、future_value)供后续处理
+##### detect
+代码位置:~/detector/tools/trend/detect.py
+
+![avatar](pic/xmind/detect.png)
+
+该模块根据`forecast`封装后的数据以及配置中设定的阈值进行判断:正常的数据写入 m_logger;检测到越界的异常数据(即 higher、lower)写入 a_logger 警示。
diff --git a/content/zh/post/PARKERljc/pic/issues.txt b/content/zh/post/PARKERljc/pic/issues.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b3f0362e8ebed1a8a75bb533c1fb0f48aa8c81e5
--- /dev/null
+++ b/content/zh/post/PARKERljc/pic/issues.txt
@@ -0,0 +1,6 @@
+数据库密码:parker@123
+java:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.262.b10-1.el7.x86_64/jre/bin/java
+centos自带java只有运行环境,无开发环境
+通过yum安装后路径发生了改变
+新的下载后java:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/bin/java
+重新修改配置文件 /etc/profile 运行javac无报错
\ No newline at end of file
diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/java\345\274\200\345\217\221\347\216\257\345\242\203\351\205\215\347\275\256\347\273\223\346\235\237.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/java\345\274\200\345\217\221\347\216\257\345\242\203\351\205\215\347\275\256\347\273\223\346\235\237.PNG"
new file mode 100644 index 
"b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/windows\351\205\215\347\275\256jdbc\345\214\205.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\270\213\350\275\275\345\214\205\351\224\231\350\247\243\345\206\263.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\270\213\350\275\275\345\214\205\351\224\231\350\247\243\345\206\263.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..6990483650e4fbbfa78421c66a2400342239991f Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\270\213\350\275\275\345\214\205\351\224\231\350\247\243\345\206\263.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\270\213\350\275\275\346\212\245\351\224\231.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\270\213\350\275\275\346\212\245\351\224\231.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..8694a1c5a4e18b195099f31e81be7e428e1ca971 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\270\213\350\275\275\346\212\245\351\224\231.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\277\256\346\224\271ip\345\220\216\346\234\252\346\224\276\350\241\214\351\224\231\350\257\257.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\277\256\346\224\271ip\345\220\216\346\234\252\346\224\276\350\241\214\351\224\231\350\257\257.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..d50253a9dc13167fa80ee5be70d738e6e1982a5e Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\344\277\256\346\224\271ip\345\220\216\346\234\252\346\224\276\350\241\214\351\224\231\350\257\257.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\345\210\233\345\273\272\346\225\260\346\215\256\345\272\223.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\345\210\233\345\273\272\346\225\260\346\215\256\345\272\223.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..42cdce6f5377f0b95a208049676f140b82649009 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\345\210\233\345\273\272\346\225\260\346\215\256\345\272\223.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\346\224\276\350\241\214ip.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\346\224\276\350\241\214ip.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..72f9d0ebee5a203db6016d3307aacff10949d644 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\346\224\276\350\241\214ip.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\347\216\257\345\242\203\345\217\230\351\207\217\347\224\237\346\225\210\346\237\245\347\234\213.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\347\216\257\345\242\203\345\217\230\351\207\217\347\224\237\346\225\210\346\237\245\347\234\213.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..15fcdc87696772a30ce02407e3f119b0ad05362c Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\347\216\257\345\242\203\345\217\230\351\207\217\347\224\237\346\225\210\346\237\245\347\234\213.PNG" differ diff --git 
"a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\347\254\254\344\270\200\346\254\241\350\277\236\346\216\245\345\220\216\346\223\215\344\275\234\351\234\200\350\246\201\344\277\256\346\224\271omm\347\224\250\346\210\267\345\257\206\347\240\201.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\347\254\254\344\270\200\346\254\241\350\277\236\346\216\245\345\220\216\346\223\215\344\275\234\351\234\200\350\246\201\344\277\256\346\224\271omm\347\224\250\346\210\267\345\257\206\347\240\201.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..c93afc07ec11bfe5abbec34781a1a3eb66c9ac17 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\347\254\254\344\270\200\346\254\241\350\277\236\346\216\245\345\220\216\346\223\215\344\275\234\351\234\200\350\246\201\344\277\256\346\224\271omm\347\224\250\346\210\267\345\257\206\347\240\201.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\207\252\345\270\246java\346\227\240\345\274\200\345\217\221\347\216\257\345\242\203.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\207\252\345\270\246java\346\227\240\345\274\200\345\217\221\347\216\257\345\242\203.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..f055f43de9d92eb66f363bdb6b7e27912387d1a5 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\207\252\345\270\246java\346\227\240\345\274\200\345\217\221\347\216\257\345\242\203.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\207\252\345\270\246\347\232\204java\350\267\257\345\276\204\345\257\273\346\211\276.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\207\252\345\270\246\347\232\204java\350\267\257\345\276\204\345\257\273\346\211\276.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..dd7de840d5a3ad685b488d21d0e4d64ad85f87ee Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\207\252\345\270\246\347\232\204java\350\267\257\345\276\204\345\257\273\346\211\276.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\277\220\350\241\214demo\346\212\245\345\234\260\345\235\200\351\224\231\350\257\257.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\277\220\350\241\214demo\346\212\245\345\234\260\345\235\200\351\224\231\350\257\257.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..8f29fcdc75d944c5adf81f4eb2f620139ce20bce Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\277\220\350\241\214demo\346\212\245\345\234\260\345\235\200\351\224\231\350\257\257.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\277\236\346\216\245\346\210\220\345\212\237.PNG" "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\277\236\346\216\245\346\210\220\345\212\237.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..7a87a0258d51848e84f694d7e158f48f6428ed68 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/jdbc\350\277\236\346\216\245/\350\277\236\346\216\245\346\210\220\345\212\237.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/1.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/1.PNG" new file mode 100644 index 
0000000000000000000000000000000000000000..bf28eb0b9c9e9c94338962fbba0811f8a61578a8 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/1.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/2.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/2.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..a4b88375b7f1cb309a2d0ebeb34265513299a567 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/2.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/3.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/3.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..bde576bd8c4eb0cdd62a71b6d337136525bdd7ff Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/3.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/4.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/4.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..97180d74e05a44f7c0bad0e74884638348d3bac9 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/4.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/5.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/5.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..a7fa30c73450e31479b1967b5d74f5783f69ce04 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/5.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/6.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/6.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..ff53e186d6bf7f8bddc6451571a6d59d913ba229 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/6.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/7.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/7.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..ba69df3b3d9ecbf40a149203140710972644f7be Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/7.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/8.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/8.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..237afc5b6a9b20c4ba0f0e3529504e58a482ed25 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/8.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/Vim-E212.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/Vim-E212.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..ceab5721665c77e3aebc0320b236282e8dbd00c8 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/Vim-E212.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/Vim-E212\344\277\256\346\224\271\345\256\214\346\257\225.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/Vim-E212\344\277\256\346\224\271\345\256\214\346\257\225.PNG" new file mode 100644 index 
0000000000000000000000000000000000000000..a31d569403c6a4f9e9e6b00a84a48c230f792113 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/Vim-E212\344\277\256\346\224\271\345\256\214\346\257\225.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/issues.txt" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/issues.txt" new file mode 100644 index 0000000000000000000000000000000000000000..d235e9688684ea8867bf60dee5844fc7d501d991 --- /dev/null +++ "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/issues.txt" @@ -0,0 +1 @@ +数据库密码:parker@123 \ No newline at end of file diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/openGauss\351\205\215\347\275\256\346\226\207\344\273\266.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/openGauss\351\205\215\347\275\256\346\226\207\344\273\266.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..ff779efe38d2d312dd599232f85001b260751c5c Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/openGauss\351\205\215\347\275\256\346\226\207\344\273\266.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/python\347\211\210\346\234\254.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/python\347\211\210\346\234\254.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..30023e9ef3ca5afc2556719473c7ace82d535641 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/python\347\211\210\346\234\254.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/yum\344\275\277\347\224\250\351\252\214\350\257\201\345\271\266\344\277\256\346\224\271\344\270\272python2.7.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/yum\344\275\277\347\224\250\351\252\214\350\257\201\345\271\266\344\277\256\346\224\271\344\270\272python2.7.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..1693e9fdb3dd1e5e942312246cda418576a6c412 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/yum\344\275\277\347\224\250\351\252\214\350\257\201\345\271\266\344\277\256\346\224\271\344\270\272python2.7.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/yum\345\256\211\350\243\205\347\233\270\345\272\224\347\232\204\345\214\205.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/yum\345\256\211\350\243\205\347\233\270\345\272\224\347\232\204\345\214\205.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..56926bc06e65e931ecbda4166033ce0b5e2aeab9 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/yum\345\256\211\350\243\205\347\233\270\345\272\224\347\232\204\345\214\205.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\212\347\275\221\346\265\213\350\257\225.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\212\347\275\221\346\265\213\350\257\225.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..0f059c998d0a47958a2062a6b1d353c42c8ee575 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\212\347\275\221\346\265\213\350\257\225.PNG" differ diff --git 
"a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\213\350\275\275\345\217\257\347\224\250\346\272\220\347\232\204repo\346\226\207\344\273\266.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\213\350\275\275\345\217\257\347\224\250\346\272\220\347\232\204repo\346\226\207\344\273\266.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..9629817a5cfe4f13b6f138cc4ecfecf513acb450 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\213\350\275\275\345\217\257\347\224\250\346\272\220\347\232\204repo\346\226\207\344\273\266.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\213\350\275\275\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\345\214\205.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\213\350\275\275\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\345\214\205.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..1b29b8175ad80492fd5ca8bb9dcbd7c5f9eb4312 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\270\213\350\275\275\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\345\214\205.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\277\256\346\224\271\346\223\215\344\275\234\347\263\273\347\273\237\347\211\210\346\234\254.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\277\256\346\224\271\346\223\215\344\275\234\347\263\273\347\273\237\347\211\210\346\234\254.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..273565cf09f6ba3e43a983df92dee38f4a7e2c28 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\344\277\256\346\224\271\346\223\215\344\275\234\347\263\273\347\273\237\347\211\210\346\234\254.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\205\263\351\227\255swap\344\272\244\346\215\242\345\206\205\345\255\230.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\205\263\351\227\255swap\344\272\244\346\215\242\345\206\205\345\255\230.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..250d290fc508170e1ac9723bb4cf6088d100efbb Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\205\263\351\227\255swap\344\272\244\346\215\242\345\206\205\345\255\230.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\205\263\351\227\255\351\230\262\347\201\253\345\242\231.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\205\263\351\227\255\351\230\262\347\201\253\345\242\231.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..b28490662275f5b75c6a06bd4cf9ff081e011408 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\205\263\351\227\255\351\230\262\347\201\253\345\242\231.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\206\345\214\272.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\206\345\214\272.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..3b5af978d5d670215de39aae1eb28dd5723ed8ed Binary files /dev/null and 
"b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\206\345\214\272.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\206\345\214\2722.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\206\345\214\2722.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..87be33970cb9fd48a1c94376eb87bd7448f50b97 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\206\345\214\2722.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\233\345\273\272\345\255\230\346\224\276\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\347\233\256\345\275\225.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\233\345\273\272\345\255\230\346\224\276\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\347\233\256\345\275\225.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..bc5bd1795fb97edbe7a23c5371f388fea99cfb7f Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\233\345\273\272\345\255\230\346\224\276\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\347\233\256\345\275\225.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\235\345\247\213\345\214\226\350\204\232\346\234\254\346\211\247\350\241\214\345\256\214\346\210\220.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\235\345\247\213\345\214\226\350\204\232\346\234\254\346\211\247\350\241\214\345\256\214\346\210\220.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..ba9b390678d835f3d613902b3008f83f56a897a4 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\210\235\345\247\213\345\214\226\350\204\232\346\234\254\346\211\247\350\241\214\345\256\214\346\210\220.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\244\207\344\273\275yum\351\205\215\347\275\256\346\226\207\344\273\266.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\244\207\344\273\275yum\351\205\215\347\275\256\346\226\207\344\273\266.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..3eb015ad3b77e040959a2e3e0db25dcb135393c9 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\244\207\344\273\275yum\351\205\215\347\275\256\346\226\207\344\273\266.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..6fc4756a9d1bf798796a6d69397ba2a2f9bcd236 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206Error.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206Error.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..51f2c826987333287f9a05d5bcafbfa04c7517cc Binary files /dev/null and 
"b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206Error.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206\350\256\276\347\275\256.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206\350\256\276\347\275\256.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..c6755cc7717d811cec235885170c451afe26c897 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206\350\256\276\347\275\256.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206\350\256\276\347\275\256\345\221\275\344\273\244.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206\350\256\276\347\275\256\345\221\275\344\273\244.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..ce4eaf818cf1695edd0a33e9a994cf3aae9a357b Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\345\255\227\347\254\246\351\233\206\350\256\276\347\275\256\345\221\275\344\273\244.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\212\245\351\224\231\350\247\243\345\206\263.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\212\245\351\224\231\350\247\243\345\206\263.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..d1089b06d2dcdf18f328efb62bb245c346cc9d20 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\212\245\351\224\231\350\247\243\345\206\263.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\344\275\277\347\224\250\346\255\245\351\252\2441-3.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\344\275\277\347\224\250\346\255\245\351\252\2441-3.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..f72e32c3b199c28ea3bd0ca485e4725f7faa7737 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\344\275\277\347\224\250\346\255\245\351\252\2441-3.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..6fbee3e981dc1eebea15cc2ce70e2347b698dc09 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\347\273\223\346\235\237.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\347\273\223\346\235\237.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..49aa3f5be8e56fac975bfa924f0186a396cafcf7 Binary files /dev/null and 
"b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\347\273\223\346\235\237.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\233\264\346\224\271\346\235\203\351\231\220.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\233\264\346\224\271\346\235\203\351\231\220.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..05737d942a33c963e8ede22684c350ddec7b2a5a Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\233\264\346\224\271\346\235\203\351\231\220.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\237\245\347\234\213repo\346\226\207\344\273\266\346\230\257\345\220\246\346\255\243\347\241\256.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\237\245\347\234\213repo\346\226\207\344\273\266\346\230\257\345\220\246\346\255\243\347\241\256.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..04fd21029139345e4ec8ca4d0a003a83756af98c Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\346\237\245\347\234\213repo\346\226\207\344\273\266\346\230\257\345\220\246\346\255\243\347\241\256.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\347\275\221\347\273\234\345\222\214\344\270\273\346\234\272\345\220\215.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\347\275\221\347\273\234\345\222\214\344\270\273\346\234\272\345\220\215.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..2505ecef2d7c5a14acae9f87d5651fd4815b2d34 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\347\275\221\347\273\234\345\222\214\344\270\273\346\234\272\345\220\215.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\347\275\221\347\273\234\345\222\214\344\270\273\346\234\272\345\220\2152.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\347\275\221\347\273\234\345\222\214\344\270\273\346\234\272\345\220\2152.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..c6309b178d2a3b41c807344d5460839280742fc8 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\347\275\221\347\273\234\345\222\214\344\270\273\346\234\272\345\220\2152.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\204\232\346\234\254\346\212\245\351\224\231.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\204\232\346\234\254\346\212\245\351\224\231.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..14b4c86fb4b9682d9eb1efe2620a5059a18e1eb6 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\204\232\346\234\254\346\212\245\351\224\231.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\247\243\345\216\213\345\256\211\350\243\205\345\214\205.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\247\243\345\216\213\345\256\211\350\243\205\345\214\205.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..3d2407cd98f7a5a972082ed5b44fec7d09d5a463 Binary files /dev/null and 
"b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\247\243\345\216\213\345\256\211\350\243\205\345\214\205.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\256\276\347\275\256\345\255\227\347\254\246\351\233\206\345\217\212\347\216\257\345\242\203\345\217\230\351\207\217.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\256\276\347\275\256\345\255\227\347\254\246\351\233\206\345\217\212\347\216\257\345\242\203\345\217\230\351\207\217.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..3d828dfa2e8b918986f733e6c8d2ea85924e0cbf Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\256\276\347\275\256\345\255\227\347\254\246\351\233\206\345\217\212\347\216\257\345\242\203\345\217\230\351\207\217.PNG" differ diff --git "a/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\275\257\344\273\266\351\200\211\346\213\251.PNG" "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\275\257\344\273\266\351\200\211\346\213\251.PNG" new file mode 100644 index 0000000000000000000000000000000000000000..38e31bbf4a1460c4d964709d73382fe2d1aa6ebc Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/openGauss\345\256\211\350\243\205/\350\275\257\344\273\266\351\200\211\346\213\251.PNG" differ diff --git a/content/zh/post/PARKERljc/pic/xmind/addtask.png b/content/zh/post/PARKERljc/pic/xmind/addtask.png new file mode 100644 index 0000000000000000000000000000000000000000..4579d32239416899c01e49df828e49bc3a0838ef Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/addtask.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/agent.png b/content/zh/post/PARKERljc/pic/xmind/agent.png new file mode 100644 index 0000000000000000000000000000000000000000..3cbc71c58d34bb380bf51b9dc081302261b71bbe Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/agent.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/algorithm.png b/content/zh/post/PARKERljc/pic/xmind/algorithm.png new file mode 100644 index 0000000000000000000000000000000000000000..7e300871674d2d9fb3d962eee00da809c2248905 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/algorithm.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/dbsource.png b/content/zh/post/PARKERljc/pic/xmind/dbsource.png new file mode 100644 index 0000000000000000000000000000000000000000..ac59b08868d4b03a356271a32b46ffd689fb846f Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/dbsource.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/detect.png b/content/zh/post/PARKERljc/pic/xmind/detect.png new file mode 100644 index 0000000000000000000000000000000000000000..d7a4a94ee5d5daad0d8f60c181328ae9c5515f8e Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/detect.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/detector.png b/content/zh/post/PARKERljc/pic/xmind/detector.png new file mode 100644 index 0000000000000000000000000000000000000000..3ddcbc9262059ca2f52ee6b9847d7763a40cce08 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/detector.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/fit.PNG b/content/zh/post/PARKERljc/pic/xmind/fit.PNG new file mode 100644 index 0000000000000000000000000000000000000000..5d580af60c2790943bdb48e4e092e7f07e4ed5f4 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/fit.PNG differ diff --git 
a/content/zh/post/PARKERljc/pic/xmind/forecast.png b/content/zh/post/PARKERljc/pic/xmind/forecast.png new file mode 100644 index 0000000000000000000000000000000000000000..9e15932028cffd325eb0a3ebae5185b985d4f9d4 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/forecast.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/forecast1.PNG b/content/zh/post/PARKERljc/pic/xmind/forecast1.PNG new file mode 100644 index 0000000000000000000000000000000000000000..1d1354e021c6fd9370f6ed7cfa05fb2c0170da8b Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/forecast1.PNG differ diff --git a/content/zh/post/PARKERljc/pic/xmind/httpsink.png b/content/zh/post/PARKERljc/pic/xmind/httpsink.png new file mode 100644 index 0000000000000000000000000000000000000000..6cd251ebeb71341db3dff162e25a9c49789ec715 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/httpsink.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/memorychannel.png b/content/zh/post/PARKERljc/pic/xmind/memorychannel.png new file mode 100644 index 0000000000000000000000000000000000000000..6a64c398609287dbcf2f1bd29d745115aaf6af38 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/memorychannel.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/table_json.png b/content/zh/post/PARKERljc/pic/xmind/table_json.png new file mode 100644 index 0000000000000000000000000000000000000000..d43f7cd22e76571cab95ebecec3a2bc5fb844a39 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/table_json.png differ diff --git a/content/zh/post/PARKERljc/pic/xmind/tasks.png b/content/zh/post/PARKERljc/pic/xmind/tasks.png new file mode 100644 index 0000000000000000000000000000000000000000..bc39cfeca31ac1cb4cd52c1febad8f9389d7fe47 Binary files /dev/null and b/content/zh/post/PARKERljc/pic/xmind/tasks.png differ diff --git "a/content/zh/post/PARKERljc/pic/xmind/\345\233\276\347\211\2071.png" "b/content/zh/post/PARKERljc/pic/xmind/\345\233\276\347\211\2071.png" new file mode 100644 index 0000000000000000000000000000000000000000..73aa2782eafb64d7eb17298d8ae7b537f00b6b5b Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/xmind/\345\233\276\347\211\2071.png" differ diff --git "a/content/zh/post/PARKERljc/pic/xmind/\346\225\260\346\215\256\345\255\230\345\202\250.png" "b/content/zh/post/PARKERljc/pic/xmind/\346\225\260\346\215\256\345\255\230\345\202\250.png" new file mode 100644 index 0000000000000000000000000000000000000000..705ff0071f7529a372a7fcb708006088614d8d2c Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/xmind/\346\225\260\346\215\256\345\255\230\345\202\250.png" differ diff --git "a/content/zh/post/PARKERljc/pic/xmind/\346\225\260\346\215\256\346\216\245\346\224\266.png" "b/content/zh/post/PARKERljc/pic/xmind/\346\225\260\346\215\256\346\216\245\346\224\266.png" new file mode 100644 index 0000000000000000000000000000000000000000..6c854fd92c1385cd092a326e6a7fb6ec88557bc4 Binary files /dev/null and "b/content/zh/post/PARKERljc/pic/xmind/\346\225\260\346\215\256\346\216\245\346\224\266.png" differ diff --git "a/content/zh/post/PARKERljc/pic/xmind/\350\256\241\347\256\227\346\234\272\345\237\272\347\241\200\347\254\254\344\272\214\347\253\240\344\275\234\344\270\232" "b/content/zh/post/PARKERljc/pic/xmind/\350\256\241\347\256\227\346\234\272\345\237\272\347\241\200\347\254\254\344\272\214\347\253\240\344\275\234\344\270\232" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/content/zh/post/adadaadadade/img/image-20210913111340859.png b/content/zh/post/adadaadadade/img/image-20210913111340859.png new file mode 100644 index 0000000000000000000000000000000000000000..b0ffbd78f9a893b1d849fa3c3b764ad1c6bb9a8b Binary files /dev/null and b/content/zh/post/adadaadadade/img/image-20210913111340859.png differ diff --git a/content/zh/post/adadaadadade/img/image-20210913111551057.png b/content/zh/post/adadaadadade/img/image-20210913111551057.png new file mode 100644 index 0000000000000000000000000000000000000000..778d6a9937c2e88fe7c80342f31cb9991b766cf3 Binary files /dev/null and b/content/zh/post/adadaadadade/img/image-20210913111551057.png differ diff --git "a/content/zh/post/adadaadadade/openGauss\347\232\204\351\224\201\346\234\272\345\210\266\346\272\220\347\240\201\345\210\206\346\236\220.md" "b/content/zh/post/adadaadadade/openGauss\347\232\204\351\224\201\346\234\272\345\210\266\346\272\220\347\240\201\345\210\206\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..c196e0349279ccc954abbf28c744d1578a5c9b70 --- /dev/null +++ "b/content/zh/post/adadaadadade/openGauss\347\232\204\351\224\201\346\234\272\345\210\266\346\272\220\347\240\201\345\210\206\346\236\220.md" @@ -0,0 +1,534 @@
++++
+title = "Source Code Analysis of the openGauss Lock Mechanism"
+date = "2021-12-13"
+tags = ["Source Code Analysis of the openGauss Lock Mechanism"]
+archives = "2021-12"
+author = "adadaadadade"
+summary = "Source Code Analysis of the openGauss Lock Mechanism"
+times = "17:30"
++++
+
+# Lock Mechanism
+
+In a database, large numbers of threads access shared resources concurrently. To keep this both safe and efficient, a lock mechanism provides concurrency control. Depending on purpose and performance requirements, openGauss offers three kinds of locks: spinlocks (SpinLock), lightweight locks (LWLock), and regular locks; the lock manager lmgr wraps the calls for regular locks. Using a lock generally follows three steps: acquire the lock, perform the critical-section work, and release the lock.
+
+# Source Layout
+
+Header directory: src/include/storage/lock/
+cpp directory: src/gausskernel/storage/lmgr/
+
+```
+s_lock.cpp # hardware-dependent spinlock implementation
+spin.cpp # hardware-independent spinlock implementation
+
+lwlock_be.cpp # bridge between lightweight locks and pgstat
+lwlock.cpp # lightweight lock manager
+lwlocknames.txt # lightweight lock names and IDs, 115 entries
+generate-lwlocknames.pl # generates lwlocknames.h and lwlocknames.cpp from lwlocknames.txt
+
+lock.cpp # regular locks
+lmgr.cpp # lock manager
+deadlock.cpp # deadlock detection
+
+predicate.cpp # postgres predicate locks
+proc.cpp # routines managing each process's shared-memory data structures
+```
+
+## Spinlock (SpinLock)
+
+In openGauss, spinlocks are implemented with the CPU's atomic TAS (test-and-set) instruction. They are intended for short, simple operations such as reading or updating a flag. There is no deadlock detection; the calling code must be written so that deadlock cannot occur.
+
+The TAS instruction wrapper is per-architecture; the following is the x86_64 version.
+
+```c++
+#ifdef __x86_64__ /* AMD Opteron, Intel EM64T */
+#define HAS_TEST_AND_SET
+
+typedef unsigned char slock_t;
+
+#define TAS(lock) tas(lock)
+
+static __inline__ int tas(volatile slock_t* lock)
+{
+    register slock_t _res = 1;
+
+    /*
+     * On Opteron, using a non-locking test before the locking instruction
+     * is a huge loss. On EM64T, it appears to be a wash or small loss,
+     * so we needn't bother to try to distinguish the sub-architectures.
+     */
+    __asm__ __volatile__(" lock \n"
+                         " xchgb %0,%1 \n"
+                         : "+q"(_res), "+m"(*lock)
+                         :
+                         : "memory", "cc");
+    return (int)_res;
+}
+......
+#define TAS_SPIN(lock) TAS(lock)
+// TAS_SPIN is the TAS variant used while waiting for a lock; on some architectures, e.g. IA64, it differs from TAS
+```
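+
+As a quick aside, the same acquire/spin/release protocol can be tried out portably: std::atomic_flag::test_and_set is the standard-library counterpart of the xchgb-based tas() above. The following is my own stand-alone sketch for experimentation, not openGauss code:
+
+```c++
+#include <atomic>
+#include <iostream>
+#include <thread>
+#include <vector>
+
+// Minimal TAS spinlock mirroring the S_LOCK/S_UNLOCK protocol; illustrative only.
+class TasSpinLock {
+public:
+    void lock()
+    {
+        // Spin until test_and_set observes the flag clear, like while (TAS_SPIN(lock)).
+        while (flag_.test_and_set(std::memory_order_acquire)) {
+        }
+    }
+    void unlock()
+    {
+        flag_.clear(std::memory_order_release); // like s_unlock(): just clear the byte
+    }
+
+private:
+    std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
+};
+
+int main()
+{
+    TasSpinLock lock;
+    long counter = 0;
+    std::vector<std::thread> threads;
+    for (int i = 0; i < 4; ++i) {
+        threads.emplace_back([&] {
+            for (int j = 0; j < 100000; ++j) {
+                lock.lock();   // acquire
+                ++counter;     // short critical section
+                lock.unlock(); // release
+            }
+        });
+    }
+    for (auto& t : threads) {
+        t.join();
+    }
+    std::cout << counter << std::endl; // prints 400000
+    return 0;
+}
+```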
+
+The main spinlock functions are:
+
+```c++
+#define SpinLockInit(lock) S_INIT_LOCK(lock)
+
+#define SpinLockAcquire(lock) S_LOCK(lock)
+
+#define SpinLockRelease(lock) S_UNLOCK(lock)
+
+#define SpinLockFree(lock) S_LOCK_FREE(lock)
+```
+
+Below we focus on the acquire and release paths.
+
+Acquiring the lock:
+
+Note how a plain while loop around TAS_SPIN(lock) implements the busy wait:
+
+```c++
+#if !defined(S_LOCK)
+#define S_LOCK(lock) \
+    do { \
+        if (TAS(lock)) \
+            s_lock((lock), __FILE__, __LINE__); \
+    } while (0)
+#endif /* S_LOCK */
+
+int s_lock(volatile slock_t* lock, const char* file, int line)
+{
+    SpinDelayStatus delayStatus = init_spin_delay((void*)lock);
+    // wait for the lock here
+    while (TAS_SPIN(lock)) {
+        perform_spin_delay(&delayStatus);
+    }
+
+    finish_spin_delay(&delayStatus);
+
+    return delayStatus.delays;
+}
+```
+
+Releasing the lock:
+
+```c++
+void s_unlock(volatile slock_t* lock)
+{
+#ifdef TAS_ACTIVE_WORD
+    /* HP's PA-RISC */
+    *TAS_ACTIVE_WORD(lock) = -1;
+#else
+    *lock = 0;
+#endif
+}
+```
+
+### Lock-Free Atomic Operations
+
+openGauss also wraps a set of simple 32-, 64-, and 128-bit atomic operations for updating plain variables atomically; judging from the code, the hardware-specific LSE variants only target armv8.1-a.
+
+Here are a few examples (32-/64-bit add, a 32-bit compare-and-swap, and an LSE fetch-add):
+
+```c++
+// src/include/utils/atomic.h
+static inline int32 gs_atomic_add_32(volatile int32* ptr, int32 inc)
+{
+    return __sync_fetch_and_add(ptr, inc) + inc;
+}
+static inline int64 gs_atomic_add_64(int64* ptr, int64 inc)
+{
+    return __sync_fetch_and_add(ptr, inc) + inc;
+}
+static inline bool gs_compare_and_swap_32(int32* dest, int32 oldval, int32 newval)
+{
+    if (oldval == newval)
+        return true;
+    volatile bool res = __sync_bool_compare_and_swap(dest, oldval, newval);
+    return res;
+}
+// src/include/utils/atomic_lse.h
+static inline uint32 __lse_atomic_fetch_add_u32(volatile uint32 *ptr, uint32 val)
+{
+    register uint32 w0 asm ("w0") = val; \
+    register uint32 *x1 asm ("x1") = (uint32 *)(unsigned long)ptr; \
+    \
+    asm volatile(".arch_extension lse\n" \
+                 " ldaddal %w[val], %w[val], %[v]\n" \
+                 : [val] "+r" (w0), [v] "+Q" (*ptr) \
+                 : "r" (x1) \
+                 : "x16", "x17", "x30", "memory"); \
+    return w0; \
+}
+```
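+
+To show how these primitives are typically used (my own sketch on top of the same GCC/Clang __sync builtins, not openGauss code), a lock-free update snapshots the value, computes the new one, and retries the CAS until no concurrent writer got in between:
+
+```c++
+#include <cstdint>
+#include <cstdio>
+
+// Capped increment built on the builtins behind gs_atomic_add_32/gs_compare_and_swap_32.
+static int32_t capped_add(volatile int32_t* ptr, int32_t inc, int32_t cap)
+{
+    for (;;) {
+        int32_t oldval = *ptr;          // snapshot the current value
+        int32_t newval = oldval + inc;
+        if (newval > cap) {
+            newval = cap;               // compute the desired new value
+        }
+        // Publish only if *ptr is still the snapshot; otherwise another
+        // thread won the race and we retry with the fresh value.
+        if (__sync_bool_compare_and_swap(ptr, oldval, newval)) {
+            return newval;
+        }
+    }
+}
+
+int main()
+{
+    volatile int32_t v = 95;
+    printf("%d\n", capped_add(&v, 10, 100)); // prints 100
+    return 0;
+}
+```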
+
+## Lightweight Lock (LWLock)
+
+Lightweight locks are used inside openGauss where critical-section operations take comparatively long. They come in two flavors, shared and exclusive. Coding discipline is expected to keep them deadlock-free, but openGauss also provides a deadlock-detection mechanism for them.
+
+A number of commonly used lightweight locks are defined in lwlocknames.txt; generate-lwlocknames.pl generates the lwlocknames.h and lwlocknames.cpp files from it.
+
+lwlocknames.h contains the macro NUM_INDIVIDUAL_LWLOCKS plus one macro per lock name:
+#define $lockname (&t_thrd.shemem_ptr_cxt.mainLWLockArray[$lockidx].lock)
+
+lwlocknames.cpp contains the string-constant array MainLWLockNames.
+
+The commonly used lightweight locks are referenced through GetMainLWLockByIndex(i):
+
+```c++
+// src/include/storage/lock/lwlock.h
+#define GetMainLWLockByIndex(i) \
+    (&t_thrd.shemem_ptr_cxt.mainLWLockArray[i].lock)
+```
+
+The lock's data structures:
+
+```c++
+// src/include/storage/lock/lwlock.h
+typedef enum LWLockMode {
+    LW_EXCLUSIVE,       // exclusive lock
+    LW_SHARED,          // shared lock
+    LW_WAIT_UNTIL_FREE  /* A special mode used in PGPROC->lwlockMode,
+                         * when waiting for lock to become free. Not
+                         * to be used as LWLockAcquire argument */
+} LWLockMode;
+
+typedef struct LWLock {
+    uint16 tranche;            /* lock tranche ID */
+    pg_atomic_uint32 state;    /* state word */
+    dlist_head waiters;        /* list of waiting threads */
+#ifdef LOCK_DEBUG
+    pg_atomic_uint32 nwaiters; /* number of waiting threads */
+    struct PGPROC* owner;      /* last exclusive owner of the lock */
+#endif
+#ifdef ENABLE_THREAD_CHECK
+    pg_atomic_uint32 rwlock;
+    pg_atomic_uint32 listlock;
+#endif
+} LWLock;
+```
+
+Acquiring the lock:
+
+```c++
+// src/gausskernel/storage/lmgr/lwlock.cpp
+bool LWLockAcquire(LWLock *lock, LWLockMode mode, bool need_update_lockid)
+……
+for (;;) {
+        bool mustwait = false;
+        mustwait = LWLockAttemptLock(lock, mode); /* first attempt */
+        if (!mustwait) {
+            LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
+            break; /* got the lock */
+        }
+        instr_stmt_report_lock(LWLOCK_WAIT_START, mode, NULL, lock->tranche);
+        pgstat_report_waitevent(PG_WAIT_LWLOCK | lock->tranche);
+
+        LWLockQueueSelf(lock, mode); /* queue ourselves and wait for the lock to be freed */
+        mustwait = LWLockAttemptLock(lock, mode); /* ok, grabbed the lock the second time round, need to undo queueing */
+……
+}
+```
+
+Releasing the lock:
+
+```c++
+// src/gausskernel/storage/lmgr/lwlock.cpp
+void LWLockRelease(LWLock *lock)
+{
+……
+    /* We're still waiting for backends to get scheduled, don't wake them up again. */
+    check_waiters =
+        ((oldstate & (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK)) == (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK))
+        && ((oldstate & LW_LOCK_MASK) == 0);
+    /* As waking up waiters requires the spinlock to be acquired, only do so
+     * if necessary. */
+    if (check_waiters) {
+        /* XXX: remove before commit? */
+        LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
+        // wake up the queued threads; this appears to be implemented with semaphores
+        LWLockWakeup(lock);
+    }
+……
+}
+```
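+
+The LW_SHARED/LW_EXCLUSIVE pair is the classic reader-writer protocol that the state word and wait queue above implement. As a rough stand-alone analogy (std::shared_mutex, not openGauss code), the semantics are: any number of concurrent shared holders, or exactly one exclusive holder:
+
+```c++
+#include <cstdio>
+#include <mutex>
+#include <shared_mutex>
+#include <thread>
+#include <vector>
+
+std::shared_mutex buf_lock; // plays the role of one LWLock
+int shared_buf = 0;         // the structure the lock protects
+
+void reader(int id)
+{
+    std::shared_lock<std::shared_mutex> guard(buf_lock); // ~ LWLockAcquire(lock, LW_SHARED)
+    printf("reader %d sees %d\n", id, shared_buf);
+} // guard destructor ~ LWLockRelease(lock)
+
+void writer()
+{
+    std::unique_lock<std::shared_mutex> guard(buf_lock); // ~ LWLockAcquire(lock, LW_EXCLUSIVE)
+    ++shared_buf;
+}
+
+int main()
+{
+    std::vector<std::thread> ts;
+    ts.emplace_back(writer);
+    for (int i = 0; i < 3; ++i) {
+        ts.emplace_back(reader, i);
+    }
+    for (auto& t : ts) {
+        t.join();
+    }
+    return 0;
+}
+```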
+
+openGauss implements LWLock deadlock detection with a dedicated monitor routine.
+
+Deadlock detection:
+
+```c++
+// src/gausskernel/process/postmaster/lwlockmonitor.cpp
+NON_EXEC_STATIC void FaultMonitorMain()
+{
+……
+for (;;) {
+……
+if (u_sess->attr.attr_common.fault_mon_timeout > 0) {
+    if (NULL != prev_snapshot) {
+……
+        /* phase 1: light-weight detect using fast changcount */
+
+        // read the thread- and lock-id-related timestamps from the statistics
+        // structures and record them in a pointer queue
+        curr_snapshot = pgstat_read_light_detect();
+        // compare with the snapshot taken a few seconds earlier; return true if a
+        // thread and lock id that may be deadlocked is found, false otherwise
+        continue_next = lwm_compare_light_detect(prev_snapshot, curr_snapshot);
+        if (continue_next) {
+            /* phase 2 if needed: heavy-weight diagnosis for lwlock deadlock */
+            .....
+        }
+        if (continue_next) {
+            /* phase 3 if needed: auto healing for lwlock deadlock */
+            lw_deadlock_auto_healing(&deadlock);
+        }
+
+……
+}
+
+void lw_deadlock_auto_healing(lwm_deadlock* deadlock)
+{
+    /* choose one thread to be victim */
+    int info_idx = 0;
+    int backend_victim = choose_one_victim(deadlock, &info_idx);
+    if (backend_victim >= 0) {
+        if (backend_victim >= MAX_BACKEND_SLOT) {
+            ereport(PANIC, (errmsg("process suicides because the victim of lwlock deadlock is an auxiliary thread")));
+            return;
+        }
+        /* wake up this victim */
+        lw_deadlock_info* info = deadlock->info + info_idx;
+        // the remedy is to pick one victim thread and wake it up
+        wakeup_victim(info->lock, info->waiter.thread_id);
+    } else {
+        /* LOG, maybe deadlock disappear */
+        ereport(LOG, (errmsg("victim not found, maybe lwlock deadlock disappear")));
+    }
+}
+```
+
+## Regular Lock (Lock)
+
+Regular locks protect the database objects accessed by user workloads. A lock is located by hashing its tag; multiple lock modes are supported; deadlock detection exists, and when a deadlock is detected, one transaction is chosen and rolled back.
+
+Level-1 locks are generally used for SELECT queries; level-3 locks for basic INSERT, UPDATE, and DELETE; level-4 locks for operations such as VACUUM and ANALYZE; level-8 locks for the various DDL statements.
+
+The regular lock modes by level:
+
+```c++
+/* NoLock is not a lock mode, but a flag value meaning "don't get a lock" */
+#define NoLock 0
+#define AccessShareLock 1  /* SELECT */
+#define RowShareLock 2     /* SELECT FOR UPDATE/FOR SHARE */
+#define RowExclusiveLock 3 /* INSERT, UPDATE, DELETE */
+#define ShareUpdateExclusiveLock                         \
+    4               /* VACUUM (non-FULL),ANALYZE, CREATE \
+                     * INDEX CONCURRENTLY */
+#define ShareLock 5 /* CREATE INDEX (WITHOUT CONCURRENTLY) */
+#define ShareRowExclusiveLock                \
+    6 /* like EXCLUSIVE MODE, but allows ROW \
+       * SHARE */
+#define ExclusiveLock                  \
+    7 /* blocks ROW SHARE/SELECT...FOR \
+       * UPDATE */
+#define AccessExclusiveLock              \
+    8 /* ALTER TABLE, DROP TABLE, VACUUM \
+       * FULL, and unqualified LOCK TABLE */
+```
+
+The regular lock data structures:
+
+```c++
+typedef struct LOCK {
+    /* hash key */
+    LOCKTAG tag; /* unique identifier of the lock object */
+    /* data */
+    LOCKMASK grantMask;           /* bitmask of modes already granted */
+    LOCKMASK waitMask;            /* bitmask of modes being waited for */
+    SHM_QUEUE procLocks;          /* list of PROCLOCK objects associated with this lock */
+    PROC_QUEUE waitProcs;         /* queue of PGPROC objects waiting on this lock */
+    int requested[MAX_LOCKMODES]; /* counts of requested locks */
+    int nRequested;               /* total of requested[] array */
+    int granted[MAX_LOCKMODES];   /* counts of granted locks */
+    int nGranted;                 /* total of granted[] array */
+} LOCK;
+
+// The PROCLOCK structure chains together the information about the threads
+// waiting on and holding one and the same lock object.
+typedef struct PROCLOCK {
+    /* tag */
+    PROCLOCKTAG tag; /* unique identifier of the proclock object */
+    /* data */
+    PGPROC  *groupLeader; /* group leader, or NULL if no lock group */
+    LOCKMASK holdMask;    /* bitmask of lock modes already acquired */
+    LOCKMASK releaseMask; /* bitmask of lock modes due to be released */
+    SHM_QUEUE lockLink;   /* pointer into the lock object's list */
+    SHM_QUEUE procLink;   /* pointer into the PGPROC list */
+} PROCLOCK;
+```
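+
+To make grantMask and waitMask concrete: each granted mode sets one bit, and a new request conflicts when its row of the conflict matrix intersects the granted bits. The sketch below is my own illustration, not openGauss source; the table mirrors the well-known PostgreSQL-family conflict matrix that openGauss inherits:
+
+```c++
+#include <cstdint>
+#include <cstdio>
+
+typedef uint32_t LOCKMASK;
+#define LOCKBIT_ON(mode) (1u << (mode))
+
+// Conflict matrix indexed by lock mode (1..8), PostgreSQL-style.
+static const LOCKMASK conflictTab[9] = {
+    0,
+    /* AccessShareLock */          LOCKBIT_ON(8),
+    /* RowShareLock */             LOCKBIT_ON(7) | LOCKBIT_ON(8),
+    /* RowExclusiveLock */         LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
+    /* ShareUpdateExclusiveLock */ LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
+    /* ShareLock */                LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
+    /* ShareRowExclusiveLock */    LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
+    /* ExclusiveLock */            LOCKBIT_ON(2) | LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8),
+    /* AccessExclusiveLock */      LOCKBIT_ON(1) | LOCKBIT_ON(2) | LOCKBIT_ON(3) | LOCKBIT_ON(4) | LOCKBIT_ON(5) | LOCKBIT_ON(6) | LOCKBIT_ON(7) | LOCKBIT_ON(8)
+};
+
+// A request conflicts when any already-granted mode's bit is in its conflict mask.
+static bool conflicts(int requested, LOCKMASK grantMask)
+{
+    return (conflictTab[requested] & grantMask) != 0;
+}
+
+int main()
+{
+    LOCKMASK granted = LOCKBIT_ON(1);      // one AccessShareLock (a SELECT) granted
+    printf("%d\n", conflicts(3, granted)); // RowExclusiveLock vs AccessShare: 0 (compatible)
+    printf("%d\n", conflicts(8, granted)); // AccessExclusiveLock vs AccessShare: 1 (conflict)
+    return 0;
+}
+```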
+
+As an example, here is how the lock manager acquires and releases a lock on a tuple.
+
+Acquiring the lock:
+
+```c++
+// src/gausskernel/storage/lmgr/lmgr.cpp
+void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, bool allow_con_update)
+{
+    LOCKTAG tag;
+    SET_LOCKTAG_TUPLE(tag,
+                      relation->rd_lockInfo.lockRelId.dbId,
+                      relation->rd_lockInfo.lockRelId.relId,
+                      relation->rd_lockInfo.lockRelId.bktId,
+                      ItemPointerGetBlockNumber(tid),
+                      ItemPointerGetOffsetNumber(tid));
+    // acquire a regular lock
+    (void)LockAcquire(&tag, lockmode, false, false, allow_con_update);
+}
+
+// src/gausskernel/storage/lmgr/lock.cpp
+// LockAcquire calls LockAcquireExtended, which in turn calls LockAcquireExtendedXC
+static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, bool only_increment, bool allow_con_update)
+{
+……
+    localtag.lock = *locktag;
+    localtag.mode = lockmode;
+    // look up the lock we need
+    locallock = (LOCALLOCK *)hash_search(t_thrd.storage_cxt.LockMethodLocalHash, (void *)&localtag, HASH_ENTER, &found);
+    if (!found) { // the lock was not found
+        // initialize it
+        ...
+    }
+    else {
+        ...
+        // add ourselves as one of the lock's owners
+    }
+    if (locallock->nLocks > 0) { // we already hold this lock
+        // grant, report, return
+        ...
+    }
+    // a series of if statements branch on lockmode and the other conditions
+……
+}
+```
+
+Releasing the lock:
+
+```c++
+// src/gausskernel/storage/lmgr/lmgr.cpp
+void UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode)
+{
+    LOCKTAG tag;
+    SET_LOCKTAG_TUPLE(tag,
+                      relation->rd_lockInfo.lockRelId.dbId,
+                      relation->rd_lockInfo.lockRelId.relId,
+                      relation->rd_lockInfo.lockRelId.bktId,
+                      ItemPointerGetBlockNumber(tid),
+                      ItemPointerGetOffsetNumber(tid));
+    (void)LockRelease(&tag, lockmode, false);
+}
+
+// src/gausskernel/storage/lmgr/lock.cpp
+bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
+{
+……
+    /*
+     * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
+     */
+    wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
+    CleanUpLock(lock, proclock, lockMethodTable, locallock->hashcode, wakeupNeeded);
+    LWLockRelease(partitionLock);
+    instr_stmt_report_lock(LOCK_RELEASE, lockmode, locktag);
+    RemoveLocalLock(locallock);
+……
+}
+static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
+{
+……
+    if (lock->nRequested == 0) {
+        …… // The caller just released the last lock, so garbage-collect the lock object.
+    } else if (wakeupNeeded) {
+        // wake up a waiting thread
+        ProcLockWakeup(lockMethodTable, lock, proclock);
+    }
+……
+}
+```
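+
+A detail worth noticing in LockAcquireExtendedXC is the backend-local fast path: the (LOCKTAG, mode) pair is first looked up in a local hash, so re-acquiring a lock the backend already holds is only a counter bump and never touches shared memory. Here is my own stand-alone sketch of that idea, not openGauss source:
+
+```c++
+#include <cstdint>
+#include <cstdio>
+#include <unordered_map>
+
+// Key playing the role of LOCALLOCKTAG: the lock's tag plus the requested mode.
+struct LocalLockKey {
+    uint64_t locktag; // stands in for the full LOCKTAG structure
+    int mode;
+    bool operator==(const LocalLockKey& o) const { return locktag == o.locktag && mode == o.mode; }
+};
+struct LocalLockKeyHash {
+    size_t operator()(const LocalLockKey& k) const
+    {
+        return std::hash<uint64_t>()(k.locktag) * 31u + std::hash<int>()(k.mode);
+    }
+};
+
+// The mapped int plays the role of LOCALLOCK::nLocks.
+static std::unordered_map<LocalLockKey, int, LocalLockKeyHash> localLockTable;
+
+static void AcquireSketch(uint64_t tag, int mode)
+{
+    int& nLocks = localLockTable[LocalLockKey{tag, mode}]; // like hash_search(..., HASH_ENTER, ...)
+    if (nLocks > 0) {
+        ++nLocks; // already held locally: skip the shared lock table entirely
+        return;
+    }
+    // ... the real code now negotiates in the shared lock table (omitted) ...
+    nLocks = 1;
+}
+
+int main()
+{
+    AcquireSketch(42, 1); // e.g. AccessShareLock on some relation
+    AcquireSketch(42, 1); // re-grant: fast path only
+    printf("nLocks = %d\n", localLockTable[LocalLockKey{42, 1}]); // prints nLocks = 2
+    return 0;
+}
+```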
+
+When acquiring a lock, openGauss grants it immediately if there is no conflict; if there is one, it arms a timer and goes to sleep, and after a while the timer wakes it up to run deadlock detection.
+
+Deadlock detection:
+
+```c++
+// src/gausskernel/storage/lmgr/proc.cpp
+// called from handle_sig_alarm(SIGNAL_ARGS) once the wait timer decides the wait has timed out
+static void CheckDeadLock(void)
+{
+    int i;
+    // acquire exclusive locks on the entire shared lock data structure
+    for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
+        (void)LWLockAcquire(GetMainLWLockByIndex(FirstLockMgrLock + i), LW_EXCLUSIVE);
+    // double-check whether we can simply keep running
+    if (t_thrd.proc->links.prev == NULL || t_thrd.proc->links.next == NULL)
+        goto check_done;
+#ifdef LOCK_DEBUG
+    if (u_sess->attr.attr_storage.Debug_deadlocks)
+        DumpAllLocks();
+#endif
+    // run deadlock detection; returns the type of deadlock
+    t_thrd.storage_cxt.deadlock_state = DeadLockCheck(t_thrd.proc);
+    if (t_thrd.storage_cxt.deadlock_state == DS_HARD_DEADLOCK) { // a hard deadlock
+        Assert(t_thrd.proc->waitLock != NULL);
+        // remove ourselves from the wait queue before going back to sleep
+        RemoveFromWaitQueue(t_thrd.proc, LockTagHashCode(&(t_thrd.proc->waitLock->tag)));
+        PGSemaphoreUnlock(&t_thrd.proc->sem);
+    } else if (u_sess->attr.attr_storage.log_lock_waits ||
+               t_thrd.storage_cxt.deadlock_state == DS_BLOCKED_BY_AUTOVACUUM ||
+               t_thrd.storage_cxt.deadlock_state == DS_BLOCKED_BY_REDISTRIBUTION) {
+        PGSemaphoreUnlock(&t_thrd.proc->sem); // post the sleep semaphore
+    } else if (u_sess->attr.attr_storage.LockWaitTimeout > 0) {
+        PGSemaphoreUnlock(&t_thrd.proc->sem); // post the sleep semaphore
+    }
+}
+// src/gausskernel/storage/lmgr/deadlock.cpp
+DeadLockState DeadLockCheck(PGPROC *proc)
+{
+……
+    /* Search for a deadlock and a possible resolution; returns true when no
+     * resolution exists (a hard deadlock), false when there is no deadlock. */
+    if (DeadLockCheckRecurse(proc)) {
+        /*
+         * Call FindLockCycle one more time, to record the correct
+         * deadlockDetails[] for the basic state with no rearrangements.
+         */
+        int nSoftEdges;
+        TRACE_POSTGRESQL_DEADLOCK_FOUND();
+        t_thrd.storage_cxt.nWaitOrders = 0;
+        if (!FindLockCycle(proc, t_thrd.storage_cxt.possibleConstraints, &nSoftEdges)) {
+            elog(FATAL, "deadlock seems to have disappeared");
+        }
+        return DS_HARD_DEADLOCK; /* cannot find a non-deadlocked state */
+    }
+    // afterwards determine which specific kind of deadlock it is
+……
+}
+```
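+
+Stripped of the queue-reordering machinery, the heart of DeadLockCheck is a cycle search in the waits-for graph between backends. Here is my own compact stand-alone sketch of that core idea; the real algorithm additionally distinguishes soft edges it can remove by reordering wait queues:
+
+```c++
+#include <cstdio>
+#include <vector>
+
+// waits_for[a] lists the processes that process a is blocked on.
+// state: 0 = unvisited, 1 = on the current DFS path, 2 = fully explored.
+static bool HasCycle(int v, const std::vector<std::vector<int>>& waits_for, std::vector<int>& state)
+{
+    state[v] = 1;
+    for (int w : waits_for[v]) {
+        if (state[w] == 1) {
+            return true; // back edge onto the current path: a deadlock cycle
+        }
+        if (state[w] == 0 && HasCycle(w, waits_for, state)) {
+            return true;
+        }
+    }
+    state[v] = 2;
+    return false;
+}
+
+int main()
+{
+    // P0 waits on P1, P1 waits on P2, P2 waits on P0: a hard deadlock.
+    std::vector<std::vector<int>> waits_for = {{1}, {2}, {0}};
+    std::vector<int> state(waits_for.size(), 0);
+    printf("deadlock: %s\n", HasCycle(0, waits_for, state) ? "yes" : "no"); // prints "deadlock: yes"
+    return 0;
+}
+```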
+
+## References
+
+openGauss source code
+
+https://gitee.com/opengauss/openGauss-server
+
+openGauss Database Source Code Analysis series: Transaction Mechanism Source Code Analysis (Part 2)
+https://blog.csdn.net/GaussDB/article/details/119532011
+
+openGauss Database Source Code Analysis series: Transaction Mechanism Source Code Analysis (Part 1)
+https://blog.csdn.net/GaussDB/article/details/119388841
diff --git "a/content/zh/post/adadaadadade/\345\233\275\344\272\247\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" "b/content/zh/post/adadaadadade/\345\233\275\344\272\247\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..b88224bf3dc1b6a1e322147bb3392847c86777cd --- /dev/null +++ "b/content/zh/post/adadaadadade/\345\233\275\344\272\247\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214.md" @@ -0,0 +1,545 @@
++++
+title = "Installing and Running the Domestic Open-Source Database OpenGauss"
+date = "2021-12-13"
+tags = ["Installing and Running the Domestic Open-Source Database OpenGauss"]
+archives = "2021-12"
+author = "adadaadadade"
+summary = "Installing and Running the Domestic Open-Source Database OpenGauss"
+times = "17:30"
++++
+
+## Step 1: Installing OpenGauss
+
+### Environment
+
+OS: openEuler 20.03 64bit with ARM
+
+Architecture: arm64
+
+Deployment: single node
+
+### Installation procedure
+
+#### 1. Environment configuration
+
+Install the dependency packages:
+
+~~~
+yum install libaio-devel flex bison ncurses-devel glibc-devel patch readline-devel
+~~~
+
+#### 2. Create the XML configuration file
+
+Create the cluster_config.xml configuration file and fill it in:
+
+~~~
+<!-- Place the single-node cluster_config.xml template from the openGauss
+     installation guide here, adjusted to your host name, IP address, and
+     installation paths. -->
+~~~
+
+Note that the node hostname must be identical to the one in /etc/hostname.
+
+#### 3. Initialize the installation environment
+
+1. Log in as the root user to any host on which openGauss is to be installed, and create the planned directory for the installation package.
+
+~~~shell
+mkdir -p /opt/software/openGauss
+chmod 755 -R /opt/software
+~~~
+
+2. Download the installation package and upload it, together with the configuration file cluster_config.xml, into the directory created in the previous step.
+
+~~~shell
+wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/arm/openGauss-2.0.1-openEuler-64bit-all.tar.gz
+~~~
+
+3. Unpack the installation package.
+
+~~~shell
+tar -zxvf openGauss-2.0.1-openEuler-64bit-all.tar.gz
+tar jxvf openGauss-2.0.1-openEuler-64bit.tar.bz2
+tar -zxvf openGauss-2.0.1-openEuler-64bit-om.tar.gz
+tar -zxvf upgrade_sql.tar.gz
+~~~
+
+4. Enter the directory holding the tool scripts.
+
+~~~shell
+cd /opt/software/openGauss/script
+~~~
+
+5. On openEuler, to make the Python version check pass, open gspylib/common/CheckPythonVersion.py with the command below, change `if not pythonVersion == (3, 6):` to `if not pythonVersion >= (3, 6):`, press ESC to enter command mode, and run **:wq** to save and exit. (In my actual run the file turned out to need no change.)
+
+~~~shell
+vi gspylib/common/CheckPythonVersion.py
+~~~
+
+6. On openEuler, open the performance.sh file with the command below, comment out `sysctl -w vm.min_free_kbytes=112640 &> /dev/null` with a leading #, press ESC to enter command mode, and run **:wq** to save and exit.
+
+~~~shell
+vi /etc/profile.d/performance.sh
+~~~
+
+7. To make sure the correct openssl version is used, load the lib directory shipped in the package before running the preinstall. Run the following command, where _{packagePath}_ is the directory the package was placed in, /opt/software/openGauss in this example.
+
+~~~shell
+export LD_LIBRARY_PATH=/opt/software/openGauss/script/gspylib/clib:$LD_LIBRARY_PATH
+~~~
+
+8. To make sure the installation succeeds, check that hostname and /etc/hostname agree; the preinstall verifies the hostname.
+
+9. Prepare the installation environment with gs_preinstall. In a shared environment, add the --sep-env-file=ENVFILE parameter to keep the environment variables separate and avoid interfering with other users; ENVFILE is a user-chosen path for the separated environment-variable file.
+* Run the preinstall in interactive mode; mutual trust for the OS root user and for the omm user is created automatically during execution:
+
+  ~~~shell
+  ./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml
+  ~~~
+
+  You will be asked for the omm user's password during execution.
+
+  The output should look similar to:
+
+  ~~~
+  plat1:/opt/software/openGauss/script # ./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml
+  Parsing the configuration file.
+  Successfully parsed the configuration file.
+  Installing the tools on the local node.
+  Successfully installed the tools on the local node.
+  Are you sure you want to create trust for root (yes/no)? yes
+  Please enter password for root.
+  Password:
+  Creating SSH trust for the root permission user.
+  Checking network information.
+  All nodes in the network are Normal.
+  Successfully checked network information.
+  Creating SSH trust.
+  Creating the local key file.
+  Successfully created the local key files.
+  Appending local ID to authorized_keys.
+  Successfully appended local ID to authorized_keys.
+  Updating the known_hosts file.
+  Successfully updated the known_hosts file.
+  Appending authorized_key on the remote node.
+  Successfully appended authorized_key on all remote node.
+  Checking common authentication file content.
+  Successfully checked common authentication content.
+  Distributing SSH trust file to all node.
+  Successfully distributed SSH trust file to all node.
+  Verifying SSH trust on all hosts.
+  Successfully verified SSH trust on all hosts.
+  Successfully created SSH trust.
+  Successfully created SSH trust for the root permission user.
+  Setting pssh path
+  Successfully set core path.
+  Distributing package.
+  Begin to distribute package to tool path.
+  Successfully distribute package to tool path.
+  Begin to distribute package to package path.
+  Successfully distribute package to package path.
+  Successfully distributed package.
+  Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes
+  Please enter password for cluster user.
+  Password:
+  Please enter password for cluster user again.
+  Password:
+  Successfully created [omm] user on all nodes.
+  Preparing SSH service.
+  Successfully prepared SSH service.
+  Installing the tools in the cluster.
+  Successfully installed the tools in the cluster.
+  Checking hostname mapping.
+  Successfully checked hostname mapping.
+  Creating SSH trust for [omm] user.
+  Checking network information.
+  All nodes in the network are Normal.
+  Successfully checked network information.
+  Creating SSH trust.
+  Creating the local key file.
+  Successfully created the local key files.
+  Appending local ID to authorized_keys.
+  Successfully appended local ID to authorized_keys.
+  Updating the known_hosts file.
+  Successfully updated the known_hosts file.
+  Appending authorized_key on the remote node.
+  Successfully appended authorized_key on all remote node.
+  Checking common authentication file content.
+  Successfully checked common authentication content.
+  Distributing SSH trust file to all node.
+  Successfully distributed SSH trust file to all node.
+  Verifying SSH trust on all hosts.
+  Successfully verified SSH trust on all hosts.
+  Successfully created SSH trust.
+  Successfully created SSH trust for [omm] user.
+  Checking OS software.
+  Successfully check os software.
+  Checking OS version.
+  Successfully checked OS version.
+  Creating cluster's path.
+  Successfully created cluster's path.
+  Setting SCTP service.
+  Successfully set SCTP service.
+  Set and check OS parameter.
+  Setting OS parameters.
+  Successfully set OS parameters.
+  Preparing CRON service.
+  Successfully prepared CRON service.
+  Setting user environmental variables.
+  Successfully set user environmental variables.
+  Setting the dynamic link library.
+  Successfully set the dynamic link library.
+  Setting Core file
+  Successfully set core path.
+  Setting pssh path
+  Successfully set pssh path.
+  Set ARM Optimization.
+  No need to set ARM Optimization.
+  Fixing server package owner.
+  Setting finish flag.
+  Successfully set finish flag.
+  Preinstallation succeeded.
+  ~~~
+
+#### 4. Run the installation
+
+If the machine has less than the required 32 GB of memory, a few configuration changes should be made first:
+
+```
+# vim /etc/sysctl.conf
+kernel.shmall = 1125899906842624
+kernel.shmmax = 1351079888211149
+
+# vim /opt/huawei/install/data/db1/postgresql.conf
+cstore_buffers=16MB
+bulk_write_ring_size=128MB
+shared_buffers=128MB
+max_process_memory=2GB
+max_connections=10
+```
+
+Switch to the omm user and run the installation:
+
+```shell
+su - omm
+gs_install -X /opt/software/openGauss/cluster_config.xml
+```
+
+## Step 2: Simple Connection and Use of the Database
+
+### 1. Create a new user and database and grant privileges
+
+Log in with gsql as the omm administrator account, create the user jack and the database testjack, and grant privileges:
+
+~~~mysql
+CREATE USER jack PASSWORD 'Gaussdba@Mpp';
+CREATE DATABASE testjack OWNER jack;
+GRANT SELECT ON pg_catalog.pg_roles to jack;
+GRANT SELECT ON pg_catalog.pg_user_status to jack;
+GRANT ALL PRIVILEGES on TABLESPACE pg_default,pg_global TO jack;
+~~~
+
+Then exit and log in to gsql as the jack user (gsql takes the password with -W):
+
+~~~mysql
+gsql -U jack -d testjack -W "Gaussdba@Mpp"
+~~~
+
+Create a schema:
+
+~~~mysql
+CREATE SCHEMA jack AUTHORIZATION jack;
+~~~
+
+Exit gsql and grant jack host-based access; here client_address is the client's IP address:
+
+~~~mysql
+gs_guc set -N all -I all -h "host all jack client_address/32 sha256"
+~~~
+
+Alternatively, edit pg_hba.conf and add:
+
+~~~mysql
+host all jack client_address/32 sha256
+~~~
+
+### 2. Allow clients to access the database
+
+Run the following; listen_addresses is the parameter name, and server_address is the server IP address the database should listen on (the one the client connects to):
+
+~~~mysql
+gs_guc set -I all -c "listen_addresses='server_address'"
+~~~
+
+Or, as the omm account inside gsql:
+
+~~~mysql
+ALTER SYSTEM SET listen_addresses TO 'server_address';
+~~~
+
+Then restart the database.
+
+### 3. Disable the firewall and open the port
+
+### 4. Access the database with Data Studio
+
+The Data Studio application can be downloaded from the openGauss website.
+
+![](img/image-20210913111340859.png)
+
+Fill in the matching parameters. Note that the Enable SSL option should be unticked here, because SSL additionally requires certificates or keys to be configured.
+
+The UI after connecting:
+
+![](img/image-20210913111551057.png)
+
+### 5. Access the database over JDBC
+
+Here I use a Windows machine as the client that connects to the database.
+
+Download the matching JDBC package from the openGauss website and unpack it.
+
+Create a Gauss.java file:
+
+~~~java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.Statement;
+import java.sql.CallableStatement;
+import java.sql.Types;
+import java.util.Collections;
+import java.util.Properties;
+
+public class Gauss {
+
+    public static void main(String[] args) {
+        Connection connection;
+        ResultSet resultSet;
+        String url = "jdbc:postgresql://address:port/testjack"; // address: host; port: port; testjack: database name
+        String user = "××××"; // database user name
+        String password = "××××"; // that user's password
+        String sql;
+        if (args.length > 0) {
+            sql = args[0];
+        } else {
+            System.out.println("Please pass in one SQL statement");
+            return;
+        }
+
+        if ((connection = getConnect(user, password, url)) != null) {
+            System.out.println(connection.toString());
+        }
+
+        if ((resultSet = execSql(connection, sql)) != null) {
+
+        }
+    }
+
+    // The following wraps connection establishment as one method, so the database
+    // can be reached with a given user name and password.
+    public static Connection getConnect(String username, String passwd, String url) {
+        // Driver class.
+        String driver = "org.postgresql.Driver";
+        // Database connection descriptor.
+        String sourceURL = url;
+        Connection conn = null;
+
+        try {
+            // Load the driver.
+            Class.forName(driver);
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        try {
+            // Create the connection.
+            conn = DriverManager.getConnection(sourceURL, username, passwd);
+            System.out.println("Connection succeed!");
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        return conn;
+    };
+
+    // The following builds the connection using a Properties object as the parameter.
+    public static Connection getConnectUseProp(String username, String passwd, String url) {
+        // Driver class.
+        String driver = "org.postgresql.Driver";
+        // Database connection descriptor.
+        String sourceURL = url + "?";
+        Connection conn = null;
+        Properties info = new Properties();
+
+        try {
+            // Load the driver.
+            Class.forName(driver);
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        try {
+            info.setProperty("user", username);
+            info.setProperty("password", passwd);
+            // Create the connection.
+            conn = DriverManager.getConnection(sourceURL, info);
+            System.out.println("Connection succeed!");
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        return conn;
+    };
+
+    public static ResultSet execSql(Connection conn, String sql) {
+        Statement stmt = null;
+        ResultSet rs = null;
+        SQLWarning sqlw = null;
+        try {
+            stmt = conn.createStatement();
+            // Execute an ordinary SQL statement.
+            stmt.execute(sql);
+            if ((sqlw = stmt.getWarnings()) != null)
+                System.out.println(sqlw.toString());
+            if ((rs = stmt.getResultSet()) != null)
+                printResultSet(rs);
+
+            stmt.close();
+        } catch (SQLException e) {
+            if (stmt != null) {
+                try {
+                    stmt.close();
+                } catch (SQLException e1) {
+                    e1.printStackTrace();
+                }
+            }
+            e.printStackTrace();
+        }
+        return rs;
+    }
+
+    private static void printResultSet(ResultSet rs) {
+        String line = "";
+        try {
+            ResultSetMetaData rsmd = rs.getMetaData();
+            for (int i = 1; i <= rsmd.getColumnCount(); i++) {
+                String label = rsmd.getColumnLabel(i).toString();
+                System.out.print(label + "\t");
+                line += String.join("", Collections.nCopies(label.length(), "-")) + "\t";
+            }
+            System.out.println("\n" + line);
+
+            while (rs.next()) {
+                for (int i = 1; i <= rsmd.getColumnCount(); i++) {
+                    System.out.print(rs.getObject(i).toString() + "\t");
+                }
+                System.out.println("");
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+}
+~~~
+
+Compile it (the encoding option goes before the source file):
+
+~~~shell
+javac -encoding "utf-8" .\Gauss.java
+~~~
+
+Run it. Here I put postgresql.jar in the same directory and create a table nt as a test:
+
+~~~shell
+java -cp ".;postgresql.jar" Gauss "CREATE TABLE nt(id INTEGER, name VARCHAR(20))"
+java -cp ".;postgresql.jar" Gauss "INSERT into nt(id, name) VALUES (1,'n1'),(2,'n2'),(3,'n3');"
+java -cp ".;postgresql.jar" Gauss "SELECT * FROM nt;"
+~~~
+
+The last command prints the following; the connection and the operations clearly succeeded.
+
+~~~
+九月 13, 2021 11:58:25 上午 org.postgresql.core.v3.ConnectionFactoryImpl openConnectionImpl
+信息: [75000bb7-1475-4579-94cb-f53a01bec9eb] Try to connect. IP: *.*.*.*:****
+九月 13, 2021 11:58:26 上午 org.postgresql.core.v3.ConnectionFactoryImpl openConnectionImpl
+信息: [*.*.*.*:****/*.*.*.*:****] Connection is established. ID: 75000bb7-1475-4579-94cb-f53a01bec9eb
+九月 13, 2021 11:58:26 上午 org.postgresql.core.v3.ConnectionFactoryImpl openConnectionImpl
+信息: Connect complete. ID: 75000bb7-1475-4579-94cb-f53a01bec9eb
+Connection succeed!
+id	name
+--	----
+1	n1
+2	n2
+3	n3
+
+~~~
+
diff --git "a/content/zh/post/awei/Centos7.9\345\256\211\350\243\205\344\270\216\351\205\215\347\275\256.md" "b/content/zh/post/awei/Centos7.9\345\256\211\350\243\205\344\270\216\351\205\215\347\275\256.md" new file mode 100644 index 0000000000000000000000000000000000000000..d00c54719a91e2079c51494694531f61a95912f5 --- /dev/null +++ "b/content/zh/post/awei/Centos7.9\345\256\211\350\243\205\344\270\216\351\205\215\347\275\256.md" @@ -0,0 +1,55 @@
++++
+
+title = "***Installing and Configuring CentOS 7.9***"
+date = "2021-12-03"
+tags = ["Installing and using the Huawei openGauss database"]
+archives = "2021-12"
+author = "awei"
+summary = "Huawei openGauss database installation: ***Installing and Configuring CentOS 7.9***"
+times = "17:30"
+
++++
+
+## ***Installing and Configuring CentOS 7.9***
+
+**Part 1.** ***Installing the VMware virtual machine (VMware Workstation 16 Pro)***
+
+1. First download it from the official site: https://www.vmware.com/cn/products/workstation-pro/workstation-pro-evaluation.html
+
+2. Run the downloaded VMware Workstation installer, as shown below: ![img](images/wps27.jpg)![img](images/wps28.jpg)![img](images/wps29.jpg) ![img](images/wps30.jpg)
+
+3. Once everything is ready, click the "Upgrade" button to proceed with the installation. ![img](images/wps31.jpg)![img](images/wps32.jpg)
+
+4. After installation, restart the computer and enter the license key to start using the software. ![img](images/wps33.jpg)
+
+**Part 2.** ***Installing the CentOS operating system on the virtual machine (CentOS 7.9)***
+
+1. Download the CentOS image from the official site. The lab guide calls for CentOS 7.6, which I could not find, so I installed CentOS 7.9; before installing the openGauss database one file must be edited, otherwise a version-mismatch error is reported. I downloaded the Everything edition (CentOS-7-x86_64-Everything-2009.iso). ![img](images/wps34.jpg)
+
+2. After the download, open the freshly installed VMware Workstation, click File -> New Virtual Machine, choose Custom, then click Next. ![img](images/wps35.jpg) ![img](images/wps36.jpg)
+
+3. Choose to install the operating system later, click Next, and select Linux as the OS with CentOS 7 64-bit as the version. ![img](images/wps37.jpg) ![img](images/wps38.jpg)
+
+4. Click Next, name the virtual machine, and choose its location. ![img](images/wps39.jpg)
+
+5. Pick the settings as you see fit (give it as much memory as you can to avoid problems later; choose NAT for the network connection; the rest can stay unchanged). ![img](images/wps40.jpg) ![img](images/wps41.jpg) ![img](images/wps42.jpg)
+
+6. Continue with the remaining settings. ![img](images/wps43.jpg) ![img](images/wps44.jpg)![img](images/wps45.jpg)
+
+7. Keep clicking Next until the wizard is ready to create the virtual machine, then choose Customize Hardware ![img](images/wps46.jpg) and use the previously downloaded ISO image file. ![img](images/wps47.jpg)
+
+8. When done, power on the virtual machine; after startup the following choices appear:
+
+   ```
+   Install CentOS Linux 7                      install CentOS 7
+   Test this media & install CentOS Linux 7    verify the media and install CentOS 7
+   Troubleshooting                             repair faults
+   ```
+
+   Choose the first entry to install CentOS 7 directly, press Enter, and the installation screen below appears. ![img](images/wps48.jpg)
+
+9. After installation, enter CentOS 7 (choose Chinese, Simplified Chinese; walk through the settings you need in order; pick the GNOME desktop under Software Selection for easier use; partition the disk under Installation Destination; enable networking under Network & Host Name). ![img](images/wps49.jpg) ![img](images/wps50.jpg) ![img](images/wps51.jpg) ![img](images/wps52.jpg) Then add a user and set the name and password. Finally click Begin Installation.
+
+10. After the installation succeeds, reboot and log in.
+
+ \ No newline at end of file diff --git a/content/zh/post/awei/Snipaste_2021-09-09_14-30-17.png b/content/zh/post/awei/Snipaste_2021-09-09_14-30-17.png new file mode 100644 index 0000000000000000000000000000000000000000..5163539bb9e73d0b4429423f843126d43f6f4d32 Binary files /dev/null and b/content/zh/post/awei/Snipaste_2021-09-09_14-30-17.png differ diff --git a/content/zh/post/awei/images/wps1.jpg b/content/zh/post/awei/images/wps1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9a0e757d707a1f3d2fafed34fa6966fd19cae32 Binary files /dev/null and b/content/zh/post/awei/images/wps1.jpg differ diff --git a/content/zh/post/awei/images/wps10.jpg b/content/zh/post/awei/images/wps10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87bf450ec8d07bd08787c77fc8ed8cc0882ae0fb Binary files /dev/null and b/content/zh/post/awei/images/wps10.jpg differ diff --git a/content/zh/post/awei/images/wps11.jpg b/content/zh/post/awei/images/wps11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15c70abe4b2304ed6ca12814abea5ca93e636367 Binary files /dev/null and b/content/zh/post/awei/images/wps11.jpg differ diff --git a/content/zh/post/awei/images/wps12.jpg b/content/zh/post/awei/images/wps12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f24c551328f845cc6da0aa463f49513d3747f3b7 Binary files /dev/null and b/content/zh/post/awei/images/wps12.jpg differ diff --git a/content/zh/post/awei/images/wps13.jpg b/content/zh/post/awei/images/wps13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..332c7b83994e3cc08b236ef3b5de4a436991553b Binary files /dev/null and b/content/zh/post/awei/images/wps13.jpg differ diff --git a/content/zh/post/awei/images/wps14.jpg b/content/zh/post/awei/images/wps14.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f7603084b5362761053e815a13e90e096a7a0da Binary files /dev/null and b/content/zh/post/awei/images/wps14.jpg differ diff --git a/content/zh/post/awei/images/wps15.jpg b/content/zh/post/awei/images/wps15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c749ce1a76be71c2c2d0b4718c98dc042c13fb84 Binary files /dev/null and b/content/zh/post/awei/images/wps15.jpg differ diff --git a/content/zh/post/awei/images/wps16.jpg b/content/zh/post/awei/images/wps16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f24c551328f845cc6da0aa463f49513d3747f3b7 Binary files /dev/null and b/content/zh/post/awei/images/wps16.jpg differ diff --git a/content/zh/post/awei/images/wps17.jpg b/content/zh/post/awei/images/wps17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b088d3b9bb142a5655d949f3612674964d7e56d Binary files /dev/null and b/content/zh/post/awei/images/wps17.jpg differ diff --git a/content/zh/post/awei/images/wps2.jpg b/content/zh/post/awei/images/wps2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8ce4841fe64c6f03c7c9890791971c4a029388b Binary files /dev/null and b/content/zh/post/awei/images/wps2.jpg differ diff --git a/content/zh/post/awei/images/wps27.jpg b/content/zh/post/awei/images/wps27.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d6f506fdea10821bc27f98a7ac9d85ba6d55fe2 Binary files /dev/null and b/content/zh/post/awei/images/wps27.jpg differ diff --git a/content/zh/post/awei/images/wps28.jpg b/content/zh/post/awei/images/wps28.jpg new file mode 100644 index
0000000000000000000000000000000000000000..f9743a71e4ccbc31bedc35e9f126dc04c932f69b Binary files /dev/null and b/content/zh/post/awei/images/wps28.jpg differ diff --git a/content/zh/post/awei/images/wps29.jpg b/content/zh/post/awei/images/wps29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7bb292e18910567fdcc122825f827bce7f49aba Binary files /dev/null and b/content/zh/post/awei/images/wps29.jpg differ diff --git a/content/zh/post/awei/images/wps3.jpg b/content/zh/post/awei/images/wps3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..487aa33bb030768e127f01a41848c20b30cfe0ea Binary files /dev/null and b/content/zh/post/awei/images/wps3.jpg differ diff --git a/content/zh/post/awei/images/wps30.jpg b/content/zh/post/awei/images/wps30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29ca0c8f2870146f5e5f68e203cab2155f82cd6a Binary files /dev/null and b/content/zh/post/awei/images/wps30.jpg differ diff --git a/content/zh/post/awei/images/wps31.jpg b/content/zh/post/awei/images/wps31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bdec83aba18390b79113caa7afbd9da5f7dbbb43 Binary files /dev/null and b/content/zh/post/awei/images/wps31.jpg differ diff --git a/content/zh/post/awei/images/wps32.jpg b/content/zh/post/awei/images/wps32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a020ab6320e8a272c6890c3ff498fd04fbf02459 Binary files /dev/null and b/content/zh/post/awei/images/wps32.jpg differ diff --git a/content/zh/post/awei/images/wps33.jpg b/content/zh/post/awei/images/wps33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e7e47e5b9b738e8dcd4d93ef9f8131dd1e0208d Binary files /dev/null and b/content/zh/post/awei/images/wps33.jpg differ diff --git a/content/zh/post/awei/images/wps34.jpg b/content/zh/post/awei/images/wps34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97667bed337e2de22ecc4681f6232be3320ac0af Binary files /dev/null and b/content/zh/post/awei/images/wps34.jpg differ diff --git a/content/zh/post/awei/images/wps35.jpg b/content/zh/post/awei/images/wps35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ce1c1ae800e958999ae531327f94dc2fa240405 Binary files /dev/null and b/content/zh/post/awei/images/wps35.jpg differ diff --git a/content/zh/post/awei/images/wps36.jpg b/content/zh/post/awei/images/wps36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45deae9deb329bb3d2a97cff86c38e362fbde0f4 Binary files /dev/null and b/content/zh/post/awei/images/wps36.jpg differ diff --git a/content/zh/post/awei/images/wps37.jpg b/content/zh/post/awei/images/wps37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19bd58db6bf3ece6072259985d782b4b8699e214 Binary files /dev/null and b/content/zh/post/awei/images/wps37.jpg differ diff --git a/content/zh/post/awei/images/wps38.jpg b/content/zh/post/awei/images/wps38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..665327c0176f4251a607117ba82f4b60d295b046 Binary files /dev/null and b/content/zh/post/awei/images/wps38.jpg differ diff --git a/content/zh/post/awei/images/wps39.jpg b/content/zh/post/awei/images/wps39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d2f2a9afaeb7b4a48fc8c1f0ec77e8e139a19e3 Binary files /dev/null and b/content/zh/post/awei/images/wps39.jpg differ diff --git a/content/zh/post/awei/images/wps4.png b/content/zh/post/awei/images/wps4.png new file 
mode 100644 index 0000000000000000000000000000000000000000..e9f2e227249d0db3ece49f9bbb0fb48430adc7f3 Binary files /dev/null and b/content/zh/post/awei/images/wps4.png differ diff --git a/content/zh/post/awei/images/wps40.jpg b/content/zh/post/awei/images/wps40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7626660777a1b8416984536ab64184f134f7cf93 Binary files /dev/null and b/content/zh/post/awei/images/wps40.jpg differ diff --git a/content/zh/post/awei/images/wps41.jpg b/content/zh/post/awei/images/wps41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6821858a211882f805552dd70d679623c9b8e940 Binary files /dev/null and b/content/zh/post/awei/images/wps41.jpg differ diff --git a/content/zh/post/awei/images/wps42.jpg b/content/zh/post/awei/images/wps42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ede1960e01a5e4ced879a5b3a1ae1828fb292b70 Binary files /dev/null and b/content/zh/post/awei/images/wps42.jpg differ diff --git a/content/zh/post/awei/images/wps43.jpg b/content/zh/post/awei/images/wps43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34c67148308d0dfa088ef98ae560f9e27ef62b46 Binary files /dev/null and b/content/zh/post/awei/images/wps43.jpg differ diff --git a/content/zh/post/awei/images/wps44.jpg b/content/zh/post/awei/images/wps44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17cd82ad4c30b2bd00659941e441209e78bd1ba8 Binary files /dev/null and b/content/zh/post/awei/images/wps44.jpg differ diff --git a/content/zh/post/awei/images/wps45.jpg b/content/zh/post/awei/images/wps45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8949952b5eca7f8358f47d2489eaf69b927d0633 Binary files /dev/null and b/content/zh/post/awei/images/wps45.jpg differ diff --git a/content/zh/post/awei/images/wps46.jpg b/content/zh/post/awei/images/wps46.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a767b2a1ab2183fe44cde21c0e4281358b309b8c Binary files /dev/null and b/content/zh/post/awei/images/wps46.jpg differ diff --git a/content/zh/post/awei/images/wps47.jpg b/content/zh/post/awei/images/wps47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a301ce885a3ce8cc16dc1dc4f07f5eaa433ccc00 Binary files /dev/null and b/content/zh/post/awei/images/wps47.jpg differ diff --git a/content/zh/post/awei/images/wps48.jpg b/content/zh/post/awei/images/wps48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38747624065b0fa0013a35c421fd61b3dd5befe6 Binary files /dev/null and b/content/zh/post/awei/images/wps48.jpg differ diff --git a/content/zh/post/awei/images/wps49.jpg b/content/zh/post/awei/images/wps49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc2924c936a163865481b731b41211266c6daf3f Binary files /dev/null and b/content/zh/post/awei/images/wps49.jpg differ diff --git a/content/zh/post/awei/images/wps5.jpg b/content/zh/post/awei/images/wps5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa9db5e611c0d24162de66e574480ebac843b90e Binary files /dev/null and b/content/zh/post/awei/images/wps5.jpg differ diff --git a/content/zh/post/awei/images/wps50.jpg b/content/zh/post/awei/images/wps50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..774038e09df82ca20e51ba9542f72752d079f90f Binary files /dev/null and b/content/zh/post/awei/images/wps50.jpg differ diff --git a/content/zh/post/awei/images/wps51.jpg 
b/content/zh/post/awei/images/wps51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a38763bafb2967ab8928e5527c95b8259e9dc6a9 Binary files /dev/null and b/content/zh/post/awei/images/wps51.jpg differ diff --git a/content/zh/post/awei/images/wps52.jpg b/content/zh/post/awei/images/wps52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79d428113186fd01f4d0a3aa4760b6fccc94af0f Binary files /dev/null and b/content/zh/post/awei/images/wps52.jpg differ diff --git a/content/zh/post/awei/images/wps6.jpg b/content/zh/post/awei/images/wps6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b50f4f0b5173652885dbc42da0d73b6c8403cbf Binary files /dev/null and b/content/zh/post/awei/images/wps6.jpg differ diff --git a/content/zh/post/awei/images/wps7.jpg b/content/zh/post/awei/images/wps7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0790aa4ec9951965bf5cbae0e9fe7c28093e5bf Binary files /dev/null and b/content/zh/post/awei/images/wps7.jpg differ diff --git a/content/zh/post/awei/images/wps8.jpg b/content/zh/post/awei/images/wps8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0311ce81d9a93248aded48ce0a8e15f4d11225b4 Binary files /dev/null and b/content/zh/post/awei/images/wps8.jpg differ diff --git a/content/zh/post/awei/images/wps9.jpg b/content/zh/post/awei/images/wps9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdae13526fa356ae6e0a6e816f71d626e65d7d68 Binary files /dev/null and b/content/zh/post/awei/images/wps9.jpg differ
diff --git "a/content/zh/post/awei/openGauss\345\256\211\350\243\205+\350\277\236\346\216\245.md" "b/content/zh/post/awei/openGauss\345\256\211\350\243\205+\350\277\236\346\216\245.md"
new file mode 100644
index 0000000000000000000000000000000000000000..26012174364876fd2c49b917a204388d77244747
--- /dev/null
+++ "b/content/zh/post/awei/openGauss\345\256\211\350\243\205+\350\277\236\346\216\245.md"
@@ -0,0 +1,332 @@
++++
+
+title = "Installing the Open-Source Database openGauss and Connecting over JDBC"
+date = "2021-12-03"
+tags = ["Installing and running the open-source database openGauss"]
+archives = "2021-12"
+author = "awei"
+summary = "Installing and running the open-source database openGauss"
+times = "17:30"
+
++++
+
+# I. openGauss Installation (Enterprise Edition)
+
+## 1. Obtain the installation package
+
+### 1.1. Download the package for your platform from the openGauss community.
+
+Open the openGauss community download page at https://opengauss.org/zh/download.html, pick the 2.0.0 enterprise-edition package for your platform (openGauss-2.0.0-CentOS-64bit-all.tar.gz), and click "Download".
+
+### 1.2. Check the installation package.
+
+Unpack the package and check that the installation directory and files are complete. In the directory containing the package, run:
+
+`tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz`
+`ls -1b`
+
+The ls command prints something like the following:
+
+![](https://pic.imgdb.cn/item/615130622ab3f51d9113b94a.png)
+---- End
+
+## 2. Modify the operating system configuration
+
+### **2.1. Modify the release file**
+
+If you are not running CentOS 7.6, edit the /etc/redhat-release file and change
+`CentOS Linux release 7.9.2003 (Core)` to `CentOS Linux release 7.6 (Core)`.
+
+![](https://pic.imgdb.cn/item/615fb1c62ab3f51d9149eea9.png)
+
+### **2.2. Disable SELinux and the OS firewall**
+
+**Step 1: Set "SELINUX" to "disabled" in /etc/selinux/config.**
+Open the config file with vim.
+`vim /etc/selinux/config`
+Change the value of "SELINUX" to "disabled", then save and exit with **:wq**.
+`SELINUX=disabled`
+![](https://pic.imgdb.cn/item/615130622ab3f51d9113b952.png)
+
+**Step 2: Reboot the operating system.**
+`reboot`
+**Step 3: Check whether the firewall is off.**
+`systemctl status firewalld`
+If the state shows active (running), the firewall is still on; go to step 4.
+If the state shows inactive (dead), nothing more is needed.
+**Step 4: Disable the firewall.**
+`systemctl disable firewalld.service`
+`systemctl stop firewalld.service`
+
+![](https://pic.imgdb.cn/item/615130622ab3f51d9113b95d.png)
+
+**Step 5: Repeat steps 1 to 4 on the other hosts.**
+---- End
+
+### 2.3. **Set character set parameters**
+
+Set the same character set on every database node. You can add "export LANG=XXX" to /etc/profile (XXX being the desired Unicode locale).
+`vim /etc/profile`
+
+### 2.4. **Set the time zone and time**
+
+Make sure the time zone and time are identical on all database nodes.
+**Step 1: Run the following on each node and check that time and time zone match. If not, do steps 2 and 3.**
+`date`
+**Step 2: Copy the time zone file under /usr/share/zoneinfo/ on each node to /etc/localtime.**
+`cp /usr/share/zoneinfo/$region/$timezone /etc/localtime`
+**Note**: $region/$timezone identifies the time zone to set, for example Asia/Shanghai.
+**Step 3: Use date -s to set all nodes to the same time, for example:**
+`date -s "Sat Sep 27 16:00:07 CST 2020"`
+
+---- End
+
+### **2.5. Set the NIC MTU value**
+
+Set the same NIC MTU on all database nodes.
+
+**Step 1: Query the server's NIC name.**
+`ifconfig`
+As shown below:
+![](https://pic.imgdb.cn/item/615130622ab3f51d9113b96b.png)
+**Step 2: Set the NIC MTU to the same value on every node.**
+The recommended MTU is 1500 for x86 and 8192 for ARM.
+`ifconfig <NIC name> mtu <MTU value>`
+---- End
+
+## 3. Install openGauss
+
+### **3.1. Create the XML configuration file**
+
+Before installing openGauss you must create cluster_config.xml. It tells openGauss how to deploy itself: server information, installation paths, IP addresses, and port numbers. Adapt the XML to your own scenario.
+
+When filling in the database node name, obtain it with the hostname command.![](https://pic.imgdb.cn/item/615130812ab3f51d9113da0a.png)
+
+The single-node configuration file looks like this (a textual sketch follows the screenshot):![](https://pic.imgdb.cn/item/615fb6952ab3f51d91500880.png)
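+A minimal, hedged sketch of such a single-node cluster_config.xml is shown below. The host name node1, the IP 192.168.0.10, and all paths are placeholders rather than the values in the screenshot; the authoritative field list is in the openGauss installation guide.
+
+```shell
+# Write a hypothetical single-node cluster_config.xml (adjust names, IPs, and paths).
+cat > /opt/software/openGauss/cluster_config.xml <<'EOF'
+<?xml version="1.0" encoding="UTF-8"?>
+<ROOT>
+  <CLUSTER>
+    <PARAM name="clusterName" value="dbCluster" />
+    <PARAM name="nodeNames" value="node1" />
+    <PARAM name="gaussdbAppPath" value="/opt/huawei/install/app" />
+    <PARAM name="gaussdbLogPath" value="/var/log/omm" />
+    <PARAM name="tmpMppdbPath" value="/opt/huawei/tmp" />
+    <PARAM name="gaussdbToolPath" value="/opt/huawei/install/om" />
+    <PARAM name="corePath" value="/opt/huawei/corefile" />
+    <PARAM name="backIp1s" value="192.168.0.10" />
+  </CLUSTER>
+  <DEVICELIST>
+    <DEVICE sn="node1">
+      <PARAM name="name" value="node1" />
+      <PARAM name="azName" value="AZ1" />
+      <PARAM name="azPriority" value="1" />
+      <PARAM name="backIp1" value="192.168.0.10" />
+      <PARAM name="sshIp1" value="192.168.0.10" />
+      <PARAM name="dataNum" value="1" />
+      <PARAM name="dataPortBase" value="26000" />
+      <PARAM name="dataNode1" value="/opt/huawei/install/data/dn" />
+    </DEVICE>
+  </DEVICELIST>
+</ROOT>
+EOF
+```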
+### **3.2. Initialize the installation environment**
+
+#### 3.2.1. Prepare the installation user and environment
+
+**Step 1** Log in as root to any host that will run openGauss and create the directory that will hold the package, as planned.
+
+![](https://pic.imgdb.cn/item/615fb9772ab3f51d91538d82.png)
+
+**Step 2** Upload the package "openGauss-2.0.0-CentOS-64bit-all.tar.gz" and the configuration file "cluster_config.xml" into that directory.
+
+**Step 3** In the package directory, unpack openGauss-2.0.0-CentOS-64bit-all.tar.gz. This yields an OM package and a Server package. Unpacking the OM package creates the script subdirectory under /opt/software/openGauss, containing gs_preinstall and the other OM tool scripts.
+
+**Step 4** Change into the tool script directory.
+`cd /opt/software/openGauss/script`
+
+**Step 5** To avoid installation failures, check that hostname matches /etc/hostname.
+`hostname`
+`cat /etc/hostname`
+
+![](https://pic.imgdb.cn/item/615130812ab3f51d9113da0a.png)
+
+**Step 6** gs_preinstall needs Python 3.6; the system usually ships Python 2.7.
+
+- Install the CentOS development tools (needed to build software from source):
+  `sudo yum -y groupinstall "Development Tools"`
+
+- Install epel:
+
+  `sudo yum install epel-release`
+
+- Install Python 3:
+
+  `sudo yum install python36`
+
+  ![](https://pic.imgdb.cn/item/615130812ab3f51d9113da0f.png)
+
+- Change the default python:
+
+  ![](https://pic.imgdb.cn/item/615130812ab3f51d9113da15.png)
+
+**Step 7** Prepare the environment with gs_preinstall.
+
+Run the pre-installation interactively; it creates the root and openGauss user mutual trust automatically:
+`./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config.xml`
+
+![](https://pic.imgdb.cn/item/615130812ab3f51d9113da1d.png)
+
+![](https://pic.imgdb.cn/item/615130812ab3f51d9113da23.png)
+
+#### 3.2.2. Establish mutual trust (**via the script**)
+
+**Step 1** Create the input file the trust script needs, listing the IPs of all openGauss hosts.
+` vim hostfile`
+
+**Step 2** As the user that needs the trust relationship, run the script:
+` ./gs_sshexkey -f /opt/software/hostfile -W wangjingwei1`
+
+On success it looks like this:![](https://pic.imgdb.cn/item/615130922ab3f51d9113f02e.png)
+
+### **3.3. Run the installation**
+
+**Step 1** Log in to the openGauss host and switch to omm.
+`su - omm`
+**Step 2** Install openGauss with gs_install.
+`gs_install -X /opt/software/openGauss/cluster_config.xml`
+During execution you are prompted for the database user's password, which must have some complexity.
+![](https://pic.imgdb.cn/item/615130922ab3f51d9113f039.png)
+![](https://pic.imgdb.cn/item/615130922ab3f51d9113f041.png)
+
+**Step 3** After a successful install, delete root's mutual-trust files on every openGauss node by hand:
+`rm -rf ~/.ssh`
+
+Installation complete.
+
+## 4. Verify the installation
+
+**Step 1** Log in to the server as omm.
+
+**Step 2** Check the database status; "cluster_state" showing "Normal" means the database is usable.
+`gs_om -t status`
+
+**Step 3** After installation, a database named postgres is created by default and can be used for the first connection. Here postgres is the database name and 26000 is the primary node's port, i.e., the dataPortBase value from the XML configuration file; substitute your own values.
+`gsql -d postgres -p 26000`
+On success the system prints something like:
+`gsql ((openGauss x.x.x build 290d125f) compiled at 2021-03-08 02:59:43 commit 2143 last mr 131 Non-SSL connection (SSL connection is recommended when requiring high-security) Type "help" for help.`
+
+![](https://pic.imgdb.cn/item/615130922ab3f51d9113f04f.png)
+
+**Step 4** Create a table, insert data, and query it.
+
+![](https://pic.imgdb.cn/item/615130a72ab3f51d91140865.png)
+
+![](https://pic.imgdb.cn/item/615130b72ab3f51d91141947.png)
+
+# II. Connecting to the Database over JDBC
+
+## **1. Confirm the connection information**
+
+**Step 1** Log in to the primary node as OS user omm.
+
+**Step 2** Query the openGauss instances with "gs_om -t status --detail".
+
+![](https://pic.imgdb.cn/item/615130a72ab3f51d91140860.png)
+
+## **2. Configure the server for remote connections**
+
+**Step 1** Log in to the primary node as OS user omm.
+
+**Step 2** Configure client authentication.
+
+- First connect locally and create the "jack" user:
+
+  `postgres=# CREATE USER jack PASSWORD 'Test@123';`
+
+- Allow clients to connect as "jack"; remote connections as "omm" (the database's initial user) are forbidden. The example below allows a client at IP 10.27.1.209 to reach this host.
+
+  ![](https://pic.imgdb.cn/item/615130bc2ab3f51d91141e59.png)
+
+**Step 3** Configure **listen_addresses**, i.e., the primary-node IPs or host names remote clients may connect to.
+
+- Check the current listen_addresses value:
+
+  `gs_guc check -I all -c "listen_addresses"`
+
+- Append the IPs to listen_addresses, comma-separated; for example, to add 10.11.12.13:
+
+  `gs_guc set -I all -c"listen_addresses='localhost,192.168.0.100,10.11.12.13'"`
+
+**Step 4** Restart openGauss.
+ `gs_om -t stop && gs_om -t start`
+
+![](https://pic.imgdb.cn/item/61638b282ab3f51d91c7fae2.png)
+
+## **3. JDBC package, driver class, and environment**
+
+- Download the JDBC package openGauss-2.0.0-JDBC.tar.gz from the openGauss site and unpack it to obtain the driver jar, postgresql.jar.
+- Before creating a connection, the driver class "org.postgresql.Driver" must be loaded.
+- Run "java -version" in a terminal and confirm the JDK is version 1.8.![](https://pic.imgdb.cn/item/61638ba52ab3f51d91c87e93.png)
+
+## **4. Loading the driver**
+
+Load the driver implicitly anywhere in the code before the connection is created: `Class.forName("org.postgresql.Driver");`
+
+- When running the code on Windows, letting Eclipse load the driver is enough.
+
+  ![](https://pic.imgdb.cn/item/616396cd2ab3f51d91d61cdc.png)
+
+- When connecting from CentOS, put postgresql.jar on Java's classpath **environment variable**:
+
+  copy postgresql.jar into ...\Java\jdk1.7.0\jre\lib\ext (the path depends on your JDK version and install location, likewise below), and
+
+  copy postgresql.jar into ...\Java\jre7\lib\ext (ideally into the lib\ext of every jre folder). A classpath-based alternative is sketched below.
+
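+Instead of copying the jar around, the driver can simply be put on the classpath at compile and run time. The sketch below is illustrative only: the host, port, user, and password are placeholders based on the environment configured earlier in this post, not values taken from it.
+
+```shell
+# Minimal JDBC smoke test with postgresql.jar on the classpath (hypothetical values).
+cat > TestConn.java <<'EOF'
+import java.sql.Connection;
+import java.sql.DriverManager;
+
+public class TestConn {
+    public static void main(String[] args) throws Exception {
+        Class.forName("org.postgresql.Driver");  // load the openGauss JDBC driver
+        String url = "jdbc:postgresql://192.168.0.100:26000/postgres";  // placeholder IP/port
+        try (Connection conn = DriverManager.getConnection(url, "jack", "Test@123")) {
+            System.out.println("connected: " + !conn.isClosed());
+        }
+    }
+}
+EOF
+javac TestConn.java
+java -cp .:postgresql.jar TestConn
+```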
+## **5. Connecting to the database**
+
+JDBC offers three methods for creating a connection:
+
+1. DriverManager.getConnection(String url)
+2. DriverManager.getConnection(String url, Properties info);
+3. DriverManager.getConnection(String url, String user, String password);
+
+- Connection code:![](https://pic.imgdb.cn/item/6163985a2ab3f51d91d7cffb.png)
+
+  ![](https://pic.imgdb.cn/item/6163985a2ab3f51d91d7cfe9.png)
+
+- Run result:![](https://pic.imgdb.cn/item/615130bc2ab3f51d91141e77.png)
+
+- Connecting and inserting data into a table:
+
+  The connection accesses a table as some user, so grant that user privileges in the database first.![](https://pic.imgdb.cn/item/615130bc2ab3f51d91141e6d.png)
+
+  Code:![](https://pic.imgdb.cn/item/6163985a2ab3f51d91d7cfee.png)
+
+  Run result:![](https://pic.imgdb.cn/item/615130b72ab3f51d9114194d.png)
+
+  Verify with a query:![](https://pic.imgdb.cn/item/615130a72ab3f51d9114088b.png)
+
+# III. Problems encountered
+
+1. I first installed the minimal edition and found one command unavailable; switching to the enterprise edition made the problem go away.
+
+2. CentOS version: I could not find CentOS 7.6 online, only 7.9, which errors at runtime. Edit the /etc/redhat-release file and change
+   `CentOS Linux release 7.9.2003 (Core)` to `CentOS Linux release 7.6 (Core)`.
+
+3. Installing Python 3:
+
+   - Install the CentOS development tools (to build software from source):
+     `sudo yum -y groupinstall "Development Tools"`
+   - Install epel: `sudo yum install epel-release`
+   - Install Python 3: `sudo yum install python36`
+   - Change the default python![](H:\awjw\大四上\系统\blog\content\zh\post\awei\Snipaste_2021-09-09_14-30-17.png)
+
+4. Connection problems
+
+   Connecting from the Windows host kept failing even with remote access configured and mutual trust established, so I connected from the VM instead. That first requires a JDK; installing via yum is easiest:
+
+   - Search for the JDK package:
+
+     ```
+     # yum search java|grep jdk
+     ```
+
+   - Install JDK 1.8 (the default directory is /usr/lib/jvm/):
+
+     ```
+     # yum install java-1.8.0-openjdk
+     ```
+
+   - Verify the installation.
+
+     The driver must then be loaded, or the downloaded jar cannot be used: put postgresql.jar on Java's classpath **environment variable**, i.e.,
+
+     copy postgresql.jar into ...\Java\jdk1.7.0\jre\lib\ext (path depends on JDK version and location, likewise below), and
+
+     copy postgresql.jar into ...\Java\jre7\lib\ext (ideally into every jre folder's lib\ext).
+
diff --git "a/content/zh/post/awei/\344\273\243\347\240\201\350\247\243\346\236\220\357\274\232\344\272\213\345\212\241\347\256\241\347\220\206\345\231\250.md" "b/content/zh/post/awei/\344\273\243\347\240\201\350\247\243\346\236\220\357\274\232\344\272\213\345\212\241\347\256\241\347\220\206\345\231\250.md"
new file mode 100644
index 0000000000000000000000000000000000000000..d52a647a7330f31460c91f51258316f91140b505
--- /dev/null
+++ "b/content/zh/post/awei/\344\273\243\347\240\201\350\247\243\346\236\220\357\274\232\344\272\213\345\212\241\347\256\241\347\220\206\345\231\250.md"
@@ -0,0 +1,107 @@
++++
+
+title = "Source-Code Analysis of Transaction Management in the Open-Source openGauss Database"
+date = "2021-12-03"
+tags = ["Source-code analysis of transaction management in open-source openGauss"]
+archives = "2021-12"
+author = "awei"
+summary = "Source-code analysis of transaction management in the open-source openGauss database"
+times = "17:30"
+
++++
+
+## Source-Code Analysis of Transaction Management in openGauss
+
+### I. Transaction state machines
+
+openGauss splits the transaction system into an upper layer (transaction blocks, TBlockState) and a lower layer (TransState). (The code lives in src/gausskernel/storage/access/transam/xact.cpp.)
+
+1. Upper-layer transaction block: the state of the client query. It makes data manipulation more flexible for users by supporting several query statements inside one transaction, in the form of a transaction block.![img](images/wps1.jpg) The transaction-block state machine:![img](images/wps2.jpg) Each transaction state corresponds to a state-machine struct.
+
+   In the absence of errors, a transaction block cycles as shown in the figure: default (TBLOCK_DEFAULT) -> started (TBLOCK_STARTED) -> block opened (TBLOCK_BEGIN) -> block running (TBLOCK_INPROGRESS) -> block ending (TBLOCK_END) -> default (TBLOCK_DEFAULT). The remaining states are the error-handling branches off the normal path.
+
+   Failures while the block is running (TBLOCK_INPROGRESS) split into two cases. Statement failure: TBLOCK_INPROGRESS -> aborted (TBLOCK_ABORT) -> abort finished (TBLOCK_ABORT_END) -> TBLOCK_DEFAULT. Manual rollback of a block that executed successfully: TBLOCK_INPROGRESS -> rollback pending (TBLOCK_ABORT_PENDING) -> TBLOCK_DEFAULT.
+
+2. Lower layer (TransState): the transaction state as the kernel sees it, the transaction state in the true sense.![img](images/wps3.jpg) The low-level transaction states:![img](images/wps4.png)
+
+   The low-level state machine is described by the TransState struct. Before a transaction starts, the state is TRANS_DEFAULT; while it is starting, TRANS_START; once successfully started, TRANS_INPROGRESS; while committing or aborting, TRANS_COMMIT or TRANS_ABORT; after the transaction ends, back to TRANS_DEFAULT.
+
+### II. The state machines on a concrete example
+
+```
+ BEGIN;
+
+ SELECT * FROM TABLE1;
+
+ END;
+```
+
+1. Overall flow
+
+   Execution of any statement first enters the transaction-block interface, then calls the low-level transaction functions that handle the specific command, and finally returns to the transaction block.![img](images/wps5.jpg)
+
+2. BEGIN execution flow
+
+   (1) The begin command enters through exec_simple_query (in \openGauss-server-master\src\gausskernel\process\tcop\postgres.cpp):
+
+   ```
+   static void exec_simple_query(const char* query_string, MessageType messageType, StringInfo msg = NULL)
+   ```
+
+   The function carries an added default parameter, uint16 messageType, defaulting to 0. It is set to 1 when a mixed message is received, meaning the normal query string is followed by an information string: query_string = normal query string + message.
+
+   For begin, query_string = 0x1002cd0 "begin;".
+
+   messageType is then compared with 1; when it is 1, the message is mixed and the trailing information must be handled, so the query string is split into the normal query part and the information part. The main calls are `start_xact_command();`
+
+   ` (void)PortalRun(portal, FETCH_ALL, isTopLevel, receiver, receiver, completionTag);`
+
+   `finish_xact_command();`
+
+   (2) start_xact_command begins a query command by calling StartTransactionCommand; the upper block state is TBLOCK_DEFAULT at this point. It goes on to call StartTransaction, which sets the low-level state to TRANS_START, initializes memory, buffers, and lock resources, and then sets TRANS_INPROGRESS; finally, back in StartTransactionCommand, the upper state is set to TBLOCK_STARTED.![img](images/wps6.jpg)![img](images/wps7.jpg)
+
+   TBLOCK_DEFAULT -> TBLOCK_STARTED
+
+   TRANS_DEFAULT -> TRANS_START -> TRANS_INPROGRESS
+
+   ![img](images/wps8.jpg)
+
+   (3) PortalRun processes the begin statement.![img](images/wps9.jpg) ![img](images/wps10.jpg)![img](images/wps11.jpg)
+
+   (4) finish_xact_command![img](images/wps12.jpg) ![img](images/wps13.jpg)
+
+   finish_xact_command ends a query command by calling CommitTransactionCommand, which moves the upper block state from TBLOCK_BEGIN to TBLOCK_INPROGRESS and waits for the next command.
+
+3. SELECT statement flow
+
+   (1) exec_simple_query: for select, query_string = 0x1002cd0 "SELECT * FROM table1;".
+
+   (2) start_xact_command calls StartTransactionCommand; since the upper block state is TBLOCK_INPROGRESS, we are already inside a transaction block, so it returns directly without changing either layer's state.
+
+   (3) PortalRun: calls down into ExecutorRun to execute the optimal-path query from the plan.![img](images/wps14.jpg)
+
+   (4) finish_xact_command calls CommitTransactionCommand; the upper state is still TBLOCK_INPROGRESS, and neither layer's state changes.
+
+4. END statement flow
+
+   (1) exec_simple_query: for end, query_string = 0x1002cd0 "end;".
+
+   (2) start_xact_command calls StartTransactionCommand; the upper state is TBLOCK_INPROGRESS, already inside the block, so it returns without state changes.
+
+   (3) PortalRun sets the upper block state to TBLOCK_END.![img](images/wps15.jpg)
+
+   (4) finish_xact_command sets the low-level state to TRANS_COMMIT, runs the commit flow and cleans up transaction resources, then restores TRANS_DEFAULT.![img](images/wps16.jpg) ![img](images/wps17.jpg) It calls on into CommitTransaction to commit: the low-level state becomes TRANS_COMMIT, the commit runs and resources are cleaned up, CLOG and XLOG are persisted locally, and the transaction slot is cleared. After cleanup the low-level state returns to TRANS_DEFAULT and control returns to CommitTransactionCommand, which sets the upper block state to TBLOCK_DEFAULT: the whole transaction block is finished.
+
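+As a hedged, runnable way to watch the abort branch of the upper-level state machine described in section I (this assumes a local openGauss listening on port 26000, as in the companion installation post):
+
+```shell
+gsql -d postgres -p 26000 <<'EOF'
+BEGIN;        -- TBLOCK_DEFAULT -> TBLOCK_STARTED -> TBLOCK_BEGIN
+SELECT 1/0;   -- the statement fails and the block falls into TBLOCK_ABORT
+SELECT 1;     -- rejected: commands are ignored until the block is closed
+ROLLBACK;     -- TBLOCK_ABORT -> TBLOCK_ABORT_END -> TBLOCK_DEFAULT
+EOF
+```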
\ No newline at end of file
diff --git "a/content/zh/post/baikaishui/Vmware15+CentOS7.9\345\256\211\350\243\205openGauss.md" "b/content/zh/post/baikaishui/Vmware15+CentOS7.9\345\256\211\350\243\205openGauss.md"
new file mode 100644
index 0000000000000000000000000000000000000000..8e98e3297cb1f992eba86c505e5c08882efcd63d
--- /dev/null
+++ "b/content/zh/post/baikaishui/Vmware15+CentOS7.9\345\256\211\350\243\205openGauss.md"
@@ -0,0 +1,205 @@
++++
+title = "Installing openGauss on VMware 15 + CentOS 7.9"
+date ="2021-12-01"
+tags =["openGauss installation"]
+archives ="2021-12"
+author ="baikaishui"
+summary ="Installing openGauss on VMware 15 + CentOS 7.9"
+times ="19:20"
++++
+# 1. Installing CentOS
+I used VMware Workstation Pro 15.
+According to the official site, the CentOS version best suited to openGauss is 7.6:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a6a50103-ada8-47e6-9b8b-67e549907cb0.png)
+Since CentOS 7.6 is no longer updated, I downloaded the 7.9 ISO instead.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-afd3f661-5aa6-471b-bbcd-348df0cb5f8b.png)
+After the download, open VMware and create a new virtual machine.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-761b80ef-2a71-4847-b2b5-2484be596cf9.png)
+Choose a custom configuration and click Next.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-509e08fb-fe6e-4780-ac8b-66dfd64e1298.png)
+Next again.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-9fa6d835-4e4b-49aa-8d7a-4c72fff0e2a0.png)
+Choose "Install the operating system later" and click Next.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-26342b98-d470-469c-aaf3-ef1d92c5c7e5.png)
+Guest OS: Linux; version: CentOS 7 64-bit.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a2bbc16f-73ce-4f06-832f-3acb547fa6c4.png)
+Name it as you like.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-687bd6a2-03d5-4be9-ae63-f703aa0ccb26.png)
+Keep the default single processor (adjust to your own machine).
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-29542fd4-fad1-400a-9228-36c8e9091a21.png)
+I gave the VM 2 GB of memory (again, size this to your host). Note: a classmate reported that giving the VM 8 GB lets openGauss's simple-install mode work directly, but my machine only has 8 GB in total, so I did not try.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a83880a8-b691-4e5b-be49-5ac6ad1486da.png)
+Use NAT networking.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-f16115d9-ffed-4901-9d0a-1fc2d3c22e8f.png)
+Keep the recommended defaults for the next two screens.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-58ef519d-0fdf-4268-a965-9fd1202bc687.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-9b157160-f3cf-47ba-95f4-4b3ebff566a6.png)
+Create a new virtual disk.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-336de266-3567-4a8c-9774-5ed2d20acc7e.png)
+Maximum disk size 20 GB; split the virtual disk into multiple files.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-b40b3965-e8e1-421a-af60-111ed671d721.png)
+Default, then Next.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-9fc08dfe-373d-4cca-9c35-b9662d8f60e0.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-c19ba93f-b62b-4bec-b86c-8b1a6d2d9c7b.png)
+Right-click the CentOS VM, open Settings, select CD/DVD, use an ISO image, and pick the downloaded ISO.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-bd77a466-0996-446e-9a0b-7ee3d9f72124.png)
+Then power the VM on. Here I hit my first problem: the host blue-screened and froze the moment the VM started. At first I suspected memory pressure, but closing background processes did not help. It turns out VMware is fragile here: after a certain Windows 10 update, VMware is known to blue-screen, and the fix is to upgrade to VMware 16 Pro. That worked, and an in-place upgrade keeps the configured VMs, so none of the work above was wasted. Continuing with the installation:
+Start the VM, choose Install CentOS Linux 7, and press Enter.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a9577981-cecb-41d3-be49-de06c1254f00.png)
+Pick the language you prefer; note that Chinese may produce some mojibake.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-056ea17d-e704-47a4-8cc9-39adad3b3725.png)
+Configure the basics here:
+1. Open "Installation Source" and simply click Done; the warning icon disappears.
+2. Software selection: GNOME Desktop, a friendly graphical interface.
+3. Disable Kdump if you wish.
+4. Automatic partitioning, which usually creates four partitions, as shown.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-c58ec813-5513-4b9b-b39c-4e5b93e096c6.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-c6e79bdb-5ef7-4032-a73d-ac0ca9d60ba0.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-b505b663-c25f-41e4-b5c2-bef61a9f6b37.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-1915e9de-6812-4c2e-84a5-dc2923ec1a54.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-9086ab93-8c53-421b-a22d-3a0962d78d3f.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-ab3d2903-4b9b-4fb7-9471-0f7ac281e713.png)
+Set the user and password during the installation.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-57e3ac12-ece8-40d2-97e6-93817d3d2ee6.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-bbda861e-a48e-43db-becb-d9ef142092d9.png)
+After installing, click Reboot; you should see the following screen.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-b21ea234-737b-4c98-9218-7670007800db.png)
+Accept the license and configure the network.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-7ad37229-535b-4b33-a753-d24cbf63ab90.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-3b214586-e402-4f17-80aa-5e4c0df67cd1.png)
+After logging in you reach the graphical desktop; open the wired settings at the top right.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a0b8e6e9-3f46-43cd-b849-23e11a1d05af.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-2f8d5ff2-05f7-460f-845c-5b0e98bd531c.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-64a7d3c3-3d0b-4f5b-af78-9c0956e92b01.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-97ea268b-5c3a-428e-9063-b6c8149a1a37.png)
+That completes the CentOS installation.
+# 2. Installing openGauss
+2.1 Preparation
+Check the IP with ifconfig:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-e934a54e-ed70-4f80-a78d-c0dea7344055.png)
+Edit /etc/profile with vi, append ulimit -c unlimited at the end, then reload the file:
+source /etc/profile
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a2646262-5c43-4d36-891d-a2b18a969c16.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-9403d6ec-ff8a-4d8d-b2e4-02338fe00da0.png)
+Disable the firewall, SWAP, and SELINUX (to avoid excessive validation during installation and connection failures later).
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-43835996-f37b-4849-b1db-849b7d035cc1.png)
+Next run yum install python3.6* (I had installed Python 3.6 before, so I got the output below):
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-f49ab3f9-8a2d-42ac-9d63-6ceedbce4ab2.png)
+Without a prior install it looks like this:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-c8c5e0b9-78ef-4aff-ad2b-4e1c92bf9be0.png)
+Then install the other packages.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-928bdc29-2350-4227-9644-4279a3d6e53a.png)
+The most privileged Linux user is root; the Gauss database officially defaults to the dbgrp group and the omm user, so create them.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-87f1d013-8d7e-4d2d-81c1-ad710c601fc2.png)
+Next create a directory for openGauss to hold the package and its unpacked files; I created an openGauss folder under /opt/software. Run chmod -R 755 /opt/software/openGauss to grant read/write permissions on it.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-781fd591-60f7-43aa-86e1-adc4b09165f0.png)
+2.2 Download the openGauss package
+Download the package from the official site; I chose the 2.0.1 enterprise edition.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-e2d4bbc9-1c3b-46fb-bef9-176909559e15.png)
+Then set up a VMware shared folder to pass files between host and VM.
+Shared folders can be configured automatically or manually; for reasons unknown, my "Install VMware Tools" button was greyed out:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-3d8cafff-c158-4fbc-8e64-8cb87698e2a1.png)
+None of the fixes I tried worked, so I set the shared folder up manually.
+Right-click the CentOS VM and open Settings:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-81e598ec-8bc2-4cb9-a614-0b9651b96411.png)
+Select Shared Folders, choose Always enabled, and add a folder on the host. Once that is done, mount the share with vmhgfs-fuse .host:/ /mnt/hgfs.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-6e6350a3-4bec-4604-b586-d336618365f8.png)
+cd into /mnt/hgfs and you can see the host folder configured earlier.
+The catch is that this mount must be redone after every boot, which is tedious; a persistent variant is sketched below.
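+As a hedged convenience that is not part of the original walkthrough, the mount can be made persistent through /etc/fstab, assuming open-vm-tools provides vmhgfs-fuse:
+
+```shell
+# Persist the VMware shared-folder mount across reboots (vmhgfs-fuse assumed present).
+echo '.host:/  /mnt/hgfs  fuse.vmhgfs-fuse  defaults,allow_other  0  0' >> /etc/fstab
+mount -a   # mount it now using the new fstab entry
+```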
+Then copy the installation package into the shared folder on the host, and move it into the openGauss folder with mv:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-79b89929-92a7-4a30-a202-1a7c321b56e5.png)
+Enter the openGauss folder and unpack the package:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-a5a81815-0ded-4f93-abb8-196d6034c8ba.png)
+Run ls -l; output like the following means all is well:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-1db931d7-5e74-4dee-bc96-ed67eabafa40.png)
+# 3. Configure the XML file
+Enter the script folder produced by the unpacking and check that the pre-install script is there:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-15bdf36d-8159-47b4-b211-bf760de3ef9a.png)
+In the openGauss directory run vim clusterconfig.xml, paste the XML template content into the new file (the template is shown in the screenshots below), press Esc to leave insert mode, and save and quit with :wq!.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-28038b33-f321-46d4-82a1-9e0567371b03.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-e7909e4e-abfa-4411-af1d-6b1ebdc4ba56.png)
+The node name and IP here must be changed to your own, which were checked during preparation.
+Run vi /etc/profile and add the following lines:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-222643e8-5859-44e2-aebb-fe0e8a238868.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-4a752f1f-43e2-41e0-af89-286b63dabc5a.png)
+Leave insert mode, save and quit with :wq!, then source the file:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-f9aafbcf-580c-4b7d-b709-bc945b58cb3e.png)
+# 4. Pre-installation
+In /opt/software/openGauss/script, run
+./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml
+Normally this produces feedback like:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-e9daf0ee-25bf-4428-81a2-2b7dd01f1899.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-9ab118b5-8063-4b1a-81e8-b3606e85cd4c.png)
+In my case, perhaps because of a bad XML file or some other reason, the pre-install command produced no feedback at all, neither success nor error. I then made the mistake of re-running it over and over, restarting the terminal and then the whole VM in between. After all that, CentOS began printing piix4_smbus: Host SMBus controller not enabled! at boot, and after entering the password the login screen just bounced back to itself. Many fixes failed because the VM was by then losing data across reboots, so edited files would not persist. After an afternoon of attempts I gave up, built a fresh VM, and repeated the steps above. In hindsight, the repeated pre-install runs probably piled up duplicate root and openGauss mutual-trust data until the disk was nearly full. On the new VM, the pre-install finally produced a useful error: Exception: [GAUSS-51900] The current OS is not supported. The current system is: centos7.9. So CentOS 7.9 is not supported and a downgrade to 7.6 is needed. I could not find a 7.6 ISO online (books on openGauss reportedly ship one), so I fetched the CentOS 7.6 release rpm with wget http://vault.centos.org/7.6.1810/os/x86_64/Packages/centos-release-7-6.1810.2.el7.centos.x86_64.rpm
+Install the downloaded 7.6 rpm:
+rpm -ivh centos-release-7-6.1810.2.el7.centos.x86_64.rpm --force
+Re-running rpm -qa | grep -i centos-release now shows two release versions:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-0842a449-c815-4a43-80f6-a564d0e4980e.png)
+Remove the 7.7 one:
+rpm -ev centos-release-7-7.1908.0.el7.centos.x86_64
+After that, the pre-installation finally succeeded.
+Use openGauss's gs_checkos tool to check the system state; note that the command must be run from the /opt directory.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-126222ef-e752-4ccb-897f-a7c1965002ad.png)
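+The gs_checkos invocation appears only in the screenshot above; as a hedged sketch of the usual form (flags per the openGauss OM tooling, and they may vary by version):
+
+```shell
+cd /opt
+/opt/software/openGauss/script/gs_checkos -i A --detail   # item "A" runs all OS checks
+```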
+# 5. Run the installation
+Switch to the omm user and install.
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-575c8328-0121-4f51-b4d9-ffff17afdb84.png)
+You are asked to set a password during the run; "completed" at the end means success.
+As omm, start and stop the database with gs_om -t start and gs_om -t stop:
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-26622cc5-8c41-4076-a7f7-baf638b0b48f.png)
+![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20210927-2c23e3a1-bf7b-4e75-ae1f-4235e85f3626.png)
+
+
+References: [openGauss installation](https://blog.csdn.net/qq_38029916/article/details/119136887?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522163270679216780262549001%2522%252C%2522scm%2522%253A%252220140713.130102334..%2522%257D&request_id=163270679216780262549001&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~sobaiduend~default-1-119136887.pc_search_ecpm_flag&utm_term=vmware+centos+opengauss&spm=1018.2226.3001.4187)
+; [CentOS 7 installation](https://blog.csdn.net/tsundere_x/article/details/104263100?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522163270756416780357262837%2522%252C%2522scm%2522%253A%252220140713.130102334.pc%255Fall.%2522%257D&request_id=163270756416780357262837&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~first_rank_ecpm_v1~rank_v29_ecpm-2-104263100.pc_search_ecpm_flag&utm_term=vmware安装centos&spm=1018.2226.3001.4187)
\ No newline at end of file
diff --git "a/content/zh/post/baikaishui/openGauss\346\225\260\346\215\256\345\272\223\346\272\220\347\240\201\350\247\243\346\236\220\342\200\224\342\200\224\346\205\242SQL\346\243\200\346\265\213.md" "b/content/zh/post/baikaishui/openGauss\346\225\260\346\215\256\345\272\223\346\272\220\347\240\201\350\247\243\346\236\220\342\200\224\342\200\224\346\205\242SQL\346\243\200\346\265\213.md"
new file mode 100644
index 0000000000000000000000000000000000000000..9cc4ec6a09df1fe18b184ad1fdfa370632412afe
--- /dev/null
+++ "b/content/zh/post/baikaishui/openGauss\346\225\260\346\215\256\345\272\223\346\272\220\347\240\201\350\247\243\346\236\220\342\200\224\342\200\224\346\205\242SQL\346\243\200\346\265\213.md"
@@ -0,0 +1,121 @@
++++
+title = "openGauss Source Code Analysis: Slow SQL Detection"
+date ="2021-12-09"
+tags =["openGauss installation"]
+archives ="2021-12"
+author ="baikaishui"
+summary ="openGauss source code analysis: slow SQL detection"
+times ="20:20"
++++
+# openGauss Source Code Analysis: Slow SQL Detection
+
+## **What slow SQL detection is:**
+
+A model is trained on historical SQL statement information and then used to predict whether a given SQL statement is a potential slow SQL. Once potential slow SQL is identified, developers can optimize it or assess the risk before the workload goes live, preventing problems in production.
+
+## **What slow SQL detection offers:**
+
+Pre-launch checking: before a batch of new business SQL goes live, the SQL diagnosis feature estimates its execution time, helping users decide whether the statements should be revised. Workload analysis: the current workload is automatically clustered into several categories; the execution cost of each category is analyzed in turn, along with the similarity between categories.
+
+## **First, the stages of slow SQL discovery and the problem each one solves.**
+
+Stage 1: analyze a batch of user business SQL and infer whether each statement will run fast or slow, so the statements judged slow can be singled out.
+
+Stage 2: diagnose the root cause of the identified slow SQL. Common causes include excessive data volume, overly complex statements, lock contention under concurrency, and full table scans for lack of an index.
+
+Stage 3: give targeted fixes for the diagnosed causes, e.g., suggest rewriting the SQL or creating an index.
+
+openGauss currently ships the stage-1 capability, is working on stage 2, and has released parts of stage 3, such as index recommendation. In industry, stage 1 is mostly implemented by estimating from execution plans, and stage 2 by building fault-pattern libraries and heuristic rules; with the first two stages in place, stage 3 is fairly independent. Academia studies stage 1 heavily; stage 2 already works well with conventional fault-pattern libraries, so it is not a research hotspot, and stage 3 is independent enough to be a field of its own.
+
+## **A DNN model over execution plans:**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-a0155d62-7c76-44c0-8ca0-644fbcebb6b7.png)
+
+Functional flow:
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-2470d485-1f71-4f9a-b7f9-8df79843f8a8.png)
+
+The algorithm feeds operator information from the execution plan into a deep-learning network to predict execution time. For each operator it collects the vectorized features, optimizer cost, and execution time of the left and right subtrees, feeds them into the operator's model, and predicts the operator's own vectorized features and execution time. The process runs bottom-up.
+
+## **Example: prediction flow for a join operation**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-763a81c7-b907-412d-a043-cc67e1c6d3d0.png)
+
+The flow chart shows the prediction for a join whose two subtrees are both Scan operators. The vectorized features and execution times predicted by the two Scan models, plus the optimizer's cost estimate for the join, are fed into the join model, which outputs the join's vectorized features and predicted execution time.
+
+Drawbacks of this technique:
+(1) The model must be continually corrected with already-predicted operators, so prediction is slow.
+(2) It is poorly aware of environment changes; e.g., a database parameter change can render the model almost useless.
+(3) Prediction requires the execution plan of the statement under test, adding load on the database, which makes it especially unsuitable for OLTP.
+
+## A MART (multiple additive regression trees) model over execution plans, consisting of an offline training module and an online prediction module, which work as follows.
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-5956992c-432e-4853-97c8-57f71fca288b.png)
+
+**Offline training:** for each operator type in the database (Table Scan, Merge Join, Sort, ...), train a dedicated model to estimate the operator's cost. A separate training phase selects an appropriate scaling function per operator, finally yielding regression-tree models with scaling functions.
+
+**Online prediction:** compute feature values for every operator in the execution plan, use them to select the right model for each operator, and estimate the execution time with it.
+
+## **Functional flow:**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-973086cb-e4cb-40f2-abc2-98233bb73447.png)
+
+## Drawbacks of the MART approach:
+
+1. Poor generality: it depends strongly on the trained operator models and degrades on unseen constructs such as user-defined functions.
+
+2. The scaling functions depend on prior results, so nothing is guaranteed for feature values outside the observed range.
+
+3. Prediction again requires the execution plan, adding database load and making it hard to extend to OLTP.
+
+## **The strategy slow SQL detection adopts:**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-56ce6c57-4130-402a-bf1c-18d01f846389.png)
+
+## **The SQL-template-based flow**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-8eb7d06c-0348-4883-8a9f-e3ee76b49347.png)
+
+## The template-based flow resembles the MART approach; concretely:
+
+1. Obtain the SQL execution history.
+
+2. Check whether template information for the instance exists locally; load it if so, otherwise initialize it.
+
+3. From the SQL data, extract the statement's coarse-grained template: the SQL with table names, column names, and other sensitive details removed, keeping only the bare statement skeleton.
+
+4. Also extract a fine-grained template, which keeps key information such as table and column names on top of the coarse template, and so carries more of the statement.
+
+5. During training, build the statement's coarse- and fine-grained template information, e.g., the coarse-template ID, mean execution time, the fine template's execution-time series, and a sliding-window average execution time, then store it all.
+
+6. During prediction, first load the instance's templates and fail fast if none exist. Otherwise, check for the statement's coarse template; if absent, find the N most similar coarse templates by template similarity and predict the execution time with KNN (k-nearest neighbors). If the coarse template exists, check for a matching fine template; if absent, find the N most similar fine templates and predict with KNN; if a matching fine template exists, return the execution time recorded for it directly.
+
+## **Implementation code:**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-6121c222-f7c6-4f31-9fca-cadded23a147.png)
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-7ee30873-03d5-421c-bbb7-51860ef202f7.png)
+
+## **The deep-learning-based flow**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-aa63931a-ff58-49a6-a88c-2f38a977c554.png)
+
+1. Obtain the SQL execution history.
+
+2. During training, check whether a historical model exists. If so, load it for incremental training. If not, first vectorize the SQL with the word2vector algorithm (the "SQL embedding" step in the figure), then build a deep-learning model that takes the vectorized statement as input features, train it on the data, and save it locally. Notably, the output of the network's last fully connected layer serves as the statement's feature vector.
+
+3. During prediction, check whether a model exists and fail fast if not. Otherwise load it, vectorize the statement to predict with word2vector, feed the vector into the network, and take the last fully connected layer's output as the statement's feature vector. Finally, search the sample set by cosine similarity for the most similar statement and return its execution time as the estimate. If the model was trained on up-to-date execution-time data, the model's own regression output can serve as the estimate as well.
+
+## **Implementation code:**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-8cdb6977-81bd-4e4a-b840-99da3d3facdf.png)
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-a7424067-b725-41fe-a0e1-92d7d3ef3e1b.png)
+
+## **Overall flow, code walkthrough**
+
+![](https://oss-emcsprod-public.modb.pro/image/editor/20211206-42feeab5-e4de-41e6-a704-fbec10987aff.png)
+
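+As a hedged aside (not part of the original feature description): the pipeline above trains on historical SQL flow, and one conventional way to surface slow statements on openGauss for such a dataset is the standard log_min_duration_statement setting, for example:
+
+```shell
+# Log every statement slower than 2 seconds on all instances (OM-managed cluster assumed).
+gs_guc reload -I all -c "log_min_duration_statement = 2000"
+```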
+Reference: [Gauss Squirrel Club source-code analysis](https://blog.csdn.net/GaussDB/article/details/119989581?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522163876954016780269849143%2522%252C%2522scm%2522%253A%252220140713.130102334.pc%255Fblog.%2522%257D&request_id=163876954016780269849143&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~blog~first_rank_v2~rank_v29-3-119989581.pc_v2_rank_blog_default&utm_term=%E6%BA%90%E7%A0%81%E6%A3%80%E6%B5%8B&spm=1018.2226.3001.4450)
\ No newline at end of file
diff --git a/content/zh/post/buter/k8Sinit_guide.md b/content/zh/post/buter/k8Sinit_guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..9712d94ee2608e34987d8d7bf29f3b3cc6c6f201
--- /dev/null
+++ b/content/zh/post/buter/k8Sinit_guide.md
@@ -0,0 +1,407 @@
++++
+title = "Deploying openGauss with One Primary and Two Standbys on K8S"
+date = "2021-09-10"
+tags = ["openGauss distributed solution"]
+archives = "2021-03"
+author = "buter"
+summary = "openGauss distributed solution"
+times = "17:30"
+
++++
+
+# Deploying openGauss with One Primary and Two Standbys on K8S
+## Initialize the environment (run the following on both master and node)
+| IP | Hostname | Role |
+|-|-|-|
+|192.168.0.87|k8smaster|master|
+|192.168.0.161|k8snode01|node|
+```
+# Disable firewalld
+systemctl stop firewalld
+systemctl disable firewalld
+```
+## 1. Update Docker
+
+```
+rpm -qa|grep docker
+
+yum remove docker
+
+curl -fsSL https://get.docker.com/ | sh
+
+systemctl start docker
+
+systemctl enable docker
+
+```
+
+
+## 2. Prepare the Kubernetes repo
+```
+vim /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
+enabled=1
+gpgcheck=0
+repo_gpgcheck=0
+gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
+
+
+yum install -y kubeadm kubectl etcd
+```
+
+## 3. List the images kubeadm needs
+```
+[root@ecs-66cc dockerimages]# kubeadm config images list
+
+k8s.gcr.io/kube-apiserver:v1.21.1
+k8s.gcr.io/kube-controller-manager:v1.21.1
+k8s.gcr.io/kube-scheduler:v1.21.1
+k8s.gcr.io/kube-proxy:v1.21.1
+k8s.gcr.io/pause:3.4.1
+k8s.gcr.io/etcd:3.4.13-0
+k8s.gcr.io/coredns/coredns:v1.8.0
+```
+
+## 4. Pull the images K8S needs
+
+```
+docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.1
+docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.1
+docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.1
+docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.21.1
+docker pull registry.aliyuncs.com/google_containers/pause:3.4.1
+docker pull registry.aliyuncs.com/google_containers/etcd:3.4.13-0
+docker pull coredns/coredns:1.8.0
+```
+
+
+
+## 5. Retag the images to match what kubeadm expects
+- The images were pulled from a domestic mirror, so retag them:
+```
+docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.1 k8s.gcr.io/kube-apiserver:v1.21.1
+docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.1 k8s.gcr.io/kube-controller-manager:v1.21.1
+docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.1 k8s.gcr.io/kube-scheduler:v1.21.1
+docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.21.1 k8s.gcr.io/kube-proxy:v1.21.1
+docker tag registry.aliyuncs.com/google_containers/pause:3.4.1 k8s.gcr.io/pause:3.4.1
+docker tag registry.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
+docker tag docker.io/coredns/coredns:1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0
+```
+- Remove the now-redundant mirror tags:
+```
+docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.1
+docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.1
+docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.1
+docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.21.1
+docker rmi registry.aliyuncs.com/google_containers/pause:3.4.1
+docker rmi registry.aliyuncs.com/google_containers/etcd:3.4.13-0
+docker rmi coredns/coredns:1.8.0
+```
+
+## 6. Write the K8S init configuration and initialize (master node only)
+kubeadm.yaml
+```
+apiVersion: kubeadm.k8s.io/v1beta2
+clusterName: kubernetes
+kind: ClusterConfiguration
+kubernetesVersion: v1.21.1
+controllerManager:
+  extraArgs:
+    horizontal-pod-autoscaler-use-rest-clients: "true"
+    horizontal-pod-autoscaler-sync-period: "10s"
+    node-monitor-grace-period: "10s"
+apiServer:
+  extraArgs:
+    runtime-config: "api/all=true"
+```
+Copy the configuration file into the kubernetes directory and point the init at it:
+
+` cp kubeadm.yaml /etc/kubernetes/manifests/`
+
+` kubeadm init --config kubeadm.yaml`
+
+On success, keep the following output; it is needed later:
+```
+kubeadm join 192.168.0.35:6443 --token ru2883.u4rhwkx5oqrol9at \
+    --discovery-token-ca-cert-hash sha256:f2dbe7ce49b322e8145b6e9b4303e56468ad1352daabecb797f7bd161a64e018
+```
+
+Initialize kubectl:
+
+```
+mkdir -p $HOME/.kube
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+```
+
+Install the network plugin:
+
+`kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"`
+
+## 7. Join the node
+After installing the packages, the node needs no init of its own; run the `kubeadm join` command to join the master:
+```
+kubeadm join 192.168.0.35:6443 --token ru2883.u4rhwkx5oqrol9at \
+    --discovery-token-ca-cert-hash sha256:f2dbe7ce49b322e8145b6e9b4303e56468ad1352daabecb797f7bd161a64e018
+```
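+Note that the join token printed by kubeadm init expires (after 24 hours by default). If a node joins later than that, a fresh join command can be printed on the master:
+```
+kubeadm token create --print-join-command
+```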
+## 8. Load the image (master and node)
+```
+docker load < opengauss.tar.gz
+```
+
+
+## 9. Create the services (svc) (master node)
+Create the svc for the pods: kubectl create -f opengauss-svc.yaml
+```
+apiVersion: v1
+kind: Service
+metadata:
+  name: opengauss-service-1
+spec:
+  ports:
+  - port: 5432
+    protocol: TCP
+    targetPort: 5432
+    name: gsql
+  - port: 5434
+    protocol: TCP
+    targetPort: 5434
+    name: localport
+  - port: 2380
+    protocol: TCP
+    targetPort: 2380
+    name: etcd1-service
+  - port: 2379
+    protocol: TCP
+    targetPort: 2379
+    name: etcd1-local
+  selector:
+    app: opengauss-1
+  clusterIP: None
+
+---
+
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: opengauss-service-2
+spec:
+  ports:
+  - port: 5432
+    protocol: TCP
+    targetPort: 5432
+    name: gsql
+  - port: 5434
+    protocol: TCP
+    targetPort: 5434
+    name: localport
+  - port: 2380
+    protocol: TCP
+    targetPort: 2380
+    name: etcd1-service
+  - port: 2379
+    protocol: TCP
+    targetPort: 2379
+    name: etcd1-local
+  selector:
+    app: opengauss-2
+  clusterIP: None
+
+
+---
+
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: opengauss-service-3
+spec:
+  ports:
+  - port: 5432
+    protocol: TCP
+    targetPort: 5432
+    name: gsql
+  - port: 5434
+    protocol: TCP
+    targetPort: 5434
+    name: localport
+  - port: 2380
+    protocol: TCP
+    targetPort: 2380
+    name: etcd1-service
+  - port: 2379
+    protocol: TCP
+    targetPort: 2379
+    name: etcd1-local
+  selector:
+    app: opengauss-3
+  clusterIP: None
+
+```
+## 10. Create the pods (master node)
+
+Create the openGauss primary and standby pods: kubectl create -f opengauss-pod.yaml
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: opengauss-1
+  labels:
+    app: opengauss-1
+spec:
+  restartPolicy: Never
+  containers:
+  - name: opengauss-1
+    image: opengauss:1.0.5
+    imagePullPolicy: Never
+    securityContext:
+      runAsUser: 0
+    volumeMounts:
+    - mountPath: /var/lib/opengauss/data/
+      name: openguass-volume
+    ports:
+    - containerPort: 5432
+      name: opengauss
+    env:
+    - name: HOST_NAME
+      value: opengauss-1
+    - name: HOST_IP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+    - name: PEER_IPS
+      value: opengauss-service-2,opengauss-service-3
+    - name: PEER_HOST_NAMES
+      value: opengauss-2,opengauss-3
+    - name: PORT
+      value: "5432"
+    - name: GS_PASSWORD
+      value: "Test@56789"
+    - name: SERVER_MODE
+      value: primary
+    - name: db_config
+      value:
+  volumes:
+  - name: openguass-volume
+    hostPath:
+      path: /data/opengauss-1/
+      type: DirectoryOrCreate
+
+---
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: opengauss-2
+  labels:
+    app: opengauss-2
+spec:
+  restartPolicy: Never
+  containers:
+  - name: opengauss-2
+    image: opengauss:1.0.5
+    imagePullPolicy: Never
+    securityContext:
+      runAsUser: 0
+    volumeMounts:
+    - mountPath: /var/lib/opengauss/data/
+      name: openguass-volume
+    ports:
+    - containerPort: 5432
+      name: opengauss
+    env:
+    - name: HOST_NAME
+      value: opengauss-2
+    - name: HOST_IP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+    - name: PEER_IPS
+      value: opengauss-service-1,opengauss-service-3
+    - name: PEER_HOST_NAMES
+      value: opengauss-1,opengauss-3
+    - name: PORT
+      value: "5432"
+    - name: GS_PASSWORD
+      value: "Test@56789"
+    - name: SERVER_MODE
+      value: standby
+    - name: db_config
+      value:
+  volumes:
+  - name: openguass-volume
+    hostPath:
+      path: /data/opengauss-2/
+      type: DirectoryOrCreate
+
+---
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: opengauss-3
+  labels:
+    app: opengauss-3
+spec:
+  restartPolicy: Never
+  containers:
+  - name: opengauss-3
+    image: opengauss:1.0.5
+    imagePullPolicy: Never
+    securityContext:
+      runAsUser: 0
+    volumeMounts:
+    - mountPath: /var/lib/opengauss/data/
+      name: openguass-volume
+    ports:
+    - containerPort: 5432
+      name: opengauss
+    env:
+    - name: HOST_NAME
+      value: opengauss-3
+    - name: HOST_IP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+    - name: PEER_IPS
+      value: opengauss-service-1,opengauss-service-2
+    - name: PEER_HOST_NAMES
+      value: opengauss-1,opengauss-2
+    - name: PORT
+      value: "5432"
+    - name: GS_PASSWORD
+      value: "Test@56789"
+    - name: SERVER_MODE
+      value: standby
+    - name: db_config
+      value:
+  volumes:
+  - name: openguass-volume
+    hostPath:
+      path: /data/opengauss-3/
+      type: DirectoryOrCreate
+```
+
+
+
+## 11. Test the database (master node)
+
+```
+1. Enter the primary pod: kubectl exec -it opengauss-1 -- /bin/bash
+2. Switch user: su omm
+3. Enter the database: gsql
+```
+## 12. Common commands
+All commands run on the master node.
+```
+List cluster nodes:    kubectl get node
+List cluster pods:     kubectl get pod --all-namespaces
+List cluster services: kubectl get svc --all-namespaces
+Enter a container:     kubectl exec -it <container name> (the pod name for single-container pods) -n opengauss -- /bin/bash
+Describe a pod/svc:    kubectl describe pod/svc <name> -n <its namespace>
+View logs:             kubectl logs pod <pod name> -n <its namespace>
+```
\ No newline at end of file
diff --git "a/content/zh/post/cchen676/openGauss_\347\274\226\350\257\221\345\256\211\350\243\205\345\270\270\350\247\201\351\224\231\350\257\257.md" "b/content/zh/post/cchen676/openGauss_\347\274\226\350\257\221\345\256\211\350\243\205\345\270\270\350\247\201\351\224\231\350\257\257.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f774dee442e76677740b533d7841080c5b60c5bc
--- /dev/null
+++ "b/content/zh/post/cchen676/openGauss_\347\274\226\350\257\221\345\256\211\350\243\205\345\270\270\350\247\201\351\224\231\350\257\257.md"
@@ -0,0 +1,77 @@
++++
+title = "Common openGauss Build-and-Install Errors and Fixes"
+date = "2021-12-20"
+tags = ["openGauss usability enhancements"]
+archives = "2021-12"
+author = "cchen676"
+summary = "Common openGauss build-and-install errors and fixes"
+img = "/zh/post/cchen676/title/img26.png"
+times = "16:30"
++++
+# Common openGauss Build-and-Install Errors and Fixes
+### I. The build-and-install workflow
+Most problems met while building and installing openGauss relate to the environment and the build steps, so here is a brief outline first; for details, see the openGauss-server repository's README.
+1. Prepare the environment: download the community code and the third-party binaries, prepare the installation environment, and set up the OS and package dependencies as required.
+2. Configure environment variables: GAUSSHOME, the third-party library path, the GCC path, LD_LIBRARY_PATH, PATH, and so on.
+3. Run the build commands: configure, make, make install. (A hedged sketch follows.)
+
+For a more detailed walkthrough, see the [openGauss database build guide](https://opengauss.org/zh/blogs/blogs.html?post/xingchen/opengauss_compile/)
+
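+As a hedged illustration of step 3 (the flags follow the openGauss-server README of this era; options differ across versions, so treat this as a sketch rather than the canonical invocation):
+```shell
+# Assumes the step-2 variables (GAUSSHOME, BINARYLIBS, GCC_PATH, ...) are already exported
+# and the current directory is the openGauss-server source root.
+./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O2' \
+    --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety
+make -sj && make install -sj
+```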
+### II. Common build-and-install problems
+
+**Problem 1**: missing shared-library dependencies
+A typical error is libreadline.so.7: cannot open shared object file: No such file or directory
+Fix: install the corresponding dependency packages.
+
+| Software | Recommended version |
+| ---- | --- |
+| libaio-devel | 0.3.109-13 |
+| flex | 2.5.31 or later |
+| bison | 2.7-4 |
+| ncurses-devel | 5.9-13.20130511 |
+| glibc-devel | 2.17-111 |
+| patch | 2.7.1-10 |
+| lsb_release | 4.1 |
+| readline-devel | 7.0-13 |
+
+Sometimes the repo's default package version is not the recommended one; you can list the available versions and install a specific one with yum, for example:
+```shell
+yum --showduplicates list _package_name_ | expand
+yum install flex-2.6.1-13.oe1
+```
+**Problem 2**: undefined reference to `core_yylex(core_YYSTYPE*, int*, void*)'
+This error usually comes from mismatched flex and bison versions; for details, see
+[#I3NW7K: build error](https://gitee.com/opengaussorg/dashboard?issue_id=I3NW7K)
+
+It is most common on OS versions outside the installation guide's list,
+
+for example openEuler 20.03 LTS SP1 or Red Flag Linux.
+
+Fix: uninstall the existing flex and bison, install versions that match each other, delete the downloaded source, then re-download and rebuild.
+
+> This error can be caused by the flex and bison versions. First check that both libraries are installed and at which versions; the two versions should not be too far apart.
+> For example, flex-2.6.1 pairs well with bison-3.5.3.
+> Examples:
+> CentOS 7.6 with the default flex-2.5.37 and bison-3.0.5 builds fine;
+> openEuler 20.03 with the default flex-2.6.1 and bison-3.5 builds fine;
+> Ubuntu 18.04 with the default flex-2.6.4 and bison-3.0.5 hits this problem;
+> Ubuntu 18.04 with manually installed flex-2.6.1 and bison-3.5.3 builds fine;
+> Euler 2.9 with flex 2.6.4 and bison 3.6.4 is problematic; bison 3.5 should be OK.
+
+**Problem 3**: jni.h not found, jni_conn_cursor.cpp:26:10: fatal error: jni.h: No such file or directory
+
+This problem is rare and usually caused by other environment variables in the OS, which keep the build from finding the jni.h file in the third-party directory.
+One fix is to add the directory containing the third-party jni.h to LD_LIBRARY_PATH manually, e.g., the $BINARYLIBS/platform/centos7.6_x86_64/openjdk8/jdk1.8.0_222/include directory:
+```shell
+export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib:$GCC_PATH/mpfr/lib:$GCC_PATH/gmp/lib:$BINARYLIBS/platform/centos7.6_x86_64/openjdk8/jdk1.8.0_222/include:$LD_LIBRARY_PATH
+```
+
+**Problem 4**: wrong gcc version
+
+This is usually a misconfigured environment: the build must use the gcc 7.3 bundled in the third-party directory, not the OS's own gcc, hence the GCC_PATH setting used at build time:
+```shell
+export GCC_PATH=$BINARYLIBS/buildtools/centos7.6_x86_64/gcc7.3/
+```
diff --git a/content/zh/post/chendong/HowToRunSimpleinstall.md b/content/zh/post/chendong/HowToRunSimpleinstall.md
new file mode 100644
index 0000000000000000000000000000000000000000..63e2f21a72350d7ad5a3c5fdff01434fbacd0eb5
--- /dev/null
+++ b/content/zh/post/chendong/HowToRunSimpleinstall.md
@@ -0,0 +1,177 @@
++++
+
+title = "The openGauss Simplified Installation Script"
+
+date = "2020-12-16"
+
+tags = ["openGauss simplified installation script"]
+
+archives = "2020-12"
+
+author = "chendong"
+
+summary = "The openGauss simplified installation script"
+
+times = "15:30"
+
++++
+
+## 1 Overview
+
+This article shows how to use the openGauss simplified installation script (below, "the installation script") to set up the system environment openGauss needs and install the database in one step.
+
+#### Notes
+
+The simplified installation script means the script in the [OM repository](https://gitee.com/opengauss/openGauss-OM) (code path `openGauss-OM/simpleInstall/install.sh`); it is an enterprise-edition feature and ships only in the enterprise package. The same-named file in the minimal edition (code path `openGauss-server/simpleInstall/install.sh`) is the minimal-install script, which is unrelated to this article and used differently.
+
+## 2 Environment requirements
+
+### 2.1 openGauss requirements
+
+For the environment openGauss itself needs, see section 2.3.1 "Software and hardware requirements" of the openGauss Installation Guide.
+
+### 2.2 Script requirements
+
+#### Hardware requirements
+
+The script restricts the supported operating systems and CPU architectures; the currently supported combinations are shown in Table 1.
+
+**Table 1** Hardware requirements
+
+| Operating system | Architecture |
+| --------- | ---------- |
+| openEuler | aarch64 |
+| openEuler | x86_64 |
+| CentOS | x86_64 |
+| Ubuntu | x86_64 |
+
+#### Software dependencies
+
+The script depends on the software in Table 2.
+
+**Table 2** Software dependencies
+
+| Required software | Suggested version |
+| --------- | -------- |
+| firewalld | - |
+| python | 3 |
+
+## 3 Install openGauss
+
+### 3.1 Before installing
+
+#### Import the installation script
+
+The installation script consists of several files, listed in Table 3. When importing it, copy the tar package directly into the target environment and unpack it there; otherwise Windows/Unix format incompatibilities can appear. If they do, convert the script with the dos2unix command.
+
+**Table 3** Installation script contents
+
+| File | Purpose |
+| ------------ | ---------------------- |
+| install.sh | main simplified-install program |
+| common.sh | shared commands |
+| README.md | reference document |
+| template.xml | XML template |
+| finance.sql | finance-model demo database |
+| school.sql | school-model demo database |
+
+#### Import the openGauss database package
+
+The script supports two ways of importing the database package:
+
+- Manual import
+
+Download the matching package from the [openGauss site](https://opengauss.org/zh/download.html) and copy it into the environment, placed in the script's parent directory.
+
+- Automatic import
+
+Give the environment outbound network access and make sure no openGauss package sits in the script's parent directory; when the script runs, it downloads the matching package automatically and installs it.
+
+### 3.2 Run the installation
+
+Run the installation script as follows.
+
+```shell
+sh install.sh -U user_name -G user_group -h host_ip -p port [-D install_path]
+```
+
+#### Parameters
+
+- user_name is the openGauss installation user.
+- user_group is the group the installation user belongs to.
+- host_ip is the host's IP on the back-end storage network (internal IP).
+- host_port is the base port of the database node.
+- install_path is the openGauss installation path; this parameter is optional.
+
+For details on these parameters, see section 3.1 "Creating the XML configuration file" of the openGauss Installation Guide.
+
+#### Notes
+
+- Without install_path, the database installs under /opt/user_name by default.
+
+- Whichever way the package is imported, the script builds a local package file under /home/user_name/openGaussTar. The path can be changed via the install_location parameter in install.sh, but it must differ from install_path.
+- The script must run as root, and only one instance of it may run at a time.
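+For instance, a hedged concrete invocation under the defaults above might look like this (the IP, port, and path are placeholders; omm and dbgrp mirror the usual openGauss conventions):
+
+```shell
+# Install as user omm (group dbgrp), listening on port 5432, into /opt/gaussdb.
+sh install.sh -U omm -G dbgrp -h 192.168.0.10 -p 5432 -D /opt/gaussdb
+```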
+## 4 Import a demo database
+
+### 4.1 The school data model
+
+Suppose school B in city A adopts openGauss to strengthen its administration. The objects involved are students, teachers, classes, departments, and courses. We assume that in school B's database, teachers teach courses, students take courses, departments hire teachers, classes make up departments, and students make up classes. The corresponding relational schema is given below; the installation script installs this demo model if you choose it while running the script.
+
+#### Relational schema
+
+For the five objects in school B, the attribute sets are:
+
+- Student (ID, name, sex, birth date, enrollment date, home address)
+- Teacher (ID, name, title, sex, age, hire date)
+- Class (ID, name, head teacher)
+- Department (ID, name, head)
+- Course (ID, name, type, credits)
+
+with the corresponding identifiers:
+
+- student(std_id,std_name,std_sex,std_birth,std_in,std_address)
+- teacher(tec_id,tec_name,tec_job,tec_sex,tec_age,tec_in)
+- class(cla_id,cla_name,cla_teacher)
+- school_department(depart_id,depart_name,depart_teacher)
+- course(cor_id,cor_name,cor_type,credit)
+
+Relationships between the objects:
+
+- A student may take several courses, and a course may be taken by several students
+- A teacher may teach several courses, and a course may be taught by several teachers
+- A department may consist of several classes
+- A department may hire several teachers
+- A class may consist of several students
+
+### 4.2 The finance data model
+
+Suppose bank C in city A adopts openGauss to ease the management of its banking data. For C's business, the objects are clients, bank cards, wealth-management products, insurance, funds, and assets. We assume the following relationships in C's finance database: clients open bank cards, and clients purchase the bank's various products, namely assets, wealth-management products, funds, and insurance. The schema is given below; the installation script installs this demo model if you choose it while running the script.
+
+#### Relational schema
+
+For the six objects in bank C, the attribute sets are:
+
+- Client (ID, name, email, ID-card number, phone number, login password)
+- Bank card (card number, card type, owning client ID)
+- Wealth-management product (name, ID, description, purchase amount, term in years)
+- Insurance (name, ID, amount, target group, term in years, covered items)
+- Fund (name, ID, type, amount, risk level, manager)
+- Asset (client ID, product ID, product status, quantity, income, purchase time)
+
+with the corresponding identifiers:
+
+- client(c_id,c_name,c_mail,c_id_card,c_phone,c_password)
+- bank_card(b_number,b_type,b_c_id)
+- finances_product(p_name,p_id,p_description,p_amount,p_year)
+- insurance(i_name,i_id,i_amount,i_person,i_year,i_project)
+- fund(f_name,f_id,f_type,f_amount,risk_level,f_manager)
+- property(pro_c_id,pro_id,pro_status,pro_quantity,pro_income,pro_purchase_time)
+
+Relationships between the objects:
+
+- A client may hold several bank cards
+- A client may own several assets
+- A client may buy several wealth-management products, and one product may be bought by several clients
+- A client may buy several funds, and one fund may be bought by several clients
+- A client may buy several insurance products, and one insurance product may be bought by several clients
\ No newline at end of file
diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092009378.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092009378.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb177d8ae285a06ea9ce751bf002c5efbb02ca21 Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092009378.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092009382.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092009382.jpg new file
mode 100644 index 0000000000000000000000000000000000000000..805ba6a67fd2bae00e807af73a995820a5032baa Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092009382.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092137856.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092137856.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42eae96edd1415c1126fe14f6bb8741dd9cc266f Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092137856.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092137858.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092137858.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2b916e12bfb6c67342148bfb6b10b246eb1f12f Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092137858.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092457462.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092457462.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b48f48d6d29c01988e469d0c95dbc6209112c5f Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092457462.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092457470.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092457470.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b46e20071a4cc4d8274024bf6c88a1a2bb66697 Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092457470.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001092457472.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001092457472.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e4964dc3823d177343c5769af3cbadeaef94b47 Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001092457472.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001138679671.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001138679671.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16c152788586d65081e04c07925fbc7b4621d52f Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001138679671.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001138775125.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001138775125.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a51a44f816125a9b00dc72fbfea286bbde299dd Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001138775125.jpg differ diff --git a/content/zh/post/chenguang/figures/zh-cn_image_0000001138775127.jpg b/content/zh/post/chenguang/figures/zh-cn_image_0000001138775127.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39d4c456b87922ed21f880ba6bf25254f9928c74 Binary files /dev/null and b/content/zh/post/chenguang/figures/zh-cn_image_0000001138775127.jpg differ
diff --git "a/content/zh/post/chenguang/openGauss\345\212\250\346\200\201\346\225\260\346\215\256\350\204\261\346\225\217.md" "b/content/zh/post/chenguang/openGauss\345\212\250\346\200\201\346\225\260\346\215\256\350\204\261\346\225\217.md"
new file mode 100644
index 0000000000000000000000000000000000000000..ae39ce7a6416b5b2159cf12c05ff36d9df649476
--- /dev/null
+++ "b/content/zh/post/chenguang/openGauss\345\212\250\346\200\201\346\225\260\346\215\256\350\204\261\346\225\217.md"
"b/content/zh/post/chenguang/openGauss\345\212\250\346\200\201\346\225\260\346\215\256\350\204\261\346\225\217.md" @@ -0,0 +1,148 @@ ++++ + +title = "openGauss动态数据脱敏" + +date = "2021-03-24" + +tags = ["openGauss核心技术"] + +archives = "2021-03" + +author = "晨光" + +summary = "openGauss动态数据脱敏" + +img = "/zh/post/chenguang/title/img21.png" + +times = "16:30" + ++++ + +# openGauss动态数据脱敏 + +## 数据脱敏背景及介绍 + +**数据脱敏背景** + +伴随着互联网大数据、云服务等新兴技术的飞速发展和普及,业务上云、数据集中存储、数据共享等数据应用方式已成为未来发展的趋势,云数据库和云计算服务具有易部署、低成本、高效率、高可靠的优势,使得越来越多的消费者更倾向于将个人数据存储在云上而非个人移动硬盘中。实际上,由于数据使用场景愈加复杂,个人隐私数据泄露、被窃取的风险越来越高,仅近年来就发生数起重大数据库信息泄露事件,这对数据库安全提出了更高的要求。 + +数据脱敏,顾名思义就是将敏感数据通过变形、屏蔽等方式处理,其目的是保护隐私数据信息,防止数据泄露和恶意窥探。当企业或者机构收集用户个人身份数据、手机、银行卡号等敏感信息,然后将数据通过导出(非生产环境)或直接查询(结合生产环境)的方式投入使用时,按照隐私保护相关法律法规需将数据进行“脱敏”处理。 + +**数据脱敏介绍** + +数据脱敏主要分为静态脱敏和动态脱敏,静态数据脱敏(Static Data Masking)采用“先脱敏-后分发”的方式,一般是将生产环境数据拷贝到测试或开发库中,导出后的数据已经改变了原始数据的内容,使得脱敏后的数据成为了测试开发源数据。而动态数据脱敏(Dynamic Data Masking)是与生产环境紧密关联的,访问敏感数据时实时地进行脱敏,主要用于直接访问生产数据的场景,在屏蔽敏感信息的同时也保证了源数据的一致性和有效性。 + +![](../figures/zh-cn_image_0000001092137856.jpg) + +动态数据脱敏和静态数据脱敏适用于不同的场景,两者之间没有优劣之分,主要是以使用场景来选择合适的脱敏模式。openGauss最新版本已正式对外支持动态数据脱敏特性,下面的章节将围绕openGauss动态数据脱敏机制进行阐述。 + +目前主流的动态数据脱敏技术路线分为“结果集解析”和“语句改写”两条路径: + +- 结果集解析:不改写发给数据库的语句,需要提前获悉数据表结构,待数据库返回结果后再根据表结构判断集合内哪些数据需要脱敏,并逐条改写结果数据。 +- 语句改写:将包含敏感字段查询的语句改写,对于查询中涉及的敏感字段(表列)通过外层嵌套函数的方式改写,使得数据库运行查询语句时返回不包含敏感数据的结果集。 + +从性能上来说,结果集解析方法需要在数据库返回结果集后再逐行字段解析、规则匹配、数据脱敏,需要逐个修改结果集中每一行数据,因此脱敏耗时与结果集容量线性相关,整体性能损耗较大;而语句改写通过将较为简短的查询语句进行解析并重写的方式,对语句中的敏感列外嵌了一层脱敏函数,数据库执行命令时将自动执行脱敏函数实现数据脱敏,返回的结果集即为脱敏后的数据。该方式仅仅改写一条查询语句而不涉及结果集的解析,因此能够极大地降低性能损耗,openGauss便是采用了语句改写的方式,10万条敏感数据脱敏的性能损耗低于5%。 + +另外,对于比较复杂的命令,查询字段一般包含大量同名字段、表别名、嵌套查询等,基于结果集解析首先需要将结果集与真实查询列进行一一对应才可判断出该字段是否需要脱敏,查询越复杂识别难度越高,匹配的准确率就越低,而基于语句的改写可精确的对复杂查询涉及的字段嵌套脱敏函数。 + +综上分析,基于语句改写方法的数据脱敏无论对于性能还是准确性来说都是较为优秀的脱敏方案,openGauss基于语句改写思想,在查询解析获取查询树后,根据用户定义的脱敏策略识别查询树目标结点\(Node\),并对待脱敏结点进行改写构造“脱敏查询树”,再交由数据库内核执行最终返回脱敏后数据。 + +## openGauss 动态数据脱敏解决方案 + +动态数据脱敏功能在工业界通常以中间插件或数据脱敏系统\[T7\] \[l\(8\] (Data Masking System)的形态加载,通过在客户端与服务端数据库之间拦截命令或结果集来实现脱敏,而openGauss内置动态数据脱敏特性,使数据库无需借助外部插件就可以实现数据脱敏,有效地降低数据中间传输而导致敏感数据泄漏的风险。 + +openGauss从1.1.0版本定义了一套完整的内置安全策略模型,基于该模型用户可以定义资源标签来标识敏感数据,针对不同的资源标签类别和内容可定义相关的安全策略机制,而动态数据脱敏就是其中一种。 + +**内置安全策略** + +内置安全策略(Security Policy)模型,是指通过配置一系列安全策略来对用户行为进行识别和保护,提供了包括保护用户敏感数据的能力。 + +资源标签(Resource Label)是Security Policy的基础,它的本质是一系列数据库资源集合。为了能够统一管理数据库资源,数据管理者可以将多个数据库资源添加到同一个资源标签下,通过对资源标签配置策略来实现批量地对数据库资源进行管理的能力。 + +例如,多张数据表中均包含银行卡号“creditcard”这种敏感信息列,那么可以将这些列统一地划分到资源标签“creditcard\_label”中,随后管理员便可以通过对“creditcard\_label”配置脱敏策略以实现对所有相关敏感列的批量配置。 + +动态数据脱敏策略(Dynamic Data Masking)是Security Policy模型支持的一类安全策略,数据控制者对用户表中的敏感数据识别后\(敏感数据发现和识别不在该特性范围内\),对包含敏感列的资源标签配置数据脱敏策略,并依据不同的应用场景来限制用户对数据的访问行为和信息提取行为,以达到对敏感信息保护的能力。 + +总的来说,资源标签是用来归类数据库资源,并将这些资源统一地投入到各种安全策略中去管理。动态数据脱敏特性便是利用资源标签去识别敏感数据,然后匹配脱敏策略,实现对敏感数据的屏蔽。 + +**动态数据脱敏核心思路** + +openGauss中的动态数据脱敏是以内置安全插件(security plugin)的方式与数据库部署在一起的,业务方面无需额外适配就可使用,SQL的解析与脱敏策略匹配交由openGauss安全策略模块负责,业务在配置脱敏策略后即可生效。 + +**配置脱敏策略** + +脱敏策略的配置主要包括三个方面——脱敏方式(Masking Function)、脱敏对象(Resource Label)、用户过滤器(Masking Filter)。 + +- 脱敏方式,是指该脱敏策略使用何种方式对目标字段进行脱敏,目前openGauss预置了种脱\[z9\] \[l\(10\] 敏方式:creditcardmasking、basicemailmasking、fullemailmasking、alldigitsmasking、shufflemasking、randommasking、maskall。分别适用于不同的脱敏场景。 +- 脱敏对象,是指脱敏策略生效时作用的对象集合(LABEL),若查询目标字段存在于LABEL中,则该字段将会根据脱敏策略进行敏感数据脱敏,需要注意的是,openGauss动态数据脱敏特性支持对仅包含数据列的LABEL进行脱敏。 +- 用户过滤器,指出脱敏策略在何种用户场景下生效,主要涉及USER(用户名)\[z11\] \[l\(12\] 、APP(用户登录客户端名称)、IP(用户所处的ip)。当查询用户同时满足Masking Filter所指定的阈值时,数据脱敏策略才会生效。 + +以下案例演示了一个数据动态脱敏策略创建的基本过程。 + +1.数据准备。 + +确认内置安全策略总开关是否开启。 + 
+![](../figures/zh-cn_image_0000001092137858.jpg) + +准备两张包含敏感字段(creditcard、customername)的表。 + +![](../figures/zh-cn_image_0000001138775125.jpg) + +2.策略配置。 + +策略管理员(拥有poladmin权限)登录数据库,将两张数据表的敏感字段分别添加到资源标签“creditcard\_label”、“customer\_label”中去管理。 + +![](../figures/zh-cn_image_0000001138775127.jpg) + +策略管理员创建两个脱敏策略,其作用如下: + +- 脱敏策略mask\_card\_pol:只有当用户“user1”在‘10.11.12.13’这一IP上使用gsql访问表时,标签creditcard\_label中的列将按照‘creditcardmasking’方式脱敏。 + +- 脱敏策略mask\_name\_pol:默认对于所有查询用户,标签customer\_label中的列将按照‘MASKALL’的方式脱敏。 + + +![](../figures/zh-cn_image_0000001138679671.jpg) + +**触发脱敏策略** + +当系统接收到查询命令时,security\_plugin将在解析器中拦截语义分析生成的查询树(Query),首先根据用户登录信息(用户名、客户端、IP)筛选出满足用户场景的脱敏策略。由于脱敏策略是基于(仅包含表列的)资源标签配置的,因此需要判断查询树的目标节点是否属于某个资源标签,然后将识别到的资源标签与脱敏策略相匹配,根据策略内容将查询树目标节点改写,最终将查询树返还给解析器。 + +由于security\_plugin模块将查询树脱敏逻辑内置于数据库,数据访问者不会感知内置安全策略重写查询树的过程,如同执行普通查询一样去访问数据,同时保护数据隐私。 + +![](../figures/zh-cn_image_0000001092457462.jpg) + +基于配置脱敏策略小节举出的案例,我们可以通过查询数据表来触发脱敏策略。 + +用户user1在满足mask\_card\_pol策略的情况下使用gsql登录数据库查询敏感数据,系统将返回脱敏后的数据结果。而用户user2不满足该条策略,因此该用户查询的数据未做脱敏处理。 + +![](../figures/zh-cn_image_0000001092009378.jpg) + +而无论对于user1还是user2用户,他们查询order表时都会触发脱敏策略mask\_name\_pol,因此customername字段将会被脱敏处理。 + +![](../figures/zh-cn_image_0000001092009382.jpg) + +## openGauss动态数据脱敏优势 + +openGauss动态数据脱敏关注访问用户身份识别问题,用户过滤器(Masking Filter)的配置细化到指定用户、客户端工具、登录IP,策略管理员可以灵活地根据不同业务、不同用户场景来制定不同的脱敏策略,赋予他们不同级别的敏感数据访问能力以适用于各种复杂的生产环境。例如金融、医疗行业中前台服务人员只允许看到身份证、银行卡号部分信息,而运维管理员则可以查询并维护所有用户信息。在保证脱敏场景多样性的前提下,指定用户过滤器时系统会进行严格的互斥校验,避免同一用户同时满足多个用户过滤器而出现策略选择的二义性。 + +![](../figures/zh-cn_image_0000001092457470.jpg) + +其次,openGauss动态数据脱敏更加注重数据库资源的批量管理,在安全策略模型中,将需要管控的数据库资源集中归类划分成标签,对标签的操作实际上就是对指定一簇资源的操作,极大地简化管理流程,提高管理效率。在其他数据库上的动态数据脱敏特性一般是基于单个列或单张表的,脱敏策略与数据库资源是一一对应的,即使想要采用相同的脱敏方式,不同的数据库资源也要配置多个对应的脱敏策略,这无疑提高了策略配置成本,增加了后期运维、批量资源策略管理的难度。因此将需要批量管理的数据库资源划分到资源标签中是openGauss动态数据脱敏的基础,也是其优势之一。 + +![](../figures/zh-cn_image_0000001092457472.jpg) + +动态数据脱敏内置在openGauss内核中,能够在一定程度上保证数据传输路径上的安全性,而外部插件存在绕过风险:当外部插件将客户端发送的SQL或服务端返回的结果集改写后,攻击者依然可以绕过插件直接向数据库发送SQL或截取数据库返回的源数据结果集,导致脱敏插件失效。因此相较于外部插件脱敏的方式,openGauss动态数据脱敏在一定程度上也能降低传输路径上的敏感数据泄漏风险。 + +动态数据脱敏的目的是屏蔽结果集中的敏感数据,它与生产环境相结合,由业务方提供查询接口,再由接口触发数据脱敏,openGauss为了尽可能地保证敏感数据的安全性,在带有RETURNING的增删改、Merge into、CTE、Subquery等绝大多数场景下也适配了脱敏策略,这样能够丰富业务方对敏感数据的操作接口而不单单只能提供数据查询接口。 + +openGauss为了提高易用性,提供了一套简洁的策略配置语法,涵盖了资源标签以及脱敏策略的增删改,用户使用定义语法可以轻松地配置脱敏策略,简化了管理员操作流程。 + +## openGauss 动态数据脱敏的展望 + +openGauss动态数据脱敏特性提供了相对简洁、灵活的策略配置方案,在一定程度上保护用户隐私数据不被泄漏,是openGauss多层级安全防御架构中不可或缺的一环。 + +未来openGauss动态数据脱敏特性将开放更加灵活的策略配置手段包括UDF(User Defined Function)Masking以及条件脱敏(Conditional Masking)等,以期支持更加灵活、丰富的隐私保护场景。 + diff --git a/content/zh/post/chenguang/title/img21.png b/content/zh/post/chenguang/title/img21.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/zh/post/chenguang/title/img21.png differ diff --git a/content/zh/post/chenxiaobin/images/extension_directory.png b/content/zh/post/chenxiaobin/images/extension_directory.png new file mode 100644 index 0000000000000000000000000000000000000000..ba5142db5e4378fc9f6b2a616e0147d8b75b5405 Binary files /dev/null and b/content/zh/post/chenxiaobin/images/extension_directory.png differ diff --git a/content/zh/post/chenxiaobin/images/usage.png b/content/zh/post/chenxiaobin/images/usage.png new file mode 100644 index 0000000000000000000000000000000000000000..a2689e4bfbf713c6d6a62c180d79b93c781cd012 Binary files /dev/null and 
b/content/zh/post/chenxiaobin/images/usage.png differ diff --git a/content/zh/post/chenxiaobin/title/img.png b/content/zh/post/chenxiaobin/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/chenxiaobin/title/img.png differ diff --git "a/content/zh/post/chenxiaobin/\345\246\202\344\275\225\346\217\222\344\273\266\345\214\226\345\234\260\344\270\272openGauss\346\267\273\345\212\240\347\256\227\345\255\220.md" "b/content/zh/post/chenxiaobin/\345\246\202\344\275\225\346\217\222\344\273\266\345\214\226\345\234\260\344\270\272openGauss\346\267\273\345\212\240\347\256\227\345\255\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..3cebc6b4601e4776e8db0b1cb38b347b0ccf9f33 --- /dev/null +++ "b/content/zh/post/chenxiaobin/\345\246\202\344\275\225\346\217\222\344\273\266\345\214\226\345\234\260\344\270\272openGauss\346\267\273\345\212\240\347\256\227\345\255\220.md" @@ -0,0 +1,537 @@ ++++ +title = "如何插件化地为openGauss添加算子" +date = "2021-08-17" +tags = ["openGauss插件化架构"] +archives = "2021-08" +author = "chenxiaobin" +summary = "如何插件化地为openGauss添加算子" +img = "/zh/post/chenxiaobin/title/img.png" +times = "16:30" ++++ + +# 1. openGauss算子概述 + +## 1.1 openGauss执行算子汇总 + +openGauss的算子按类型可分为四类:控制算子、连接算子、扫描算子和物化算子。下面汇总了当前(openGauss2.0.0)已有的算子。 + +| 算子 | 文件 | 类型 | +| --------------- | ----------------------- | ------------ | +| Agg | nodeAgg.cpp | 物化算子 | +| Append | nodeAppend.cpp | 控制算子 | +| BitmapAnd | nodeBitmapAnd.cpp | 控制算子 | +| BitmapHeapscan | nodeBitmapHeapscan.cpp | 扫描算子 | +| BitmapIndexscan | nodeBitmapIndexscan.cpp | 扫描算子 | +| BitmapOr | nodeBitmapOr.cpp | 控制算子 | +| Ctescan | nodeCtescan.cpp | 扫描算子 | +| Foreignscan | nodeForeignscan.cpp | 扫描算子 | +| Functionscan | nodeFunctionscan.cpp | 扫描算子 | +| Group | nodeGroup.cpp | 物化算子 | +| Hash | nodeHash.cpp | 物化算子 | +| Hashjoin | nodeHashjoin.cpp | 连接算子 | +| Indexonlyscan | nodeIndexonlyscan.cpp | 扫描算子 | +| Indexscan | nodeIndexscan.cpp | 扫描算子 | +| Limit | nodeLimit.cpp | 物化算子 | +| LockRows | nodeLockRows.cpp | 控制算子 | +| Material | nodeMaterial.cpp | 物化算子 | +| MergeAppend | nodeMergeAppend.cpp | 控制算子 | +| Mergejoin | nodeMergejoin.cpp | 连接算子 | +| ModifyTable | nodeModifyTable.cpp | 控制算子 | +| Nestloop | nodeNestloop.cpp | 连接算子 | +| PartIterator | nodePartIterator.cpp | 连接算子 | +| Recursiveunion | nodeRecursiveunion.cpp | 控制算子 | +| Result | nodeResult.cpp | 控制算子 | +| Samplescan | nodeSamplescan.cpp | 扫描算子 | +| Seqscan | nodeSeqscan.cpp | 扫描算子 | +| SetOp | nodeSetOp.cpp | 物化算子 | +| Sort | nodeSort.cpp | 物化算子 | +| Stub | nodeStub.cpp | 控制算子 | +| Subplan | nodeSubplan.cpp | 控制算子 | +| Subqueryscan | nodeSubqueryscan.cpp | 扫描算子 | +| Tidscan | nodeTidscan.cpp | 扫描算子 | +| Unique | nodeUnique.cpp | 物化算子 | +| Valuesscan | nodeValuesscan.cpp | 扫描算子 | +| WindowAgg | nodeWindowAgg.cpp | 物化算子 | +| Worktablescan | nodeWorktablescan.cpp | 扫描算子 | +| Extensible | nodeExtensible.cpp | 用于扩展算子 | + +## 1.2 PG新增算子汇总 + +下面列出PG(14devel)相比于openGauss多了哪些算子。 + +| 算子 | 文件 | 类型 | +| ------------------- | ------------------------- | ---- | +| Custom | nodeCustom.c | | +| Gather | nodeGather.c | | +| GatherMerge | nodeGatherMerge.c | | +| IncrementalSort | nodeIncrementalSort.c | | +| Namedtuplestorescan | nodeNamedtuplestorescan.c | | +| ProjectSet | nodeProjectSet.c | | +| TableFuncscan | nodeTableFuncscan.c | | +| Tidrangescan | nodeTidrangescan.c | | + +# 2. 
算子插件(TidRangeScan) + +1.1表格中的算子Extensible类似于PG的算子Custom,其作用是允许插件向数据库增加新的扫描类型。主要分为三步: + +首先,在路径规划期间生成插件增加的扫描路径(ExtensiblePath); + +然后,如果优化器选择该路径作为最优路径,那么需要生成对应的计划(ExtensiblePlan); + +最后,必须提供执行该计划的能力(ExtensiblePlanState)。 + +下面以TidRangeScan为示例,演示如何使用Extensible通过插件化的方式为openGauss新增一个执行算子。 + +## 2.1 功能介绍 + +openGauss中堆表由一个个page组成,每个page包含若干个tuple。tid是tuple的寻址地址,由两个字段组成:(pageid,itemid),pageid代表第几个数据块,itemid代表这个page内的第几条记录。例如tid=(10,1)表示第11个数据块中的第一条记录(pageid从0开始,itemid从1开始)。 + +PostgreSQL 14 devel新增了算子TidRangeScan,可以直接通过tid来范围访问某个page的全部数据。(带来的好处:如果需要更新一张表所有数据时,可以开启多个会话并行去更新不同的page,提高效率。) + +本次展示将该特性通过插件的方式移植到openGauss,插件化的增加一个执行算子。 + +## 2.2 使用说明 + +tidrangescan插件定义了一个bool类型的guc参数:enable_tidrangescan,控制优化器对tidrangescan扫描算子的使用,on表示使用,off表示不使用。 + +![img](../images/usage.png) + +## 2.3 插件边界 + +本小节主要列举调用了哪些内核接口,当内核演进过程中修改了这些接口,有可能会影响插件的使用。 + +| 接口名 | 文件 | 模块 | +| ----------------------------- | ---------------- | ------ | +| ExecInitExpr | execQual.cpp | 优化层 | +| clauselist_selectivity | clausesel.cpp | 优化层 | +| cost_qual_eval | costsize.cpp | 优化层 | +| get_tablespace_page_costs | spccache.cpp | 优化层 | +| get_baserel_parampathinfo | relnode.cpp | 优化层 | +| add_path | pathnode.cpp | 优化层 | +| extract_actual_clauses | restrictinfo.cpp | 优化层 | +| heap_getnext | heapam.cpp | 执行层 | +| ExecClearTuple | execTuples.cpp | 执行层 | +| ExecStoreTuple | execTuples.cpp | 执行层 | +| ExecScanReScan | execScan.cpp | 执行层 | +| heap_beginscan | heapam.cpp | 执行层 | +| heap_rescan | heapam.cpp | 执行层 | +| ExecScan | execScan.cpp | 执行层 | +| heap_endscan | heapam.cpp | 执行层 | +| make_ands_explicit | clauses.cpp | 执行层 | +| deparse_context_for_planstate | ruleutils.cpp | 执行层 | +| deparse_expression | ruleutils.cpp | 执行层 | +| ExplainPropertyText | explain.cpp | 执行层 | + +## 2.4 设计实现 + +本节提到的hook在第3章《hook点总述》会做详细说明。 + +附社区PR:https://gitee.com/opengauss/Plugin/pulls/1 + +### 2.4.1 插件开发通用流程 + +#### 2.4.1.1 Makefile + +在openGauss源码的contrib目录下新建开发插件的目录,这里为tidrangescan。在该目录下新建Makefile文件。 + +```makefile +# contrib/tidrangescan/Makefile +MODULES = tidrangescan # 模块名 +EXTENSION = tidrangescan # 扩展的名称 +REGRESS = tidrangescan # 回归测试 +REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress -c 0 -d 1 --single_node # 回归测试相关的选项 +DATA = tidrangescan--1.0.sql # 插件安装的SQL文件 + +override CPPFLAGS :=$(filter-out -fPIE, $(CPPFLAGS)) –fPIC # fPIC选项 + +# 以下是openGauss构建插件相关的命令,保留即可 +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/tidrangescan +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif +``` + +#### 2.4.1.2 control文件 + +新建控制文件,这里为tidrangescan.control。内容如下: + +``` +# tidrangescan extension +comment = 'example implementation for custom-scan-provider interface' default_version = '1.0' # 与Makefile里DATA属性的sql文件名的版本保持一致 module_pathname = '$libdir/tidrangescan' +relocatable = true +``` + +#### 2.4.1.3 sql文件 + +sql文件命名格式为*extensionName*--*version*.sql,*version*即为上述版本号,这里为`tidrangescan--1.0.sql`。在这里编写所需的函数。 + +```sql +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION tidrangescan" to load this file. 
\quit + +CREATE FUNCTION pg_catalog.tidrangescan_invoke() RETURNS VOID AS '$libdir/tidrangescan','tidrangescan_invoke' LANGUAGE C STRICT; +``` + +#### 2.4.1.4 回归测试用例 + +创建sql和expected目录,分别存放测试用例的sql脚本和预期输出,例如这里为tidrangescan.sql和tidrangescan.out。 + +#### 2.4.1.5 源文件及插件目录总览 + +创建插件的头文件和cpp文件,这是实现插件的核心,下文主要介绍该插件代码层的设计与实现。 + +至此插件总体目录概览如下。 + +![img](../images/extension_directory.png) + +### 2.4.2 优化器 + +#### 2.4.2.1 添加路径 + +将set_rel_pathlist_hook赋值为SetTidRangeScanPath,该函数解析扫描表的查询条件,当存在tid范围查询时调用add_path添加ExtensiblePath,计算代价,并将创建计划的接口tidrangescan_path_methods存入path中。 + +```cpp +static void SetTidRangeScanPath(PlannerInfo *root, RelOptInfo *baserel, Index rtindex, RangeTblEntry *rte) +{ + ... + tidrangequals = TidRangeQualFromRestrictInfoList(baserel->baserestrictinfo, baserel); + ... + if (tidrangequals != NIL) { + cpath = (ExtensiblePath*)palloc0(sizeof(ExtensiblePath)); + cpath->path.type = T_ExtensiblePath; + cpath->path.pathtype = T_ExtensiblePlan; + cpath->path.parent = baserel; + cpath->extensible_private = tidrangequals; + cpath->methods = &tidrangescan_path_methods; + + cost_tidrangescan(&cpath->path, root, baserel, tidrangequals, cpath->path.param_info); + add_path(root, baserel, &cpath->path); + } +} + +static ExtensiblePathMethods tidrangescan_path_methods = { + "tidrangescan", /* ExtensibleName */ + PlanTidRangeScanPath, /* PlanExtensiblePath */ +}; +``` + +#### 2.4.2.2 创建计划 + +上述的tidrangescan_path_methods定义了创建计划函数PlanTidRangeScanPath,根据最优路径生成计划ExtensiblePlan,同时将创建计划状态节点接口tidrangescan_scan_methods存入plan。 + +```cpp +static Plan *PlanTidRangeScanPath(PlannerInfo *root, RelOptInfo *rel, ExtensiblePath *best_path, List *tlist, List *clauses, List *custom_plans) +{ + ExtensiblePlan *node = makeNode(ExtensiblePlan); + Plan *plan = &node->scan.plan; + List *tidrangequals = best_path->extensible_private; + ... 
+ node->extensible_exprs = tidrangequals; + node->scan.plan.startup_cost = best_path->path.startup_cost; + node->scan.plan.total_cost = best_path->path.total_cost; + node->scan.plan.plan_rows = best_path->path.rows; + node->scan.plan.plan_width = rel->width; + node->methods = &tidrangescan_scan_methods; + return plan; +} + +static ExtensiblePlanMethods tidrangescan_scan_methods = { + "tidrangescan", /* ExtensibleName */ + CreateTidRangeScanState, /* CreateExtensiblePlanState */ +}; +``` + +### 2.4.3 执行器 + +#### 2.4.3.1 创建计划状态节点 + +上述的tidrangescan_scan_methods定义了创建PlanState函数CreateTidRangeScanState,根据传入的plan返回PlanState,同样将后续执行器执行的若干方法结构体tidrangescan_exec_methods存入PlanState。 + +```cpp +Node *CreateTidRangeScanState(ExtensiblePlan *custom_plan) +{ + TidRangeScanState *tidrangestate; + /* + * create state structure + */ + tidrangestate = (TidRangeScanState*)palloc0(sizeof(TidRangeScanState)); + NodeSetTag(tidrangestate, T_ExtensiblePlanState); + tidrangestate->css.methods = &tidrangescan_exec_methods; + /* + * mark scan as not in progress, and TID range as not computed yet + */ + tidrangestate->trss_inScan = false; + return (Node*)&tidrangestate->css; +} + +static ExtensibleExecMethods tidrangescan_exec_methods = { + "tidrangescan", /* ExtensibleName */ + BeginTidRangeScan, /* BeginExtensiblePlan */ + ExecTidRangeScan, /* ExecExtensiblePlan */ + EndTidRangeScan, /* EndExtensiblePlan */ + ExecReScanTidRangeScan, /* ReScanExtensiblePlan */ + ExplainTidRangeScan /* ExplainExtensiblePlan */ +}; +``` + +#### 2.4.3.2 执行层hook + +tidrangescan_exec_methods定义了五个接口,分别是执行层各个阶段的主函数:BeginTidRangeScan、ExecTidRangeScan、EndTidRangeScan、ExecReScanTidRangeScan、ExplainTidRangeScan。 + +```cpp +static void BeginTidRangeScan(ExtensiblePlanState *node, EState *estate, int eflags) +{ + TidRangeScanState *ctss = (TidRangeScanState *) node; + ExtensiblePlan *cscan = (ExtensiblePlan *) node->ss.ps.plan; + ctss->css.ss.ss_currentScanDesc = NULL; /* no table scan here */ + /* + * initialize child expressions + */ + ctss->css.ss.ps.qual = (List*)ExecInitExpr((Expr*)cscan->scan.plan.qual, (PlanState *)ctss); + TidExprListCreate(ctss); +} + +static TupleTableSlot * ExecTidRangeScan(ExtensiblePlanState *pstate) +{ + return ExecScan(&pstate->ss, (ExecScanAccessMtd) TidRangeNext, (ExecScanRecheckMtd) TidRangeRecheck); +} + +static void EndTidRangeScan(ExtensiblePlanState *node) +{ + TableScanDesc scan = node->ss.ss_currentScanDesc; + if (scan != NULL) + heap_endscan(scan); + /* + * Free the exprcontext + */ + ExecFreeExprContext(&node->ss.ps); + + /* + * clear out tuple table slots + */ + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + ExecClearTuple(node->ss.ss_ScanTupleSlot); +} + +static void ExecReScanTidRangeScan(ExtensiblePlanState *node) +{ + /* mark scan as not in progress, and tid range list as not computed yet */ + ((TidRangeScanState*)node)->trss_inScan = false; + /* + * We must wait until TidRangeNext before calling table_rescan_tidrange. 
+ */ + ExecScanReScan(&node->ss); +} + +static void ExplainTidRangeScan(ExtensiblePlanState *node, List *ancestors, ExplainState *es) +{ + TidRangeScanState *ctss = (TidRangeScanState *) node; + ExtensiblePlan *cscan = (ExtensiblePlan *) ctss->css.ss.ps.plan; + /* logic copied from show_qual and show_expression */ + if (cscan->extensible_exprs) { + bool useprefix = es->verbose; + Node *qual; + List *context; + char *exprstr; + /* Convert AND list to explicit AND */ + qual = (Node *) make_ands_explicit(cscan->extensible_exprs); + /* Set up deparsing context */ + context = deparse_context_for_planstate((Node*)ctss, ancestors, es->rtable); + /* Deparse the expression */ + exprstr = deparse_expression(qual, context, useprefix, false); + /* And add to es->str */ + ExplainPropertyText("tid range quals", exprstr, es); + } +} +``` + +### 2.4.4 改造点 + +#### 2.4.4.1 无法调用内核static函数 + +在移植过程中,受限于插件实现的方式,无法调用内核的static函数,需要拷贝到插件侧或者对原有的代码作改造。 + +执行层获取单个tuple阶段,PG在heapam.c中定义了一个函数heap_getnextslot_tidrange,其中调用了static函数heapgettup_pagemode和heapgettup。在将heap_getnextslot_tidrange搬到openGauss插件时,由于无法调用这两个static函数,需要将其改为调用heap_getnext,通过heap_getnext访问heapgettup_pagemode和heapgettup。 + +# 3. hook点总述 + +## 3.1 优化器 + +### 3.1.1 添加路径 + +通常用来产生ExtensiblePath对象,并使用add_path把它们加入到rel中。 + +插入位置所在的函数:set_rel_pathlist + +```cpp +typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root, + RelOptInfo *rel, + Index rti, + RangeTblEntry *rte); +extern THR_LOCAL PGDLLIMPORT set_rel_pathlist_hook_type set_rel_pathlist_hook; +``` + +ExtensiblePath定义如下。 + +```cpp +typedef struct ExtensiblePath { + Path path; + uint32 flags; /* mask of EXTENSIBLEPATH_* flags */ + List* extensible_paths; /* list of child Path nodes, if any */ + List* extensible_private; + const struct ExtensiblePathMethods* methods; +} ExtensiblePath; +``` + +* flags是一个标识,如果该自定义的路径支持反向扫描,则它应该包括EXTENSIBLEPATH_SUPPORT_BACKWARD_SCAN,如果支持标记和恢复则包括EXTENSIBLEPATH_SUPPORT_MARK_RESTORE。 + +* extensible_paths是这个自定义路径节点的子Path节点列表 + +* extensible_private可用来存储该自定义路径的私有数据。 + +* methods必须包含根据该路径生成计划的方法。ExtensiblePathMethods结构如下,主要实现PlanExtensiblePath。 + +```cpp +typedef struct ExtensiblePathMethods { + const char* ExtensibleName; + + /* Convert Path to a Plan */ + struct Plan* (*PlanExtensiblePath)(PlannerInfo* root, RelOptInfo* rel, struct ExtensiblePath* best_path, + List* tlist, List* clauses, List* extensible_plans); +} ExtensiblePathMethods; +``` + +### 3.1.2 添加连接路径 + +提供连接路径,同样创建ExtensiblePath路径。 + +插入位置所在的函数:add_paths_to_joinrel + +```cpp +typedef void (*set_join_pathlist_hook_type) (PlannerInfo *root, + RelOptInfo *joinrel, + RelOptInfo *outerrel, + RelOptInfo *innerrel, + JoinType jointype, + SpecialJoinInfo *sjinfo, + Relids param_source_rels, + SemiAntiJoinFactors *semifactors, + List *restrictlist); +extern THR_LOCAL PGDLLIMPORT set_join_pathlist_hook_type set_join_pathlist_hook; +``` + +### 3.1.3 创建计划 + +调用上述ExtensiblePath中的methods定义的接口PlanExtensiblePath,将自定义路径转换为一个完整的计划,返回ExtensiblePlan。 + +插入位置所在的函数:create_scan_plan->create_extensible_plan + +```cpp +typedef struct ExtensiblePlan { + Scan scan; + + uint32 flags; /* mask of EXTENSIBLEPATH_* flags, see relation.h */ + + List* extensible_plans; /* list of Plan nodes, if any */ + + List* extensible_exprs; /* expressions that extensible code may evaluate */ + + List* extensible_private; /* private data for extensible code */ + + List* extensible_plan_tlist; /* optional tlist describing scan + * tuple */ + Bitmapset* extensible_relids; /* RTIs generated by this scan */ + + ExtensiblePlanMethods* methods; +} 
ExtensiblePlan; +``` + +* 和ExtensiblePath一样,flags同样是一个标识。 + +* extensible_plans可以用来存放子Plan节点 + +* extensible_exprs用来存储需要由setrefs.cpp和subselect.cpp修整的表达式树。 + +* extensible_private用来存储只有该自定义算子使用的私有数据。 + +* extensible_plan_tlist描述目标列 + +* extensible_relids为该扫描节点要处理的关系集合 + +* methods必须包含生成该计划对应的计划节点PlanState的方法。ExtensiblePlanMethods结构如下,主要实现CreateExtensiblePlanState。 + +```cpp +typedef struct ExtensiblePlanMethods { + char* ExtensibleName; + + /* Create execution state (ExtensiblePlanState) from a ExtensiblePlan plan node */ + Node* (*CreateExtensiblePlanState)(struct ExtensiblePlan* cscan); +} ExtensiblePlanMethods; +``` + +## 3.2 执行器 + +### 3.2.1 创建计划状态节点 + +调用上述ExtensiblePlanMethods中的methods定义的接口CreateExtensiblePlanState,为这个ExtensiblePlan分配一个ExtensiblePlanState。 + +插入位置所在的函数:ExecInitNodeByType->ExecInitExtensiblePlan + +```cpp +typedef struct ExtensiblePlanState { + ScanState ss; + uint32 flags; /* mask of EXTENSIBLEPATH_* flags, see relation.h */ + List* extensible_ps; /* list of child PlanState nodes, if any */ + const ExtensibleExecMethods* methods; +} ExtensiblePlanState; +``` + +* flags含义同ExtensiblePath和ExtensiblePlan一样 + +* extensible_ps为该计划节点的子节点。 + +* methods为包含多个执行所需接口的结构体ExtensibleExecMethods,在下文做具体介绍。 + +### 3.2.2 执行层hook + +上面CustomScanState的成员CustomExecMethods定义了几个hook点 + +```cpp +typedef struct ExtensibleExecMethods { + const char* ExtensibleName; + + /* Executor methods: mark/restore are optional, the rest are required */ + void (*BeginExtensiblePlan)(struct ExtensiblePlanState* node, EState* estate, int eflags); + TupleTableSlot* (*ExecExtensiblePlan)(struct ExtensiblePlanState* node); + void (*EndExtensiblePlan)(struct ExtensiblePlanState* node); + void (*ReScanExtensiblePlan)(struct ExtensiblePlanState* node); + void (*ExplainExtensiblePlan)(struct ExtensiblePlanState* node, List* ancestors, struct ExplainState* es); +} ExtensibleExecMethods; +``` + + 1) BeginExtensiblePlan完成所提供的ExtensiblePlanState的初始化。标准的域已经被ExecInitExtensiblePlan初始化,但是任何私有的域应该在这里被初始化。 + + 插入位置所在的函数:ExecInitNodeByType->ExecInitExtensiblePlan + + 2) ExecExtensiblePlan执行扫描,取下一个扫描元组,如果还有任何元组剩余,它应该用当前扫描方向的下一个元组填充ps_ResultTupleSlot,并且接着返回该元组槽。如果没有,则用NULL填充或者返回一个空槽。 + + 插入位置所在的函数:ExecProcNode->ExecProcNodeByType + + 3) EndExtensiblePlan清除任何与ExtensiblePlanState相关的私有数据。这个方法是必需的,但是如果没有相关的数据或者相关数据将被自动清除,则它不需要做任何事情。 + + 插入位置所在的函数:ExecEndNodeByType->ExecEndExtensiblePlan + + 4) ReScanExtensiblePlan把当前扫描倒回到开始处,并且准备重新扫描该关系。 + + 插入位置所在的函数:ExecReScan->ExecReScanByType + + 5) ExplainExtensiblePlan为一个自定义扫描计划节点的EXPLAIN输出额外的信息。这个回调函数是可选的。即使没有这个回调函数,被存储在`ScanState中的公共的数据(例如目标列表和扫描关系)也将被显示,但是该回调函数允许显示额外的信息(例如私有状态)。 + + 插入位置所在的函数:ExplainNode->show_pushdown_qual \ No newline at end of file diff --git "a/content/zh/post/chenxiaobin/\345\260\206PostgreSQL\346\217\222\344\273\266\347\247\273\346\244\215\345\210\260openGauss\346\214\207\345\257\274.md" "b/content/zh/post/chenxiaobin/\345\260\206PostgreSQL\346\217\222\344\273\266\347\247\273\346\244\215\345\210\260openGauss\346\214\207\345\257\274.md" new file mode 100644 index 0000000000000000000000000000000000000000..3952a38412f6d3aae561aa2f352dfde3084634b0 --- /dev/null +++ "b/content/zh/post/chenxiaobin/\345\260\206PostgreSQL\346\217\222\344\273\266\347\247\273\346\244\215\345\210\260openGauss\346\214\207\345\257\274.md" @@ -0,0 +1,247 @@ ++++ +title = "将PostgreSQL插件移植到openGauss指导" +date = "2021-08-10" +tags = ["openGauss插件化架构"] +archives = "2021-08" +author = "chenxiaobin" +summary = "将PostgreSQL插件移植到openGauss指导" +img = "/zh/post/chenxiaobin/title/img.png" +times = "16:30" ++++ 
+ +## 1 概述 + +PostgreSQL社区提供了丰富的插件,但由于openGauss和PostgreSQL存在一定的差异,如线程/进程模型、系统表和视图等,无法直接为openGauss所用,不可避免的需要在插件上做整改。 + +本文档主要对Postgresql插件移植到openGauss的过程提供指导说明,旨在让开发人员对PG插件所需要的修改有一个具体的了解,基于该文档,可基本实现PG插件移植到openGauss。 + +## 2 约束 + +由于openGauss与PostgreSQL在内核上存在不少差异,这篇文档未能覆盖所有这些差异,因此仅依赖该文档有可能无法实现PG插件的完全迁移,部分差异需要开发者深入内核源码识别,然后可将识别出来的差异补充到该博客的第9章对应小节的表格中(博客对应的gitee地址:[将PostgreSQL插件移植到openGauss指导](https://gitee.com/opengauss/blog/blob/master/content/zh/post/chenxiaobin/%E5%B0%86PostgreSQL%E6%8F%92%E4%BB%B6%E7%A7%BB%E6%A4%8D%E5%88%B0openGauss%E6%8C%87%E5%AF%BC.md),具体操作可见blog仓库的`README.md`),有任何问题可在博客下方留言讨论。 + +## 3 移植步骤 + +1) 将PG插件的代码拷贝到openGauss源码的contrib目录下 + +2) 配置环境变量,需要将数据库的bin和lib加在操作系统的环境变量PATH和LD_LIBRARY_PATH中 + +3) 到插件目录下,执行`make && make install`,编译安装插件。 + +4) 编译成功后,到数据库中执行`create extension extension_name`即可使用。 + +通常步骤3和4不会直接成功,需要一些必须的修改。下面分类别说明移植PG插件所需要做的修改。 + +## 4 Makefile文件 + +1) 当前有两种方式支持插件编译,一种是依赖源码编译,一种是用pgxs的方式编译,支持插件在一个已经安装的数据库服务上进行编译。建议选择前者的方式,如果采用后者,需要定义USE_PGXS,但是可能出现部分头文件找不到的问题,这时候需要到源码拷贝头文件到目标目录。 + +```makefile +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/pg_freespacemap +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif +``` + +2) -fPIC 作用于编译阶段,告诉编译器产生与位置无关代码(Position-Independent Code)。使用-fPIC,可以使得动态库可以被多个程序共享。不加fPIC加载的so,要在加载时根据加载到的位置再次重定位。 + +```makefile +override CPPFLAGS :=$(filter-out -fPIE, $(CPPFLAGS)) -fPIC +``` + +## 5 类型转换 + +1) ANSI C规定,void指针可以复制给其他任意类型的指针,其他任意类型的指针也可以复制给void指针,他们之间复制不需要强制类型转换。但是c++不支持,需要做强制类型转换。 + +``` +buffer = palloc(MAX_LINESIZE); -> buffer = (char*)palloc(MAX_LINESIZE); +``` + + 2) 部分c++编译器不支持const char\*到char\*的隐式转换,需要做强制类型转换。 + +## 6 函数声明 + +1) C语言中并没有重载和类这些特性,编译出的符号与C++不同,例如print(int i),不会被编译为_print_int,而是直接编译为_print等。因此如果直接在C++中调用C的函数会失败,例如调用print(3),c++中实际上会去找_print_int(3),这样就会找不到。加上extern “C”,指示编译器这部分代码按C语言来进行编译,而不是C++。 + +```c++ +extern PGDLLEXPORT Datum orafce_to_char_timestamp(PG_FUNCTION_ARGS); -> +extern "C" PGDLLEXPORT Datum orafce_to_char_timestamp(PG_FUNCTION_ARGS); +``` + +可以通过nm -D so文件查看生成的符号。 + +## 7 安全函数整改 + +1) 推荐使用安全函数(可见securec.h),并对安全函数的返回值作检查,openGauss定义了几个常用的检查宏,如下。 + +```c++ +#define check_memcpy_s(r) securec_check_c((r), "", "") +#define check_memmove_s(r) securec_check_c((r), "", "") +#define check_memset_s(r) securec_check_c((r), "", "") +#define check_strcpy_s(r) securec_check_c((r), "", "") +#define check_strncpy_s(r) securec_check_c((r), "", "") +#define check_strcat_s(r) securec_check_c((r), "", "") +#define check_strncat_s(r) securec_check_c((r), "", "") +#define check_gets_s(r) securec_check_ss_c((r), "", "") +#define check_sprintf_s(r) securec_check_ss_c((r), "", "") +#define check_snprintf_s(r) securec_check_ss_c((r), "", "") +#define check_scanf_s(r) securec_check_ss_c((r), "", "") +``` + +下面是安全函数整改的示例。 + +```c++ +memcpy(d, u, clen); -> check_memcpy_s(memcpy_s(d, strlen(d), u, clen)); +``` + +为了方便和完全地作安全函数整改,这里提供一个查找危险函数的正则表达式。 + +``` +(wmemcpy\()|(wmemove\()|(memmove\()|(wcscpy\()|(wcsncpy\()|(strcat\()|(wcscat\()|(strncat\()|(wcsncat\()|(strtok\()|(wcstok\()|(sprintf\()|(swprintf\()|(vsprintf\()|(vswprintf\()|(snprintf\()|(vsnprintf\()|(vsnprintf_truncated\()|(snprintf_truncated\()|(scanf\()|(wscanf\()|(vscanf\()|(vwscanf\()|(fscanf\()|(fwscanf\()|(vfscanf\()|(vfwscanf\()|(sscanf\()|(swscanf\()|(vsscanf\()|(vswscanf\()|(gets\()|(strcpy\()|(strcpy\()|(strncpy\()|(strncpy\()|(strcat\()|(strncat\()|(memcpy\()|(memcpy\()|(memset\()|(memset\() +``` + +## 8 变量转换 + 
+1) 对比PostgreSQL,openGauss收集了原有的全局变量,将其收集在了g_instance、t_thrd、u_sess(分别是全局变量、线程变量和会话变量)等结构体内,因此需要作相应替换(通过编译报错体现,需要到内核代码层面查看变量具体存放位置)。插件的全局变量可通过nm -D so | grep ‘B’排查。(具体见7.7) + +```c++ +econtext = error_context_stack; -> econtext = t_thrd.log_cxt.error_context_stack; +``` + + 2) PG采用进程模型,用户会话进来时会创建一个独立的进程去处理,此时插件定义的全局变量在该进程内就是唯一的会话变量。而openGauss采用线程模型,所有会话共享同一份全局变量,因此需要将全局变量修改为会话变量。对于只读的全局变量,保持原样即可,而对于多次修改的变量,需要作如下修改。 + +a. 如果不考虑在线程池模式下使用插件,将全局变量修改为THR_LOCAL变量,即线程变量,因为用户会话进来会创建一个独立的线程。 + +b. 如果需要线程池,就需要作额外的修改。线程池模式下,一个用户会话可能会切换多个线程,单纯的将全局变量改为线程变量,在切换线程时会丢失对该变量的修改。openGauss提供了插件自定义会话变量的方式,具体实现如下。(以dblink为例) + +1. 内核侧在u_sess中定义一个指针数组`extension_session_vars_array`,和标识数组大小的变量`extension_session_vars_array_size`,数组用于存放插件会话变量的结构体。 + +```c++ +typedef struct knl_session_attr_common { + … + uint32 extension_session_vars_array_size; + void** extension_session_vars_array; +} knl_session_attr_common; +``` + +2. 插件侧需定义一个全局的下标变量,用于获取数组元素,并且提供`set_extension_index`函数,内核侧会调用该函数来设置下标。示例如下。 + +```c++ +static uint32 dblink_index; +void set_extension_index(uint32 index) { + dblink_index = index; +} +``` + +3. 此外,插件侧还需要定义步骤1提到的会话变量结构体,存放该插件自身所有的会话变量,以及提供函数init_session_vars,主要是初始化该结构体,并把指针存放在数组的对应下标位置。示例如下。 + +```c++ +#include "commands/extension.h" +typedef struct dblink_session_context { + remoteConn* pconn; + HTAB* remoteConnHash; +} dblink_session_context; + +void init_session_vars(void) +{ + RepallocSessionVarsArrayIfNecessary(); + dblink_session_context* psc = (dblink_session_context*)MemoryContextAllocZero(u_sess->self_mem_cxt, sizeof(dblink_session_context)); + u_sess->attr.attr_common.extension_session_vars_array[dblink_index] = psc; + psc->pconn = NULL; + psc->remoteConnHash = NULL; +} +``` + +4. 最终,在插件使用会话变量时,根据下标到数组中获取对应的结构体指针即可。 + +```c++ +dblink_session_context* get_session_context() +{ + if (u_sess->attr.attr_common.extension_session_vars_array[dblink_index] == NULL) { + init_session_vars(); + } + return (dblink_session_context*)u_sess->attr.attr_common.extension_session_vars_array[dblink_index]; +} +void example() +{ + remoteConn* pconn = get_session_context()->pconn; +} +``` + +具体方案实现可见社区PR(https://gitee.com/opengauss/openGauss-server/pulls/1101),插件整改可参考其中对dblink的整改。 + +## 9 其他 + +除了上述修改点,还存在很多一些较为细节的地方,其中包括有C和C++的差异,例如在C++中new关键字不能作标识符等;大多数还是openGauss和PostgreSQL内核上的差异,下文会对这些差异作详细说明。此外,有些插件可能是基于PG内核新特性开发的,openGauss并不支持,可以考虑将特性整合到插件,必要时修改内核。 + +下面列举openGauss和PostgreSQL(REL_13_STABLE)内核上的差异,第2章中提到该部分需要不断更新完善,目前仅列出极少部分。 + +### 9.1 API + +| 序号 | API_01 | +| -------------- | ------------------------------------------------------------ | +| **PostgreSQL** | void table_close(Relation relation, LOCKMODE lockmode); | +| **openGauss** | #define heap_close(r,l) relation_close(r,l) void relation_close(Relation relation, LOCKMODE lockmode); | +| **作用** | close any relation | +| **差异** | 名称不同 | + + + +| **序号** | API_02 | +| -------------- | ------------------------------------------------------------ | +| **PostgreSQL** | Relation table_open(Oid relationId, LOCKMODE lockmode) | +| **openGauss** | Relation heap_open(Oid relationId, LOCKMODE lockmode, int2 bucketid=-1); | +| **作用** | open a heap relation by relation OID | +| **差异** | 名称不同;openGauss的heap_open增加了一个可选参数bucketid | + +### 9.2 系统表 + +|
**序号**
| SYSTAB_01 | +| ---------------------------------------- | ------------------------------------------------------------ | +| **系统表** | pg_class | +| **差异** | openGauss新增字段:reltoastidxid, reldeltarelid, reldeltaidx, relcudescrelid, relcudescidx, relhasoids, relhaspkey, relcmprs, relhasclusterkey, relrowmovement, parttype, relfrozenxid64, relbucket, relbucketkey
PostgreSQL 新增字段:relrowsecurity, relforcerowsecurity, relispopulated, relispartition, relrewrite , relminmxid , relpartbound relkind
字段可选值差异:PostgreSQL中用p和I表示分区表和分区索引,openGauss用字段parttype表示。 | +| **备注** | 具体描述可见《开发者指南》-系统表和系统视图-系统表-PG_CLASS | + +### 9.3 系统视图 + +| **序号** | SYSVIEW_01 | +| ---------- | ------------------------------------------------------------ | +| **系统表** | pg_tables | +| **差异** | openGauss新增字段:tablecreator, created, last_ddl_time PostgreSQL 新增字段:rowsecurity | +| **备注** | 具体描述可见《开发者指南》-系统表和系统视图-系统视图-PG_TABLES | + +### 9.4 系统函数 + +### 9.5 LOCK + +### 9.6 Memory Context + +### 9.7 全局变量 + +| **PostgreSQL** | **openGauss** | **作用域** | +| ------------------- | ------------------------------------ | -------------- | +| error_context_stack | t_thrd.log_cxt.error_context_stack | Thread | +| WalSndCaughtUp | t_thrd.walsender_cxt.walSndCaughtUp | Thread | +| disable_cost | g_instance.cost_cxt.disable_cost | Instance | +| cpu_tuple_cost | u_sess->attr.attr_sql.cpu_tuple_cost | Session | + +## 10 常见错误信息 + +1) 编译安装时报错:dangerous relocation: unsupported relocation + +解决方法:参考4.2,在Makefile中添加下面一句。 + + override CPPFLAGS :=$(filter-out -fPIE, $(CPPFLAGS)) -fPIC + +2) 编译安装时报错:error: invalid conversion from ‘void*’ to ‘char*’ [-fpermissive] + +解决方法:参考第5章类型转换 + +3) create extension时报错:could not find function "xxx" in file "xxx.so" + +解决方法:参考第6章函数声明。 \ No newline at end of file diff --git a/content/zh/post/cym/.DS_Store b/content/zh/post/cym/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..9b88a574476e8fbfaf07e75d4759acf5cd3f1038 Binary files /dev/null and b/content/zh/post/cym/.DS_Store differ diff --git "a/content/zh/post/cym/OpenGauss\344\270\255\347\232\204AI\346\212\200\346\234\257\342\200\224\342\200\224\346\205\242SQL\347\232\204\346\243\200\346\265\213.md" "b/content/zh/post/cym/OpenGauss\344\270\255\347\232\204AI\346\212\200\346\234\257\342\200\224\342\200\224\346\205\242SQL\347\232\204\346\243\200\346\265\213.md" new file mode 100644 index 0000000000000000000000000000000000000000..c774613e5da7326e82ff9464fa9ee5ec845d9c06 --- /dev/null +++ "b/content/zh/post/cym/OpenGauss\344\270\255\347\232\204AI\346\212\200\346\234\257\342\200\224\342\200\224\346\205\242SQL\347\232\204\346\243\200\346\265\213.md" @@ -0,0 +1,386 @@ ++++ + +title = "openGauss中的AI技术——慢SQL的检测" +date = "2021-11-30" +tags = ["openGauss社区开发入门"] +archives = "2021-11" +author = "chen-yiming-cs" +summary = "openGauss社区开发入门" + ++++ + +# 一、概念部分 + + 本文主要介绍openGauss中的AI技术——慢SQL发现技术。 +## (1)慢SQL相关概念 + 慢SQL:执行效率低、耗时长的SQL语句。 + 慢SQL发现:基于历史SQL语句信息进行模型训练,并用训练好的模型进行SQL语句的预测,利用预测结果判断该SQL语句是否是潜在的慢SQL。当发现潜在的慢SQL后,开发者便可以进行针对性优化或者风险评估,以防业务上线后发生问题。 + 慢SQL的产生原因:SQL编写问题、锁、业务实例相互干扰、对IO/CPU资源的争用、服务器硬件、数据库BUG等,其中SQL编写问题可以说占了主要部分,所以发现慢SQL并提示用户更改或者程序自动优化是很有必要的。 + +## (2)慢SQL的三个阶段 + +- 阶段1(发现问题): + 对用户输入的一批业务SQL语句进行分析,推断SQL语句执行时间的快慢,进而可以将评估为慢SQL的语句识别出来。(目前openGauss已经具备这方面的能力,比如本次报告介绍的慢SQL发现技术) + +- 阶段2(找到原因): + 对识别出的潜在慢SQL进行根因诊断,判断这些SQL语句是因为什么慢,例如比较常见的原因可能是数据量过大、SQL语句自身过于复杂、容易产生并发的锁冲突、没有创建索引导致全表扫描等等。 + +- 阶段3(给出处理建议): + 对于已经识别出来的慢SQL语句的可能问题源,给出针对性的解决方案,譬如可以提示用户进行SQL语句的改写、创建索引等。(openGauss有部分相关的功能,如索引推荐等) + + + +## (3)慢SQL发现的AI技术 + +**①基于执行的在线SVM模型** + + +![img](./image/2.png "#left") + + 第一种技术是利用了SVM模型来预测是否为慢SQL。 + 这里主要是有两个环节,分别是训练阶段和测试阶段。 + **训练阶段**: + Data Collection模块执行作为训练集的语句,Data Extraction模块收集执行的语句特征及执行时间,包括执行计划及算子级别的信息。Model Building模块基于计划级别特征与算子级别信息分别训练SVM模型,再将两模型通过误差分布结合,生成最终的预测模型。 + + **测试阶段:** + Query Planning模块生成待预测语句的执行计划,Feature Extraction抽取这些计划中的特征,整合后投入训练阶段生成的模型中产生预测结果。 + +整体的算法流程图如下: + + +![img](./image/3.png "#left") + + +**②基于执行计划的DNN模型** + +![img](./image/4.png "#left") + + 此算法与第一种相似,区别就是其中的Model 
Building模块中选择的算法不同。如上图所示,是此技术的算法架构图,算法的概述如下。 + 该算法依然是将执行计划中的算子信息输入到深度学习网络中,从而对执行时间进行预测的。对于每个算子,收集左右子树的向量化特征、优化器代价及执行时间,输入与之对应的模型中,预测该算子的向量化特征及执行时间等。上图中显示了一个join操作的预测流程,其左右子树均为Scan算子,将两个Scan算子通过对应的模型预测出的向量化特征、执行时间,以及该join算子的优化器评估代价作为入参,输出join算子模型得到该操作的向量化特征及预测出的执行时间。上述过程是个自底向上的过程。 +此技术流程图如下: + +![img](./image/5.png "#left") + +当然,此技术也有很多缺点: + 需要通过已预测算子不断修正模型,预测过程会较慢。 + 对环境变化感知差,如数据库参数变化会使得原模型几乎完全失效。 + 预测过程依赖待测语句的执行计划,加重了数据库的负荷,对于OLTP场景格外不适用。 + + +## (4)总体流程 + + 程序开始后,会先去获取SQL流水,然后根据我们设置的初始化参数来确定是使用模板法还是深度学习方法,如果是模板化方法,具体流程如下: +1°检测本地是否存在对应实例的历史模板信息,如果存在,则加载该模板信息,如果不存在,则对该模板进行初始化。 +2°基于SQL数据,提取SQL的粗粒度模板信息。粗粒度模板表示将SQL中表名、列名和其他敏感信息去除之后的SQL语句模板,该模板只保留最基本的SQL语句骨架。 +3° 基于SQL数据,提取SQL细粒度的模板信息。细粒度模板表示在粗粒度模板信息的基础上保留表名、列名等关键信息的SQL语句模板。细粒度模板相对粗粒度模板保留了更多SQL语句的信息。 +4°执行训练过程时,首先构造SQL语句的基于粗粒度模板和细粒度模板信息,例如粗粒度模板ID、执行平均时间、细模板执行时间序列、执行平均时间和基于滑动窗口计算出的平均执行时间等。最后将上述模板信息进行储存。 +5°执行预测过程时,首先导入对应实例的模板信息,如果不存在该模板信息,则直接报错退出;否则继续检测是否存在该SQL语句的粗粒度模板信息,如果不存在,则基于模板相似度计算方法在所有粗粒度模板里面寻找最相似的N条模板,之后基于KNN(k nearest neighbor,K近邻)算法预测出执行时间;如果存在粗粒度模板,则接着检测是否存在近似的细粒度模板,如果不存在,则基于模板相似度计算方法在所有细粒度模板里面寻找最相似的N条模板,之后基于KNN预测出执行时间;如果存在匹配的细粒度模板,则基于当前模板数据,直接返回对应的执行时间。 + +如果是选择了深度学习,具体流程如下所示: +1°在训练过程中,首先判断是否存在历史模型,如果存在,则导入模型进行增量训练;如果不存在历史模型,则首先利用word2vector算法对SQL语句进行向量化,即图8-11中的SQL embeding过程。而后创建深度学习模型,将该SQL语句向量化的结果作为输入特征。基于训练数据进行训练,并将模型保存到本地。值得一提的是,该深度学习模型的最后一个全连接层网络的输出结果作为该SQL语句的特征向量。 +2°在预测过程中,首先判断是否存在模型,如果模型不存在,则直接报错退出;如果存在模型,则导入模型,并利用word2vector算法将待预测的SQL语句进行向量化,并将该向量输入到深度学习网络中,获取该神经网络的最后一个全连接层的输出结果,即为该SQL语句的特征向量。最后,利用余弦相似度在样本数据集中进行寻找,找到相似度最高的SQL语句,将该结果返回即为该待预测SQL语句的预估执行时间。当然,如果是基于最新SQL语句执行时间数据集训练出的深度学习模型,则模型的回归预测结果也可以作为预估执行时间。 + + +# 二、具体项目代码分析 + + +## 1、整体项目结构 + +项目路径:openGauss-server/src/gausskernel/dbmind/tools/sqldiag +首先是,本项目的整体架构如下所示: + +![img](./image/6.png "#left") + +其中,重要的文件/文件夹主要有: + preprocessing.py SQL预处理方法 + requirements.txt 依赖第三方库列表等 + main.p 入口文件 + algorithm 项目核心代码 + algorithm/sql_similarity 相似度计算方法 + + +## 2、核心代码分析 + + +### (1)首先是main函数 + + +```python +def main(args): +    logging.basicConfig(level=logging.WARNING)//初始化参数 +    if not is_valid_conf(args.config_file): +        logging.fatal('The [--config-file] parameter is incorrect') +        sys.exit(1) + // 下面就是根据我们初始化的参数来生成对应模型 +    model = SQLDiag(args.model, get_config(args.config_file)) +    if args.mode in ('train', 'finetune')://如果检测到是要训练/微调,就执行这个分支,主要是读取训练数据,然后做训练,再保存模型 +        if not args.csv_file: +            logging.fatal('The [--csv-file] parameter is required for train mode') +            sys.exit(1) +        train_data = LoadData(args.csv_file).train_data +        if args.mode == 'train': +            model.fit(train_data) +        else: +            model.fine_tune(args.model_path, train_data) +        model.save(args.model_path) +    else: //不然的话就是执行测试分支,这里就不用读取训练数据,而是直接读取模型,然后用这个模型来预测结果 +        model.load(args.model_path) +        if args.csv_file and not args.query: +            predict_data = LoadData(args.csv_file).predict_data +        elif args.query and not args.csv_file: +            predict_data = split_sql(args.query) +        else: +            logging.error('The predict model only supports [--csv-file] or [--query] at the same time.') +            sys.exit(1) +        args.threshold = -100 if not args.threshold else float(args.threshold)//这里是要设置一个阈值来划分 +        pred_result = model.transform(predict_data) +        if args.predicted_file: +            if args.model == 'template': +                info_sum = [] +                    if _info: +                        _info = list(filter(lambda item: item[1]>=args.threshold, _info)) +           
             for item in _info: +                            item.insert(1, stats) +                        info_sum.extend(_info) +                ResultSaver().save(info_sum, args.predicted_file) +            else: +                pred_result = list(filter(lambda item: float(item[1])>=args.threshold, pred_result)) +                ResultSaver().save(pred_result, args.predicted_file) +        else: +            from prettytable import PrettyTable + +            display_table = PrettyTable() +            if args.model == 'template': +                display_table.field_names = ['sql', 'status', 'predicted time', 'most similar template'] +                display_table.align = 'l' +                status = ('Suspect illegal SQL', 'No SQL information', 'No SQL template found', 'Fine match') +                for stats in status: +                    if pred_result[stats]: +                        for sql, predicted_time, similariest_sql in pred_result[stats]: +                            if predicted_time >= args.threshold or stats == 'Suspect illegal sql': +                                display_table.add_row([sql, stats, predicted_time, similariest_sql]) +            else: +                display_table.field_names = ['sql', 'predicted time'] +                display_table.align = 'l' +                for sql, predicted_time in pred_result: +                    if float(predicted_time) >= args.threshold: +                        display_table.add_row([sql, predicted_time]) +            print(display_table.get_string()) + + +``` + +### (2)然后来分析下模板化算法的框架 + +![img](./image/7.png "#left") + + 模板化方法类的结构就如上所示,其中fit方法就是训练的,transform方法是预测的,load是加载模型,save是储存模型,predict_duration_time是计算执行时间,在transform中会被调用。 + +```python +# fit方法: +def fit(self, data): +        # 对每条sql语句按照粗、细粒度进行标准化,生成模板 +        for sql, duration_time in data: +            if check_illegal_sql(sql): +                continue +            sql_template = templatize_sql(sql) +            sql_prefix = sql_template.split()[0] +            if sql_prefix not in self.__hash_table: +                sql_prefix = 'OTHER'#如果不存在,说明没有此模板,就标记下来,后续插入 +            +            if sql_template not in self.__hash_table[sql_prefix]: +                self.__hash_table[sql_prefix][sql_template] = dict(time_list=[], count=0, mean_time=0.0, iter_time=0.0) +            self.__hash_table[sql_prefix][sql_template]['count'] += 1 +            self.__hash_table[sql_prefix][sql_template]['time_list'].append(duration_time) +        # 更新模板信息,也就相当于是训练的过程了 +        for sql_prefix, sql_prefix_info in self.__hash_table.items(): +            for sql_template, sql_template_info in sql_prefix_info.items(): +                del sql_template_info['time_list'][:-self.time_list_size] +                sql_template_info['mean_time'] = sum(sql_template_info['time_list']) / len(sql_template_info['time_list']) +                sql_template_info['iter_time'] = reduce(lambda x, y: (x+y)/2, sql_template_info['time_list']) + +# Predict_duration_time方法,就是去从一堆模板中找到最相似的k个,然后算均值(KNN方法)。 +def predict_duration_time(self, sql): +        top_similarity_sql = None +        if check_illegal_sql(sql): +            predict_time = -1 +            status = 'Suspect illegal sql' +            return sql, status, predict_time, top_similarity_sql + +        sql_template = templatize_sql(sql) +        # get 'sql_template' of SQL +        sql_prefix = sql_template.strip().split()[0] +        if sql_prefix not in self.__hash_table: +            sql_prefix = 'OTHER' +        if not 
self.__hash_table[sql_prefix]: +            status = 'No SQL information' +            predict_time = -1 +        elif sql_template not in self.__hash_table[sql_prefix]: +            similarity_info = [] +            """ +            if the template does not exist in the hash table, +            then calculate the possible execution time based on template +            similarity and KNN algorithm in all other templates +            """ +            status = 'No SQL template found' +            for local_sql_template, local_sql_template_info in self.__hash_table[sql_prefix].items(): +                similarity_info.append( +                    (self.similarity_algorithm(sql_template, local_sql_template), +                     local_sql_template_info['mean_time'], local_sql_template)) +            topn_similarity_info = heapq.nlargest(self.knn_number, similarity_info) #找相似度最高的模板的信息 +            sum_similarity_scores = sum(item[0] for item in topn_similarity_info) +            if not sum_similarity_scores: +                sum_similarity_scores = self.bias +            top_similarity_sql = '\n'.join([item[2] for item in topn_similarity_info]) +            similarity_proportions = [item[0] / sum_similarity_scores for item in +                                      topn_similarity_info] +            topn_duration_time = [item[1] for item in topn_similarity_info] +            predict_time = reduce(lambda x, y: x + y, +                                  map(lambda x, y: x * y, similarity_proportions, +                                      topn_duration_time)) + +        else: +            status = 'Fine match' +            predict_time = self.__hash_table[sql_prefix][sql_template]['iter_time'] +            top_similarity_sql = sql_template + +        return sql, status, predict_time, top_similarity_sql + + + +``` + + +### (3)模板化算法-相似度计算算法 + +主要有四个算法: + +```python +# 1°余弦距离 + +![img](./image/8.png "#left") + + +def distance(str1, str2): +    c_1 = Counter(str1) +    c_2 = Counter(str2) +    c_union = set(c_1).union(c_2) +    dot_product = sum(c_1.get(item, 0) * c_2.get(item, 0) for item in c_union) +    mag_c1 = math.sqrt(sum(c_1.get(item, 0)**2 for item in c_union)) +    mag_c2 = math.sqrt(sum(c_2.get(item, 0)**2 for item in c_union)) +    return dot_product / (mag_c1 * mag_c2) + +# 2°编辑距离 + + +![img](./image/9.png "#left") + +def distance(str1, str2): +    """ +    func: calculate levenshtein distance between two strings. 
+    :param str1: string1 +    :param str2: string2 +    :return: distance +    """ +    len_str1 = len(str1) + 1 +    len_str2 = len(str2) + 1 +    +    mat = [[0]*len_str2 for i in range(len_str1)] +    mat[0][0] = 0 +    for i in range(1,len_str1): +        mat[i][0] = mat[i-1][0] + 1 +    for j in range(1,len_str2): +        mat[0][j] = mat[0][j-1]+1 +    for i in range(1,len_str1): +        for j in range(1,len_str2): +            if str1[i-1] == str2[j-1]: +                mat[i][j] = mat[i-1][j-1] +            else: +                mat[i][j] = min(mat[i-1][j-1],mat[i-1][j],mat[i][j-1])+1 +    +    return 1 / mat[len_str1-1][j-1] + + +# 3°列表距离 + +大致就是两个部分:长度相似度+内容相似度 +def distance(str1, str2): +    sql_distance = 0.0 +    list1 = str1.split() +    list2 = str2.split() +    sorted_list1 = sorted(list1) +    sorted_list2 = sorted(list2) +    max_len = max(len(sorted_list1), len(sorted_list2)) +    min_len = min(len(sorted_list1), len(sorted_list2)) +    short_list = sorted_list1 if len(sorted_list1) < len(sorted_list2) else sorted_list2 +    long_list = sorted_list1 if len(sorted_list1) > len(sorted_list2) else sorted_list2 +    for item in short_list: +        if item in long_list: +            sql_distance += 1.0 +    length_similarity = float(min_len / max_len) +    return sql_distance + length_similarity +# 4°解析树距离(这里没看懂,就不叙述了) + + +``` + + + +## 3、DNN算法 + + +![img](./image/10.png "#left") +![img](./image/11.png "#left") + + +DNN模型主要有一个word2vec模块,然后是接上一个KerasRegression,KerasRegression结构如下: +  +```python +from tensorflow.keras import Input, Model +        from tensorflow.keras.layers import Dense +        inputs = Input(shape=(shape,)) +        layer_dense1 = Dense(128, activation='relu', +            kernel_initializer='he_normal')(inputs) +        layer_dense2 = Dense(256, activation='relu', +            kernel_initializer='he_normal')(layer_dense1) +        layer_dense3 = Dense(256, activation='relu', +            kernel_initializer='he_normal')(layer_dense2) +        layer_dense4 = Dense(256, activation='relu', +            kernel_initializer='he_normal', name='vectors')(layer_dense3) +        y_pred = Dense(encoding_dim)(layer_dense4) +        model = Model(inputs=inputs, outputs=y_pred) +        model.compile(optimizer='adam', loss='mse', metrics=['mae']) +        return model + + +``` +就是一个简单的5层全连接网络。 + +预测过程: +  +```python +feature_list = [] +        data_backup = list(data) +        error_list = [] +        for idx_error, sql in enumerate(data_backup): +            if check_illegal_sql(sql): +                error_list.append(idx_error) +                continue +            filter_template = templatize_sql(sql) +            vector = self.w2v.str2vec(filter_template) +            feature_list.append(vector) + +        features = np.array(feature_list) +        predictions = self.regression.predict(features) +        predictions = np.abs(predictions) +        score = self.scaler.inverse_transform(predictions) +        if error_list: +            for item in error_list: +                score = np.insert(score, item, -1) +        score = np.hstack((np.array(data_backup).reshape(-1, 1), score.reshape(-1, 1))).tolist() +        return score + + +``` + +训练过程就相当于预测过程多一个反向传播。 diff --git "a/content/zh/post/cym/OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" "b/content/zh/post/cym/OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" new file mode 100644 index 
0000000000000000000000000000000000000000..725bd1cb8cb702c1390deb669b9880cef9113032 --- /dev/null +++ "b/content/zh/post/cym/OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" @@ -0,0 +1,385 @@ ++++ + +title = "openGauss数据库安装与使用" +date = "2021-11-30" +tags = ["openGauss社区开发入门"] +archives = "2021-11" +author = "chen-yiming-cs" +summary = "openGauss社区开发入门" +times = "17:30" + ++++ + +# 一、相关说明 + 使用VMware虚拟机,安装CentOS7.9 64位系统(因为找不到7.6的安装包了),设置此系统的基础参数如下(这里内存需要设置大一点,不然可能会无法运行openGauss): + +![img](./image/1.png "#left") + + 此外,还需要修改/etc/redhat-release文件中系统的版本为:CentOS Linux release 7.6(Core),这是因为不安装7.6,可能出现gauss与os不匹配的问题,但是目前已经没有7.6的包了。所以选择了7.9,然后把版本号改掉。 + + + +# 二、具体安装步骤 + + +## 1.准备操作 +(1)配置yum源 + 删除自带的yum源 + +```bash +rm -rf /etc/yum.repos.d/* +``` + + 下载阿里云的yum源 + +```bash +wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo +``` + + 然后更新缓存 +```bash +yum makecache +``` +(2)安装一些依赖项 + +``` +yum clean all +yum install -y lksctp* +yum install -y java-1.8.0-openjdk* psmisc bzip2 python3 python3-devel +yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core +``` + + + +(3)配置ip和hostname + +``` +hostname && ifconfig |grep broadcast|awk '{print $2}' +sed -i '/MasterG/d' /etc/hosts +echo "192.168.2.131 MasterG ##Gauss OM IP Hosts Mapping" >> /etc/hosts +cat /etc/hosts|grep Gauss + + + +``` +(4)关闭防火墙然后重启 + +``` +systemctl status firewalld +systemctl disable firewalld.service +systemctl stop firewalld.service +sed -i '/SELINUX=/d' /etc/selinux/config +echo "SELINUX=disabled" >> /etc/selinux/config +cat /etc/selinux/config|grep -v ^#|grep -v '^$' +reboot + + +``` + +(5)配置时区 + +``` +echo "export LANG=en_US.UTF-8" >> ~/.bash_profile +source ~/.bash_profile +env|grep LANG +rm -fr /etc/localtime +ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime +ll /etc/localtime + + +``` + +(6)关闭SWAP + +``` +swapoff -a + + +``` + +(7)关闭透明页并重启 + +``` +echo never > /sys/kernel/mm/transparent_hugepage/enabled +echo never > /sys/kernel/mm/transparent_hugepage/defrag +sed -i '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub +echo "GRUB_CMDLINE_LINUX=\"rhgb quiet transparent_hugepage=never\"" >> /etc/default/grub +grub2-mkconfig -o /boot/grub2/grub.cfg + +reboot + + +``` + +(8)修改系统资源限制 + +``` +echo "* soft stack 3072" >> /etc/security/limits.conf +echo "* hard stack 3072" >> /etc/security/limits.conf +echo "* soft nofile 1000000" >> /etc/security/limits.conf +echo "* hard nofile 1000000" >> /etc/security/limits.conf +echo "* soft nproc unlimited" >> /etc/security/limits.d/90-nproc.conf +tail -n 4 /etc/security/limits.conf +tail -n 1 /etc/security/limits.d/90-nproc.conf + + +``` + +## 2.详细安装 + +(1)下载安装包、创建用户组和目录 + +``` +groupadd dbgrp +useradd -g dbgrp -d /home/omm -m -s /bin/bash omm +echo "omm" | passwd --stdin omm +mkdir -p /opt/software/openGauss +chmod 755 -R /opt/software +chown -R omm:dbgrp /opt/software/openGauss +cd /opt/software/openGauss/ +wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86/openGauss-2.0.0-CentOS-64bit-all.tar.gz +tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz +tar -zxvf openGauss-2.0.0-CentOS-64bit-om.tar.gz + + +``` +(2)配置XML文件 + +``` +cp script/gspylib/etc/conf/cluster_config_template.xml . 
+``` + +具体配置文件为(以下示例与本文环境一致,其中主机名、IP与各路径请按实际环境修改): + +``` +<?xml version="1.0" encoding="UTF-8"?> +<ROOT> +    <CLUSTER> +        <PARAM name="clusterName" value="singlenode" /> +        <PARAM name="nodeNames" value="node1" /> +        <PARAM name="gaussdbAppPath" value="/opt/huawei/install/app" /> +        <PARAM name="gaussdbLogPath" value="/opt/huawei/log/omm" /> +        <PARAM name="tmpMppdbPath" value="/opt/huawei/tmp" /> +        <PARAM name="gaussdbToolPath" value="/opt/huawei/install/om" /> +        <PARAM name="corePath" value="/opt/huawei/corefile" /> +        <PARAM name="backIp1s" value="192.168.2.131" /> +    </CLUSTER> +    <DEVICELIST> +        <DEVICE sn="node1"> +            <PARAM name="name" value="node1" /> +            <PARAM name="azName" value="AZ1" /> +            <PARAM name="azPriority" value="1" /> +            <PARAM name="backIp1" value="192.168.2.131" /> +            <PARAM name="sshIp1" value="192.168.2.131" /> +            <PARAM name="dataNum" value="1" /> +            <PARAM name="dataPortBase" value="26000" /> +            <PARAM name="dataNode1" value="/opt/huawei/install/data/dn" /> +        </DEVICE> +    </DEVICELIST> +</ROOT> +``` + +(3)添加lib库 +在 .bashrc文件中添加如下: + +``` +export GPHOME=/opt/huawei/install/om +export PATH=$GPHOME/script/gspylib/pssh/bin:$GPHOME/script:$PATH +export LD_LIBRARY_PATH=$GPHOME/lib:$LD_LIBRARY_PATH +export PYTHONPATH=$GPHOME/lib +export GAUSSHOME=/opt/huawei/install/app +export PATH=$GAUSSHOME/bin:$PATH +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH +export S3_CLIENT_CRT_FILE=$GAUSSHOME/lib/client.crt +export GAUSS_VERSION=2.0.0 +export PGHOST=/opt/huawei/tmp +export GAUSSLOG=/opt/huawei/log/omm +umask 077 +export GAUSS_ENV=2 +export GS_CLUSTER_NAME=singlenode + + + +``` +(4)交互式安装 +首先是预安装: + +``` +cd /opt/software/openGauss/script +python3 gs_preinstall -U omm -G dbgrp -X +/opt/software/openGauss/cluster_config_template.xml + + + +``` +正常的话,会出现如下信息: +Parsing the configuration file. +Successfully parsed the configuration file. +Installing the tools on the local node. +Successfully installed the tools on the local node. +Setting pssh path +Successfully set core path. +Are you sure you want to create the user[omm] and create trust for it (yes)? yes +Preparing SSH service. +Successfully prepared SSH service. +Checking OS software. +Successfully check os software. +Checking OS version. +Successfully checked OS version. +Creating cluster's path. +Successfully created cluster's path. +Setting SCTP service. +Successfully set SCTP service. +Set and check OS parameter. +Setting OS parameters. +Successfully set OS parameters. +Warning: Installation environment contains some warning messages. +Please get more details by "/opt/software/openGauss/script/gs_checkos -i A -h node1 --detail". +Set and check OS parameter completed. +Preparing CRON service. +Successfully prepared CRON service. +Setting user environmental variables. +Successfully set user environmental variables. +Setting the dynamic link library. +Successfully set the dynamic link library. +Setting Core file +Successfully set core path. +Setting pssh path +Successfully set pssh path. +Set ARM Optimization. +No need to set ARM Optimization. +Fixing server package owner. +Setting finish flag. +Successfully set finish flag. +Preinstallation succeeded. + +到这里说明预安装完成。 + +然后是正式安装: + +``` + cd script/ + gs_install -X /opt/software/openGauss/cluster_config_template.xml + + + +``` +正常的话会出现如下信息: +Parsing the configuration file. +Check preinstall on every node. +Successfully checked preinstall on every node. +Creating the backup directory. +Successfully created the backup directory. +begin deploy.. +Installing the cluster. +begin prepare Install Cluster.. +Checking the installation environment on all nodes. +begin install Cluster.. +Installing applications on all nodes. +Successfully installed APP. +begin init Instance.. +encrypt cipher and rand files for database. +Please enter password for database: +Please repeat for database: +begin to create CA cert files +The sslcert will be generated in /opt/huawei/install/app/sslcert/om +Cluster installation is completed. +Configuring. +Deleting instances from all nodes. +Successfully deleted instances from all nodes. +Checking node configuration on all nodes. +Initializing instances on all nodes. +Updating instance configuration on all nodes. +Check consistence of memCheck and coresCheck on database nodes. +Configuring pg_hba on all nodes. +Configuration is completed. +Successfully started cluster. +Successfully installed application. +end deploy. 
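+ +安装完成后,可以先以omm用户执行openGauss自带的集群管理工具,确认数据库状态正常(示意命令,需上述环境变量已生效): + +``` +gs_om -t status --detail +``` +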
+ +输入 +``` +gsql -d postgres -p 26000 + + +``` +连接本地数据库,没有出现报错信息即说明安装成功。 + + +## 3.连接设置 +(1)安装jdk1.8 +(2)下载好jdbc压缩包后,解压至: + +``` +/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/lib/ext + + +``` +(3)登录到omm用户上,然后登录数据库主节点,执行如下指令: +NodeName为当前节点名称,还要注意localhost改成对应的。 + +``` +gs_guc reload -N node1 -I all -c "listen_addresses='localhost,192.168.2.131'" + + +``` +(4)执行下列指令,在数据库主节点配置文件中增加一条规则: + +``` +gs_guc reload -N all -I all -h "host all user 192.168.17.129/32 sha256" + + +``` +(5)然后通过java程序就可以连接了。 + +```java +import java.sql.*; +public class java_connect_opengauss{ +    public static Connection getConnect(String username, String passwd){ +        String driver = "org.postgresql.Driver"; +        String sourceURL = "jdbc:postgresql://127.0.0.1:26000/postgres"; +        Connection conn = null; +        try{ +            Class.forName(driver); +        } +        catch( Exception e ){ +            e.printStackTrace(); +            return null; +        } +        +        try{ +            conn = DriverManager.getConnection(sourceURL, username, passwd); +            System.out.println("Connection succeed!"); +        } +        catch(Exception e) +        { +            e.printStackTrace(); +            return null; +        } +        +        return conn; +    } +    public static void main(String[] args) { +        //输入数据库的用户名和密码 +        Connection conn = getConnect("username", "password"); +        try { +            if (conn != null) {   // 连接失败时getConnect返回null,避免空指针 +                conn.close(); +            } +        } catch (SQLException e) { +            e.printStackTrace(); +        } +    } +} + + +``` + + + +``` +javac java_connect_opengauss.java +java java_connect_opengauss + + +``` +然后会出现Connection succeed! +即代表连接成功 diff --git a/content/zh/post/cym/image/.DS_Store b/content/zh/post/cym/image/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..a6a4fe557a05fd2620e5e9da1c79633210899c20 Binary files /dev/null and b/content/zh/post/cym/image/.DS_Store differ diff --git a/content/zh/post/cym/image/1.png b/content/zh/post/cym/image/1.png new file mode 100644 index 0000000000000000000000000000000000000000..12f7e47eaecd1e3ea3ba6e9c3cf0af4cc8eba165 Binary files /dev/null and b/content/zh/post/cym/image/1.png differ diff --git a/content/zh/post/cym/image/10.png b/content/zh/post/cym/image/10.png new file mode 100644 index 0000000000000000000000000000000000000000..9f81824a56e649e4cef920fbecd81ab7024ad083 Binary files /dev/null and b/content/zh/post/cym/image/10.png differ diff --git a/content/zh/post/cym/image/11.png b/content/zh/post/cym/image/11.png new file mode 100644 index 0000000000000000000000000000000000000000..5b9be26d351657076304a1dfdd29d820d3795997 Binary files /dev/null and b/content/zh/post/cym/image/11.png differ diff --git a/content/zh/post/cym/image/2.png b/content/zh/post/cym/image/2.png new file mode 100644 index 0000000000000000000000000000000000000000..76d4f76397298809180be5c09a2a8da34ab66398 Binary files /dev/null and b/content/zh/post/cym/image/2.png differ diff --git a/content/zh/post/cym/image/3.png b/content/zh/post/cym/image/3.png new file mode 100644 index 0000000000000000000000000000000000000000..effe0a05fb0afc8512cd814f015633c250ea8076 Binary files /dev/null and b/content/zh/post/cym/image/3.png differ diff --git a/content/zh/post/cym/image/4.png b/content/zh/post/cym/image/4.png new file mode 100644 index 0000000000000000000000000000000000000000..396189943eebbc7c1f72cd63a47afbc21543e6d9 Binary files /dev/null and b/content/zh/post/cym/image/4.png differ diff --git 
a/content/zh/post/cym/image/5.png b/content/zh/post/cym/image/5.png
new file mode 100644
index 0000000000000000000000000000000000000000..31b676783cf52b162709ddab9fe7cc98ad400284
Binary files /dev/null and b/content/zh/post/cym/image/5.png differ
diff --git a/content/zh/post/cym/image/6.png b/content/zh/post/cym/image/6.png
new file mode 100644
index 0000000000000000000000000000000000000000..15fc38c45863190be8c9b571a252974923a79779
Binary files /dev/null and b/content/zh/post/cym/image/6.png differ
diff --git a/content/zh/post/cym/image/7.png b/content/zh/post/cym/image/7.png
new file mode 100644
index 0000000000000000000000000000000000000000..14ea49859e4f212a321e21fc948f6578724ccaac
Binary files /dev/null and b/content/zh/post/cym/image/7.png differ
diff --git a/content/zh/post/cym/image/8.png b/content/zh/post/cym/image/8.png
new file mode 100644
index 0000000000000000000000000000000000000000..36278f99b666cf93c9d2ce86463b205766322a98
Binary files /dev/null and b/content/zh/post/cym/image/8.png differ
diff --git a/content/zh/post/cym/image/9.png b/content/zh/post/cym/image/9.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6a6115bb37a33b82f4d0c6e479c329c03556352
Binary files /dev/null and b/content/zh/post/cym/image/9.png differ
diff --git a/content/zh/post/default.png b/content/zh/post/default.png
new file mode 100644
index 0000000000000000000000000000000000000000..70823878926531de19293cb21a7b135cd8f59798
Binary files /dev/null and b/content/zh/post/default.png differ
diff --git a/content/zh/post/douxin/JDBC_usage_compile.md b/content/zh/post/douxin/JDBC_usage_compile.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e0aa5931520a532e34e8ef7a22ab07a00c4c5a8
--- /dev/null
+++ b/content/zh/post/douxin/JDBC_usage_compile.md
@@ -0,0 +1,285 @@
++++
+title = "JDBC Usage and Source Code Compilation"
+date = "2021-12-18"
+tags = ["JDBC"]
+archives = "2021-12"
+author = "douxin"
+summary = "Getting started with openGauss community development"
+img="/zh/post/douxin/title/img1.png"
+times = "17:30"
+
++++
+
+# JDBC Usage and Source Code Compilation
+
+## 1. Introduction to JDBC
+
+- JDBC is short for Java DataBase Connectivity, the standard interface through which Java programs access databases.
+
+- The JDBC interface ships with the Java standard library; the concrete JDBC driver is provided by the database vendor as a jar package, itself written in Java, that actually implements the classes of the JDBC interface.
+
+- The openGauss database originates from PostgreSQL; openGauss JDBC takes PostgreSQL JDBC Driver 42.2.5 as its baseline, adapts it to openGauss, and adds new features.
+
+- openGauss JDBC download locations:
+
+  - Jar package download paths:
+
+    (1) [Official website download](https://opengauss.org/zh/download.html)
+
+    (2) [Huawei Kunpeng maven repository](https://repo.huaweicloud.com/kunpeng/maven/org/opengauss/opengauss-jdbc/)
+
+    (3) [Maven central repository](https://mvnrepository.com/artifact/org.opengauss/opengauss-jdbc)
+
+  - Source code download path:
+
+    [Source download](https://gitee.com/opengauss/openGauss-connector-jdbc)
+
+## 2. Using JDBC
+
+- Reference links
+
+  For the API in the openGauss JDBC source, see: [API link](https://jdbc.postgresql.org/documentation/publicapi/)
+
+  For the user guide, see: [user guide](https://impossibl.github.io/pgjdbc-ng/docs/current/user-guide/)
+
+- Execution flow
+
+  Operating a database through JDBC roughly follows this flow:
+  (1) connect to the data source;
+  (2) send query and update statements to the database;
+  (3) process the database response and return the results.
+
+- Complete example
+
+  ```
+  import java.sql.Connection;
+  import java.sql.DriverManager;
+  import java.sql.PreparedStatement;
+  import java.sql.ResultSet;
+  import java.sql.SQLException;
+  import java.sql.Statement;
+
+  public class Main {
+
+      public static void main(String[] args) {
+          String driver = "org.postgresql.Driver";
+          String sourceURL = "jdbc:postgresql://127.0.0.1:5432/postgres";
+          String userName = "tpcc";
+          String password = "password";
+
+          try {
+              // 1. load the driver
+              Class.forName(driver);
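+              // Note: depending on the opengauss-jdbc version, the driver class and
+              // URL scheme may instead be "org.opengauss.Driver" and
+              // "jdbc:opengauss://host:port/dbname"; the "-compatibility" builds keep
+              // the org.postgresql names used here (a version-specific assumption,
+              // so check the jar you actually downloaded).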
+
+              // 2. obtain the database connection
+              Connection conn = DriverManager.getConnection(sourceURL, userName, password);
+
+              // 3. create a table
+              String sql = "create table test(id int, name varchar);";
+              Statement statement = conn.createStatement();
+              statement.execute(sql);
+
+              // 4. insert data; precompiled SQL reduces repeated parsing
+              String insertSql = "insert into test values (?, ?)";
+              PreparedStatement ps = conn.prepareStatement(insertSql);
+              ps.setInt(1, 10);
+              ps.setString(2, "test10");
+              ps.execute();
+
+              // 5. query the result set
+              String selectSql = "select * from test";
+              PreparedStatement psSelect = conn.prepareStatement(selectSql);
+              ResultSet rs = psSelect.executeQuery();
+              while (rs.next()) {
+                  System.out.println("id = " + rs.getInt(1));
+                  System.out.println("name = " + rs.getString(2));
+              }
+          } catch (SQLException | ClassNotFoundException e) {
+              // ClassNotFoundException covers Class.forName; SQLException covers the JDBC calls
+              e.printStackTrace();
+          }
+      }
+  }
+  ```
+
+- Adding the JDBC driver
+
+  (1) Through a Java project, as a referenced library
+  Place the openGauss JDBC jar under the project path and add it to Referenced Libraries via Build Path -> Add to Build Path.
+  (2) Through a Maven project, as a dependency
+
+  The openGauss JDBC driver has been uploaded to the Huawei Kunpeng repository and the Maven central repository in three versions: 1.1.0, 2.0.0 and 2.0.1-compatibility. The dependency is configured as follows:
+
+  ```
+  <dependency>
+      <groupId>org.opengauss</groupId>
+      <artifactId>opengauss-jdbc</artifactId>
+      <version>version_num</version>
+  </dependency>
+  ```
+
+  To use the Huawei Kunpeng maven mirror, configure the repository either in the conf/settings.xml file of your Maven installation or in the Maven project's pom.xml file.
+
+  [Huawei Kunpeng maven mirror](https://mirrors.huaweicloud.com/home)
+
+  The repository configuration is as follows:
+
+  ```
+  <repository>
+      <id>kunpengmaven</id>
+      <name>kunpeng maven</name>
+      <url>https://repo.huaweicloud.com/kunpeng/maven</url>
+  </repository>
+  ```
+
+## 3. Compiling JDBC from source
+
+- Building the jar on Linux
+
+  Download the openGauss JDBC source from Gitee; on Linux it can be built in one step to produce the jar.
+
+  The build command is:
+
+  ```
+  sh build.sh -3rd $openGauss-connector-jdbc/open-source
+  ```
+
+  where $openGauss-connector-jdbc is the JDBC source path; the generated jars are placed under $openGauss-connector-jdbc/output.
+
+  ![image-20211218161436265](../image/jdbc/png0.png)
+
+  The two jars above differ in name only; they are essentially the same jar.
+
+- Building the jar on Windows
+
+  Package the source with Maven. The packaging command is:
+
+  ```
+  mvn clean package -Dmaven.test.skip=true
+  ```
+
+  Running this command directly reports errors; the common problems and their solutions are given below:
+
+  - Problem 1: Child module does not exist
+
+    ![image-20211218153454262](../image/jdbc/png1.png)
+
+    Solution: in the root pom.xml, change the module jdbc to pgjdbc:
+
+    ```
+    <module>jdbc</module>
+    change to
+    <module>pgjdbc</module>
+    ```
+
+  - Problem 2: missing com.huawei:demo-0.0.1-SNAPSHOT.pom
+
+    Solution: add the demo-0.0.1-SNAPSHOT package to the repository.
+
+    Method 1: copy the demo-0.0.1-SNAPSHOT-0.0.1.jar produced by a successful Linux build into your local Maven repository.
+
+    Method 2: run the following scripts to generate the demo-0.0.1-SNAPSHOT-0.0.1.jar package:
+
+    ```
+    sh prepare_maven.sh
+    sh prepare_demo.sh
+    ```
+
+  - Problem 3: unmappable character for encoding GBK
+
+    ![image-20211218153845040](../image/jdbc/png2.png)
+
+    Solution:
+
+    Add UTF-8 to the maven-compiler-plugin configuration in two files:
+    (1) pgjdbc/pom.xml
+    (2) pom.xml
+
+    ```
+    <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+            <source>1.8</source>
+            <target>1.8</target>
+            <encoding>UTF-8</encoding>
+            <showWarnings>true</showWarnings>
+            <compilerArgs>
+                <arg>-Xlint:all</arg>
+            </compilerArgs>
+        </configuration>
+    </plugin>
+    ```
+
+  - Problem 4: package javax.xml.bind does not exist
+
+    ![image-20211218154319533](../image/jdbc/png3.png)
+
+    Solution: add the following dependencies to pgjdbc/pom.xml:
+
+    ```
+    <dependency>
+        <groupId>javax.xml.bind</groupId>
+        <artifactId>jaxb-api</artifactId>
+        <version>2.3.0</version>
+    </dependency>
+    <dependency>
+        <groupId>com.sun.xml.bind</groupId>
+        <artifactId>jaxb-core</artifactId>
+        <version>2.3.0</version>
+    </dependency>
+    <dependency>
+        <groupId>com.sun.xml.bind</groupId>
+        <artifactId>jaxb-impl</artifactId>
+        <version>2.3.0</version>
+    </dependency>
+    ```
+
+  - Problem 5: StreamWrapper.java unreported exception java.lang.Throwable
+
+    ![image-20211218154522156](../image/jdbc/png4.png)
+
+    Solution: modify StreamWrapper.java so that the method declares the Throwable exception.
+
+    ![image-20211218154605098](../image/jdbc/png5.png)
+
+  - Problem 6: shaded package not found
+
+    package com.huawei.shade.org.slf4j does not exist
+
+    ![image-20211218154854549](../image/jdbc/png6.png)
+
+    Solution: delete the local com.huawei.demo-0.0.1-SNAPSHOT.jar and repackage it.
+
+    Packaging method:
+
+    ```
+    sh prepare_maven.sh
+    sh prepare_demo.sh
+    ```
+
+  - Problem 7: zip tool not found
+
+    When the two scripts from Problem 6 build com.huawei.demo-0.0.1-SNAPSHOT.jar, a zip tool must be on the PATH; download the zip/unzip tools ([zip/unzip download link](http://www.stahlworks.com/dev/index.php?tool=zipunzip)) and add them to the environment variables.
+
+- Running the test cases
+
+  - Add the junit dependency to pgjdbc/pom.xml:
+
+    ```
+    <dependency>
+        <groupId>junit</groupId>
+        <artifactId>junit</artifactId>
+        <version>4.12</version>
+        <scope>test</scope>
+    </dependency>
+    ```
+
+  - Add the configuration file build.local.properties (a minimal sketch follows below)
+
+    ![image-20211218155542929](../image/jdbc/png7.png)
+
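+
+    For reference, a minimal build.local.properties might look like this (the key names are assumed from the upstream PostgreSQL JDBC driver's build configuration, which this source tree derives from; the values are placeholders):
+
+    ```
+    server=127.0.0.1
+    port=5432
+    database=test
+    username=test
+    password=test
+    ```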
在根目录下增加配置文件build.local.properties,并配置数据库相关的信息(ip,port,user,password),即可在本地执行测试用例 + diff --git a/content/zh/post/douxin/database-sync_for_openGauss.md b/content/zh/post/douxin/database-sync_for_openGauss.md new file mode 100644 index 0000000000000000000000000000000000000000..09b7725c1d252e590befde22bdb5474cbf449962 --- /dev/null +++ b/content/zh/post/douxin/database-sync_for_openGauss.md @@ -0,0 +1,183 @@ ++++ +title = "database-sync适配openGauss使用指导书" +date = "2021-03-29" +tags = ["openGauss社区开发入门"] +archives = "2021-03" +author = "douxin" +summary = "openGauss社区开发入门" +img="/zh/post/douxin/title/img1.png" +times = "17:30" + ++++ + +## 一、database-sync简介 + +database-sync作为一种开源辅助工具,用于数据库之间的表同步,更确切的说法是复制,可以从一个数据库复制表到另一个数据库 + +- 该工具支持的功能如下: + (1)自动同步表字段,如:源表扩字段,目标表自动扩字段 + + (2)支持增量或全量同步数据 + + (3)支持指定字段同步,只同步关心的字段 + +- 支持的关系型数据库: + + mysql、db2、postgresql、oracle、sqlserver + +- 源代码下载: + + database-sync代码托管在github上 + + 源代码下载:[下载](https://gitee.com/somenzz/database-sync) + +## 二、database-sync适配openGauss + +目标:database-sync适配openGauss数据库,可支持openGauss与其他数据库之间实现数据同步 + +源代码下载:适配openGauss的源代码已上传到个人仓库下的“douxin_master”分支 + +下载链接:[下载](https://gitee.com/ywzq1161327784/database-sync) + +## 三、程序使用说明 + +前提:已安装Java JDK 1.8或更高版本,安装maven + +### Step 1:下载代码并打包 + +git clone下载 douxin_master分支的代码,并进行打包 + +``` +git clone https://gitee.com/ywzq1161327784/database-sync.git +cd database-sync/ +git checkout origin/douxin_master +mvn clean package -Dmaven.test.skip=true +``` + +打包后,target目录内容如下: + +![1616832540491](../image/database-sync/image1.png) + +### Step 2:构建可执行工程目录 + +将mylib目录、target下的lib目录和database-sync-1.3.jar包复制到同一目录下,并创建一个config目录,在其下创建config.json文件写入配置信息,目录结构如下所示: + +![1616845721141](../image/database-sync/image2.png) + +### Step 3:编写配置文件 + +数据库连接信息以json格式配置在config/config.json文件中,每个节点包含type、driver、url、user、password、tbspace_ddl、encoding信息, + +其中type指定数据库类型,取值为db2、postgres、mysql、oracle、sqlserver、openGauss,特别地,对于openGauss数据库需指定“type”:“openGauss”; + +tbspace_ddl指定表空间语句,encoding指定编码方式。 + +配置文件示例: + +``` +{ + "database1":{ + "type":"openGauss", + "driver":"org.postgresql.Driver", + "url":"jdbc:postgresql://ip:port/postgres", + "user": "****", + "password":"******", + "tbspace_ddl": "", + "encoding":"utf-8" + }, + + "database2":{ + "type":"postgres", + "driver":"org.postgresql.Driver", + "url":"jdbc:postgresql://ip:5432/postgres", + "user": "****", + "password":"******", + "tbspace_ddl":"WITH (compression=no, orientation=orc, version=0.12)\ntablespace hdfs\n", + "encoding":"utf-8" + }, + + "database3":{ + "type":"oracle", + "driver":"oracle.jdbc.driver.OracleDriver", + "url":"jdbc:oracle:thin:@localhost:1521:orcl", + "user": "****", + "password":"****", + "tbspace_ddl": "", + "encoding":"utf-8" + }, + + "database4":{ + "type":"db2", + "driver":"com.ibm.db2.jcc.DB2Driver", + "url":"jdbc:db2://ip:port/wbsj", + "user": "****", + "password":"****", + "tbspace_ddl": "", + "encoding":"utf-8" + }, + + "database5":{ + "type":"mysql", + "driver":"com.mysql.cj.jdbc.Driver", + "url":"jdbc:mysql://localhost:3306/aarondb", + "user": "****", + "password":"****", + "encoding":"utf-8" + }, + + "buffer-rows": 100000 +} +``` + +### Step 4:查看程序帮助信息 + +在步骤2中的目录下,执行`java -jar database-sync-1.3.jar -h`命令可查看程序运行的帮助信息 + +![1616847412381](../image/database-sync/image3.png) + +其中[]表示可选参数,包括如下几种: + +--version或者-v表示打印版本信息并退出; + +--help或者-h表示打印帮助信息并退出; + +--sync-ddl或者-sd 表示自动同步表结果,默认情况下不会自动同步表结构,因此不指定该参数时,同步表时下述的目标表{toDB}需存在; + +--from_fields=col1,col2或者-ff=col3,col4表示指定原表的字段序列; + +--to_fields=col1,col2或者-tf=col3,col4表示指定目标表的字段序列; + 
+--no-feture或者-nf表示不使用特定数据库的特性; + +[whereClause]表示where条件,用于增量更新。 + +**{}表示必选参数**,共有6个,分别说明如下: + +{fromDB}表示原表所在的数据库信息,在Step3所示的配置文件中,可以取值为database1,database2等; + +{fromSchema}表示原表的模式名; + +{fromTable}表示原表的表名; + +{toDB}表示目标表所在的数据库信息; + +{toSchema}表示目标表的模式名; + +{toTable}表示目标表的表名。 + +### Step 5:跨数据库间实现表数据同步 + +eg 1:从postgres到openGauss实现表同步 + +`java -jar database-sync-1.3.jar -sd postgres public test_0322 opengauss1 public test_0322_1` + +![1616848683463](../image/database-sync/image4.png) + +eg 2:从openGauss到postgres实现表同步 + +`java -jar database-sync-1.3.jar -sd opengauss1 public test_0322_1 postgres public test_0322` + +![1616848847482](../image/database-sync/image5.png) + +至此,可实现openGauss数据库与其他数据库间的表数据同步 + diff --git a/content/zh/post/douxin/haproxy_for_opengauss.md b/content/zh/post/douxin/haproxy_for_opengauss.md new file mode 100644 index 0000000000000000000000000000000000000000..edb2f42521cb0d3d532aa2fd13becb6ce776a08e --- /dev/null +++ b/content/zh/post/douxin/haproxy_for_opengauss.md @@ -0,0 +1,156 @@ ++++ +title = "HAProxy适配openGauss使用指导书" +date = "2021-08-31" +tags = ["openGauss分布式解决方案"] +archives = "2021-08" +author = "douxin" +summary = "openGauss社区开发入门" +img="/zh/post/douxin/title/img1.png" +times = "17:30" + ++++ + +## 一、HAProxy简介 + +- HAProxy是一个开源的项目,其代码托管在Github上,代码链接如下:[HAProxy代码链接](https://github.com/haproxy/haproxy)。 +- HAProxy提供高可用性、负载均衡以及基于TCP和HTTP应用的代理,支持虚拟主机,它是免费、快速并且可靠的一种解决方案。 +- HAProxy实现了一种事件驱动, 单一进程模型,此模型支持非常大的并发连接数。 + +## 二、HAProxy实现openGauss集群的读写分离和负载均衡 + +- HAProxy实现openGauss集群的读写分离和负载均衡,前提条件需由Patroni管理openGauss数据库集群,关键点在于配置文件的配置。 + +- HAProxy 配置中分成五部分内容,分别如下: + + ``` + - global:设置全局配置参数,属于进程的配置,通常是和操作系统相关。 + + - defaults:配置默认参数,这些参数可以被用到frontend,backend,listen组件; + + - frontend:接收请求的前端虚拟节点,frontend可以更加规则直接指定具体使用后端的backend; + + - backend:后端服务集群的配置,是真实服务器,一个backend对应一个或者多个实体服务器; + + - listen :frontend和backend的组合体。 + ``` + +- 在HAProxy配置文件中,[HAProxy完整配置文件链接](../image/haproxy/haproxy.cfg),我们定义了两个listen模块,名称分别为opengauss和opengauss_balance,对应集群主机的写操作和备机的读操作及负载均衡。在listen模块中,使用server关键字设置后端服务器,即设置Patroni管理的openGauss集群中各个数据库节点的ip和端口号,即可将数据库节点的信息加入到HAProxy的管理中。 + +### 2.1 主机的写操作配置 + +``` +listen opengauss # 用于主机 + bind *:5000 #开放的端口之一,用于连接主机 + option httpchk + # 开启对后端服务器的健康检测,接受健康监测[check] + http-check expect status 200 + default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions + # 监测的间隔时间[inter 3s], 监测失败多少次后被认为后端服务器是不可用的[fall 3],监测正常多少次后被认为后端服务器是可用的[rise 2],当标记为down时,关闭HAProxy到后台服务器的连接[on-marked-down shutdown-sessions] + server opengauss_ip1_port1 ip1:port1 maxconn 100 check port 8008 + server opengauss_ip2_port2 ip2:port2 maxconn 100 check port 8008 + server opengauss_ip3_port3 ip3:port3 maxconn 100 check port 8008 + server opengauss_ip4_port4 ip4:port4 maxconn 100 check port 8008 + # 使用server关键字设置后端服务器,为后端服务器所设置的内部名称[opengauss_ip1_port1], 该名称将会呈现在日志或警报中,后端服务器的IP地址,支持端口映射[ip1:port1] +``` + +**原理分析:** + +HAProxy配置中调用了健康监测REST API端点,通过Patroni获取集群中的主机备机信息。 + +Patroni有一个丰富的REST API(Representational State Transfer,表现层状态转化),所谓REST API,其是前后端分离的最佳实践,是开发的一套标准或者是一套规范,其特点总结如下: + +``` +(1) 每一个URI代表一种资源; + +(2) 客户端和服务器之间,传递这种资源的表现层; + +(3) 客户端通过四个HTTP动词,对服务器端资源进行操作,实现“表现层状态转化”。 +``` + +在HTTP协议中,四个表示操作方式的动词为:GET、POST、PUT、DELETE,它们分别对应四种基本的操作:GET用来获取资源,POST用来新建资源(也可以用于更新资源),PUT用来更新资源,DELETE用来删除资源。 + +Patroni中的REST API,有以下几种使用场景:参考链接:[Patroni REST API](https://patroni.readthedocs.io/en/latest/rest_api.html) + +``` +(1) 由Patroni自身使用用以leader竞选; + +(2) 由patronictl工具使用用以执行 failovers、switchovers、reinitialize、restarts、reloads操作; + +(3) 由HAProxy或者其他负载均衡器进行HTTP健康监测,或者监控。 +``` + 
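+
+For example, these endpoints can be probed directly with curl (assuming Patroni's REST API listens on port 8008, as in the HAProxy configuration below; patroni_host is a placeholder for a cluster node):
+
+```
+# primary probe: returns HTTP 200 on the primary, non-200 on replicas
+curl -s -o /dev/null -w "%{http_code}\n" http://patroni_host:8008/primary
+# replica probe: returns HTTP 200 on a healthy replica
+curl -s -o /dev/null -w "%{http_code}\n" http://patroni_host:8008/replica
+```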
+本文中HAProxy即利用Patroni中的REST API进行健康监测,进而识别集群中的主机,备机,以及各个节点的健康状态。 + +对于健康监测中的GET请求,Patroni返回一个包含节点状态、HTTP状态码的JSON文档。如果不需要复杂的JSON文档,只保留一些关键信息,可以用OPTIONS代替GET。 + +对于下列的请求:当Patroni节点拥有leader锁,且作为primary节点running时,Patroni REST API将返回HTTP状态码200: + +``` +(1) GET / + +(2) GET /master + +(3) GET /primary + +(4) GET /read-write +``` + +上述配置中,`option httpchk `相当于调用了`GET / `请求,`http-check expect status 200`相当于过滤出健康监测返回的状态码应为200,对于所配置的数据库,当为主机时,其状态码为200,于是上面的配置即选出了数据库集群中的主机,用HAProxy的ip和5000端口号即可代理集群中的主机。在openGauss集群中,通过gsql命令即可连接到集群的主机 + +``` +gsql -d postgres -h HAProxy_ip -p 5000 -U user -W password +``` + +### 2.2 备机的读操作及负载均衡配置 + +``` +listen opengauss_balance #用于备机 + bind *:5001 #开放的端口之一,用于连接备机 + mode tcp + option tcplog + balance roundrobin #balance定义负载均衡算法,roundrobin表示基于权重进行轮询,在服务器的处理时间保持均匀分布时,这是最平衡、最公平的算法。此算法是动态的,这表示某权重可以在运行时进行调整 + option httpchk OPTIONS /replica + http-check expect status 200 + default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions + server opengauss_ip1_port1 ip1:port1 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + server opengauss_ip2_port2 ip2:port2 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + server opengauss_ip3_port3 ip3:port3 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + server opengauss_ip4_port4 ip4:port4 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 +``` + +**原理分析:** + +对于`GET /replica`请求,当Patroni节点为running状态,角色为replica,未设置noloadbalance标签时,http返回状态码为200。 + +上述配置中,`option httpchk OPTIONS /replica`即调用了`OPTIONS /replica`请求,并以OPTIONS代替GET简化返回的信息,`http-check expect status 200`相当于过滤出健康监测返回的状态码应为200,因此当所配置的数据库为集群中的备机时,其状态码为200,于是上面的配置即选出了数据库集群中的备机,同时配置`balance roundrobin`,即定义负载均衡算法,对于读请求,将轮询发送于各个运行中的备机,因此,上述的配置可以用HAProxy的ip和5001端口号代理集群中的备机,且实现负载均衡。 + +在openGauss集群中,通过gsql命令即可连接到集群的备机 + +``` +gsql -d postgres -h HAProxy_ip -p 5001 -U user -W password +``` + +### 2.3 监控界面描述 + +除此之外,我们还配置了一个HAProxy的监控界面,通过访问该界面可以查看集群中各个节点的状态。 + +``` +listen stats #定义一个名为stats的部分 + mode http + # 定义为HTTP模式 + bind *:7000 #开放的端口之一,用于监控 + # 定义监听的套接字 + stats enable + # stats是HAProxy的一个统计页面的套接字 + stats uri / + # 设置统计页面的uri为/ +``` + +上述配置中,访问http://ip:7000/即可查看监控界面,其中ip为部署HAProxy机器的ip,页面信息如下图所示: + +![image-20210624190839625](../image/haproxy/image1.png) + +上图中,对应一主三备集群,第一个模块openGauss对应写操作,绿色的一栏表示集群中的主机,第二个模块opengauss_balance对应读操作,绿色的栏表示集群中的备机。 + +至此,可通过HAProxy实现Patroni管理的openGauss集群的读写分离和负载均衡。 + diff --git a/content/zh/post/douxin/image/database-sync/image1.png b/content/zh/post/douxin/image/database-sync/image1.png new file mode 100644 index 0000000000000000000000000000000000000000..d1f4a28c6091d05829457f0fa8a15dd2bb71a589 Binary files /dev/null and b/content/zh/post/douxin/image/database-sync/image1.png differ diff --git a/content/zh/post/douxin/image/database-sync/image2.png b/content/zh/post/douxin/image/database-sync/image2.png new file mode 100644 index 0000000000000000000000000000000000000000..e85528341885b975d47b2efec40fd5d2ba4bbb44 Binary files /dev/null and b/content/zh/post/douxin/image/database-sync/image2.png differ diff --git a/content/zh/post/douxin/image/database-sync/image3.png b/content/zh/post/douxin/image/database-sync/image3.png new file mode 100644 index 0000000000000000000000000000000000000000..56b1c5b18f9f2718cdd7e5bfc892995755517371 Binary files /dev/null and b/content/zh/post/douxin/image/database-sync/image3.png differ diff --git a/content/zh/post/douxin/image/database-sync/image4.png b/content/zh/post/douxin/image/database-sync/image4.png new file mode 100644 index 
0000000000000000000000000000000000000000..e7e8bce68cc9089b0f9c23797d8a41b7289bccdb Binary files /dev/null and b/content/zh/post/douxin/image/database-sync/image4.png differ diff --git a/content/zh/post/douxin/image/database-sync/image5.png b/content/zh/post/douxin/image/database-sync/image5.png new file mode 100644 index 0000000000000000000000000000000000000000..7335e5691fe236ab2eef1126f0b72a87726b0717 Binary files /dev/null and b/content/zh/post/douxin/image/database-sync/image5.png differ diff --git a/content/zh/post/douxin/image/haproxy/haproxy.cfg b/content/zh/post/douxin/image/haproxy/haproxy.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0ccf2f2764a194d079467379b4b60d49b48b43d4 --- /dev/null +++ b/content/zh/post/douxin/image/haproxy/haproxy.cfg @@ -0,0 +1,51 @@ + +global + maxconn 100 + +defaults + log global + mode tcp + retries 2 + timeout client 30m + timeout connect 4s + timeout server 30m + timeout check 5s + +listen stats + mode http + bind *:7000 + stats enable + stats uri / + +listen opengauss + bind *:5000 + option httpchk + http-check expect status 200 + default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions + # server opengauss_ip0_port0 ip0:port0 maxconn 100 check port 8008 + # server opengauss_ip1_port1 ip1:port1 maxconn 100 check port 8008 + # server opengauss_ip2_port2 ip2:port2 maxconn 100 check port 8008 + # server opengauss_ip3_port3 ip3:port3 maxconn 100 check port 8008 + # server opengauss_ip4_port4 ip4:port4 maxconn 100 check port 8008 + # server opengauss_ip5_port5 ip5:port5 maxconn 100 check port 8008 + # server opengauss_ip6_port6 ip6:port6 maxconn 100 check port 8008 + # server opengauss_ip7_port7 ip7:port7 maxconn 100 check port 8008 + # server opengauss_ip8_port8 ip8:port8 maxconn 100 check port 8008 + +listen opengauss_balance + bind *:5001 + mode tcp + option tcplog + balance roundrobin + option httpchk OPTIONS /replica + http-check expect status 200 + default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions + # server opengauss_ip0_port0 ip0:port0 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip1_port1 ip1:port1 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip2_port2 ip2:port2 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip3_port3 ip3:port3 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip4_port4 ip4:port4 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip5_port5 ip5:port5 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip6_port6 ip6:port6 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip7_port7 ip7:port7 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 + # server opengauss_ip8_port8 ip8:port8 maxconn 100 check port 8008 inter 5000 rise 2 fall 2 diff --git a/content/zh/post/douxin/image/haproxy/image1.png b/content/zh/post/douxin/image/haproxy/image1.png new file mode 100644 index 0000000000000000000000000000000000000000..7475ad54420fae4fc26f178502b1cf4868d31919 Binary files /dev/null and b/content/zh/post/douxin/image/haproxy/image1.png differ diff --git a/content/zh/post/douxin/image/jdbc/png0.png b/content/zh/post/douxin/image/jdbc/png0.png new file mode 100644 index 0000000000000000000000000000000000000000..e454b3e4cece71da2cdb811db3accf9de120eb27 Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png0.png differ diff --git a/content/zh/post/douxin/image/jdbc/png1.png 
b/content/zh/post/douxin/image/jdbc/png1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb7729397967b994f6f7a4677498aad6f44f2e8f Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png1.png differ diff --git a/content/zh/post/douxin/image/jdbc/png2.png b/content/zh/post/douxin/image/jdbc/png2.png new file mode 100644 index 0000000000000000000000000000000000000000..955b4af795e307008f04b368958aea711dd469d3 Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png2.png differ diff --git a/content/zh/post/douxin/image/jdbc/png3.png b/content/zh/post/douxin/image/jdbc/png3.png new file mode 100644 index 0000000000000000000000000000000000000000..8cb55eaee98b1d79368b9b046337bf62ac1f2983 Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png3.png differ diff --git a/content/zh/post/douxin/image/jdbc/png4.png b/content/zh/post/douxin/image/jdbc/png4.png new file mode 100644 index 0000000000000000000000000000000000000000..935697f0cbd3153c555a883ca1cb6e0b05dc4060 Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png4.png differ diff --git a/content/zh/post/douxin/image/jdbc/png5.png b/content/zh/post/douxin/image/jdbc/png5.png new file mode 100644 index 0000000000000000000000000000000000000000..46e215bd0bfb0f6b34f0784509d6d94b713ad8b9 Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png5.png differ diff --git a/content/zh/post/douxin/image/jdbc/png6.png b/content/zh/post/douxin/image/jdbc/png6.png new file mode 100644 index 0000000000000000000000000000000000000000..192d9248d95afcab4ffc0875653a81824f18c4e0 Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png6.png differ diff --git a/content/zh/post/douxin/image/jdbc/png7.png b/content/zh/post/douxin/image/jdbc/png7.png new file mode 100644 index 0000000000000000000000000000000000000000..cda608e09bb5f98249dfcb724d0d41002ab371de Binary files /dev/null and b/content/zh/post/douxin/image/jdbc/png7.png differ diff --git a/content/zh/post/douxin/image/sm3/image1.png b/content/zh/post/douxin/image/sm3/image1.png new file mode 100644 index 0000000000000000000000000000000000000000..7191ba77b86241a7388983442ba811a84a12fc2b Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image1.png differ diff --git a/content/zh/post/douxin/image/sm3/image10.png b/content/zh/post/douxin/image/sm3/image10.png new file mode 100644 index 0000000000000000000000000000000000000000..c6bcab86885267de1ba5c1842191dfda57d64f5c Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image10.png differ diff --git a/content/zh/post/douxin/image/sm3/image11.png b/content/zh/post/douxin/image/sm3/image11.png new file mode 100644 index 0000000000000000000000000000000000000000..7de7abad590c2246b95d3f559a7d0bfa1f291692 Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image11.png differ diff --git a/content/zh/post/douxin/image/sm3/image12.png b/content/zh/post/douxin/image/sm3/image12.png new file mode 100644 index 0000000000000000000000000000000000000000..54dc20ba7b906ff980fd8c78029fc3475e0e33f6 Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image12.png differ diff --git a/content/zh/post/douxin/image/sm3/image13.png b/content/zh/post/douxin/image/sm3/image13.png new file mode 100644 index 0000000000000000000000000000000000000000..23105e8c997e1127c35cf871d1ac0d235a95aed1 Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image13.png differ diff --git a/content/zh/post/douxin/image/sm3/image2.png b/content/zh/post/douxin/image/sm3/image2.png 
new file mode 100644 index 0000000000000000000000000000000000000000..50f6b660a383f2190e3f0e0c1424825ab2d15cd5 Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image2.png differ diff --git a/content/zh/post/douxin/image/sm3/image3.png b/content/zh/post/douxin/image/sm3/image3.png new file mode 100644 index 0000000000000000000000000000000000000000..1decdffa2c685fbae2f39fbc25b2b664524f46aa Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image3.png differ diff --git a/content/zh/post/douxin/image/sm3/image4.png b/content/zh/post/douxin/image/sm3/image4.png new file mode 100644 index 0000000000000000000000000000000000000000..df0575d8469a6813bfa359d5bf596a80933ccba5 Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image4.png differ diff --git a/content/zh/post/douxin/image/sm3/image5.png b/content/zh/post/douxin/image/sm3/image5.png new file mode 100644 index 0000000000000000000000000000000000000000..550d3e815c93582cd76716d6e5f87ecdf5ca5cd7 Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image5.png differ diff --git a/content/zh/post/douxin/image/sm3/image6.png b/content/zh/post/douxin/image/sm3/image6.png new file mode 100644 index 0000000000000000000000000000000000000000..ffb003500fa31856aa2a32afb4fa34a11e51838b Binary files /dev/null and b/content/zh/post/douxin/image/sm3/image6.png differ diff --git a/content/zh/post/douxin/sm3_for_openGauss.md b/content/zh/post/douxin/sm3_for_openGauss.md new file mode 100644 index 0000000000000000000000000000000000000000..1cffaef95dee7b42889db838b07947bc97c624e3 --- /dev/null +++ b/content/zh/post/douxin/sm3_for_openGauss.md @@ -0,0 +1,120 @@ ++++ +title = "openGauss支持国密SM3和SM4算法" +date = "2021-09-26" +tags = ["国密算法"] +archives = "2021-09" +author = "douxin" +summary = "openGauss社区开发入门" +img="/zh/post/douxin/title/img1.png" +times = "17:30" + ++++ + +## 1. 国密算法介绍 + +国密即国家密码局认定的国产密码算法,主要有SM1,SM2,SM3,SM4。密钥长度和分组长度均为128位。针对银行客户对数据库安全能力的诉求以及提高产品安全竞争力的要求,进行数据库企业级安全能力增强,openGauss 自2.0.0版本支持了国密算法,主要包括用户认证支持国密SM3算法[sm3算法](http://www.gmbz.org.cn/main/viewfile/20180108023812835219.html),支持SM4国密算法加解密函数[sm4算法](http://www.gmbz.org.cn/main/viewfile/20180108015408199368.html)。 + +## 2. 
国密SM3算法——用户认证 + +## 2.1 使用方法 + +openGauss现支持四种用户认证方式,其通过postgresql.conf文件中的参数password_encryption_type确定,认证方式与该参数的对应关系如下表所示: + +| 认证方式 | 参数 | +| ---------- | -------------------------- | +| md5 | password_encryption_type=0 | +| sha256+md5 | password_encryption_type=1 | +| sha256 | password_encryption_type=2 | +| sm3 | password_encryption_type=3 | + +其中SM3认证算法目前只支持gsql、 JDBC、 ODBC三种连接方式。 + +创建SM3认证方式的用户的步骤: + +(1)在postgresql.conf文件中配置password_encryption_type=3,并重启数据库使该参数生效 + +![image-20210922104810991](../image/sm3/image1.png) + +(2)创建用户 + +如下示例中,创建了test用户,通过系统表pg_authid的rolpassword字段可以查看用户创建时对应的加密方式,图示即对应sm3加密 + +![image-20210922102744761](../image/sm3/image2.png) + +(3)在pg_hba.conf文件中配置认证方式为sm3 + +![image-20210922103113193](../image/sm3/image3.png) + +此时test用户远程登录方可认证通过 + +![image-20210922104158312](../image/sm3/image4.png) + +对于创建其他认证方式的用户,过程与SM3类似,此处不再赘述,需注意加密方式与认证方式对应即可。 + +## 2.2 实现原理 + +openGauss使用RFC5802口令认证方案 + +- 用户秘钥生成 + + RFC5802秘钥衍生过程如下图所示: + + ![image-20210922105633941](../image/sm3/image5.png) + + ``` + SaltedPassword := PBKDF2 (password, salt, i) + ClientKey := HMAC(SaltedPassword, "Client Key") + StoredKey := Hash(ClientKey) + ``` + + 服务器端存的是StoredKey和ServerKey: + + 1)StoredKey是用来验证Client客户身份的 + + 服务端认证客户端通过计算ClientSignature与客户端发来的ClientProof进行异或运算,从而恢复得到ClientKey,然后将其进行hash运算,将得到的值与StoredKey进行对比。如果相等,证明客户端验证通过。 + + 2)ServerKey是用来向客户端表明自己身份的 + + 类似的,客户端认证服务端,通过计算ServerSignature与服务端发来的值进行比较,如果相等,则完成对服务端的认证。 + + 3)在认证过程中,服务端可以计算出来ClientKey,验证完后直接丢弃不必存储。 + + 要做到合法的登录,必须知道Password、SaltedPassword或者ClientKey。如果StoryKey和ServerKey泄露,无法做到合法登录。 + +- 认证流程 + + 标准RFC5802口令认证流程如下图所示: + + ![image-20210922110211249](../image/sm3/image6.png) + + 1、客户端发送username给服务端。 + + 2、服务端返回给客户端AuthMessage 和计算出来的ServerSignature。 + + 3、客户端收到信息后,首先利用认证信息AuthMessage中的salt和iteration-count(迭代次数),从password计算得到SaltedPassword,然后计算得到下层所有的key。计算HMAC(ServerKey, AuthMessage) == ServerSignature是否相等,如果相等,则client完成对服务端的认证。 + + 4、客户端将计算得到的ClientProof发送给服务端。 + + 5、服务端使用其保存的StoredKey和AuthMessage计算HMAC,在和接收的client发送的ClientProof进行异或,得到ClientKey,在对ClientKey进行哈希,和其保存的StoredKey进行比较是否一致。如果一致,则客户端的认证通过。 + + 服务器端收到客户端请求后,根据pg_hba.conf 配置的认证方式,与客户端进行相应的认证交互。 + +## 3. 
国密SM4算法——数据加解密 + +SM4国密算法可用于对表中的某一列数据进行加解密。参考gs_encrypt_aes128加密函数、gs_decrypt_aes128解密函数,新增的加密函数gs_encrypt,解密函数gs_decrypt支持aes128、sm4的加解密,可以兼容aes128。其中SM4算法调用openssl中的EVP_sm4_cbc()接口。 + +gs_encrypt_aes128和gs_decrypt_aes128函数示意: + +![image-20210922144545491](../image/sm3/image10.png) + +gs_encrypt和gs_decrypt函数示意: + +![image-20210922144124180](../image/sm3/image11.png) + +利用SM4算法对表中数据进行加解密示意图: + +![1632817246028](../image/sm3/image12.png) + +![1632817344288](../image/sm3/image13.png) + +至此,openGauss支持使用国密SM3算法进行用户认证,SM4算法进行数据加解密。 \ No newline at end of file diff --git a/content/zh/post/douxin/title/img1.png b/content/zh/post/douxin/title/img1.png new file mode 100644 index 0000000000000000000000000000000000000000..65e2d4c4751f069c64357704715e2ba99beb511a Binary files /dev/null and b/content/zh/post/douxin/title/img1.png differ diff --git "a/content/zh/post/duomibabi/Oracle\346\225\260\346\215\256\350\241\250\346\220\254\350\277\201\345\210\260openGauss.md" "b/content/zh/post/duomibabi/Oracle\346\225\260\346\215\256\350\241\250\346\220\254\350\277\201\345\210\260openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..514676751c02ab309aae163b040722f97734cffb --- /dev/null +++ "b/content/zh/post/duomibabi/Oracle\346\225\260\346\215\256\350\241\250\346\220\254\350\277\201\345\210\260openGauss.md" @@ -0,0 +1,368 @@ ++++ + +title = "Oracle数据表搬迁到openGauss" + +date = "2021-05-10" + +tags = ["openGauss数据迁移"] + +archives = "2021-05" + +author = "多米爸比" + +summary = "Oracle数据表搬迁到openGauss" + +img = "/zh/post/duomibabi/title/img31.png" + +times = "12:30" + ++++ + +# Oracle数据表搬迁到openGauss + +## 搬迁方法 + +Oracle数据表搬迁到openGauss数据库比较容易想到的两个工具是oracle\_fdw及Ora2Pg。 + +### **oracle\_fdw** + +oracle\_fdw是嵌入在目标端数据库里的一个扩展插件,允许在目标端数据库里访问其他异构数据库的表,openGauss数据库目前也支持这一插件。 + +oracle\_fdw在openGauss数据库的使用可以参考我写的这篇文章:[<\>](https://www.modb.pro/db/37650) + +如下所示,目标端建立了到源端数据表的映射,可以在目标端用create table as select方式进行拷贝。 + +``` +CREATE FOREIGN TABLE public.f_oracle_test ( + id int, + info character varying +) +SERVER server_ora +OPTIONS ( + schema 'scott', + "table" 'AA' +); +``` + +### **Ora2Pg** + +Ora2Pg是一个开源的迁移转换工具,它可以连接Oracle数据库并进行扫描,自动提取结构或数据,然后生成可加载到PostgreSQL数据库中的SQL脚本。 + +本文介绍使用Ora2Pg工具先搬迁数据表及数据到PostgreSQL,然后再从PostgreSQL搬迁到openGauss。 + +Ora2Pg工具的安装可以参考附录一,搬迁前常见的操作命令可以参考附录二。 + +下面演示具体操作步骤: + +**1.初始一个搬迁目录** + +在postgres用户下操作 + +``` +ora2pg --init_project project20210507 \ +--project_base /home/postgres/data +``` + +该目录用于分类存放源端不同的对象类型,如table、view、package、function等,以及Oracle端原生的SQL脚本及Ora2Pg转换后的脚本等。 + +**2.定制Ora2Pg配置文件** + +在postgres用户下操作vi project20210507/config/ora2pg.conf + +``` +NO_HEADER +ORACLE_DSN dbi:Oracle:host=x.x.x.x;port=1521;sid=xxx +ORACLE_USER xxx +ORACLE_PWD xxx +PG_VERSION 12 +PG_DSN dbi:Pg:host=127.0.0.1;port=6000;dbname=xxx +PG_USER xxx +PG_PWD xxx +SCHEMA xxx +ALLOW T_.* UL_.* +EXCLUDE TMP_.* .*_BACKUP +DATA_TYPE VARCHAR2:character varying +SKIP fkeys,indexes +COMPILE_SCHEMA 0 +DISABLE_COMMENT 1 +DROP_FKEY 1 +``` + +注意参数值大小写问题,上面配置Oracle及PG连接参数值可以为小写,SCHEMA配置Oracle端的模式名称需要配置为大写。ALLOW与EXCLUDE一般只使用一种,不要同时配置。上面ALLOW配置白名单,只搬迁以“T\_”开头和“UL\_”开头的表,EXCLUDE配置黑名单,不搬迁以“TMP\_”开头及以“\_BACKUP”结尾的表。 + +**3.使用Ora2Pg导出oracle端指定列表的表结构文件** + +如果我们要完整搬迁所有的对象,包括table、view、package、function等,可以使用脚本文件export\_schema.sh进行导出,并对导出的脚本进行导入测试修正。 + +多数情况下我们先需要按对象类型逐一手工操作一遍,手工配置处理不兼容性,多次处理完成后最后使用脚本一次性把所有对象导出。 + +这里演示只导出部分表,我们使用allow参数直接指定,参数值之间使用逗号分割。在postgres用户下操作。 + +``` +ora2pg --conf config/ora2pg.conf \ +--basedir ./schema/tables \ +--type TABLE \ +--allow "T_TAB1,T_TAB2,UL_TAB1,UL_TAB2..." 
\ +--plsql \ +--out table.sql \ +--jobs 10 --copies 10 --parallel 10 +``` + +最后的三个并行参数代表ora2pg并行处理表的任务数,每个任务从Oracle端传输数据的并行数及每个任务ora2pg传输到PostgreSQL的并行数,可根据实际环境进行修改。 + +**4.使用psql工具导入表结构文件到PG** + +下面是导入到PostgreSQL的操作,在postgres用户下操作 + +``` +/opt/pg12/bin/psql -Umogdb mogdb -f ./schema/tables/table.sql +``` + +上面的操作需要先提前创建用户及数据库,参见如下步骤 + +``` +/opt/pg12/bin/psql +create user mogdb password 'xxx'; +drop database if exists mogdb; +create database mogdb owner mogdb; +\c mogdb postgres +drop schema public; +\c mogdb mogdb +create schema mogdb authorization mogdb; +create schema pkg1 authorization mogdb; +create schema pkg2 authorization mogdb; +``` + +上面的模式pkg1和pkg2用于package包的存储。 + +**5.使用gsql工具导入表结构文件到openGauss** + +导入到openGauss的操作与上面导入到PG类似,openGauss使用gsql工具进行导入。 + +``` +su - omm +gsql -U mogdb -f ./schema/tables/table.sql +``` + +如果有分区表语法需要单独处理下,openGauss与PostgreSQL分区表的语法差异可以参考:[openGauss与PostgreSQL分区策略语法测试](https://www.modb.pro/db/49865) + +**6.使用ora2pg传输oracle表数据到PG** + +先从oracle端查询下数据条数最大的十个表,在postgres用户下操作 + +``` +ora2pg --conf config/ora2pg.conf \ +--allow "T_TAB1,T_TAB2,UL_TAB1,UL_TAB2..." \ +--type SHOW_TABLE +``` + +上一步输出结果如下: + +``` +[1] TABLE T_TAB1 (owner: XXX, 2869 rows) +[2] TABLE T_TAB2 (owner: XXX, 785412 rows) +[3] TABLE UL_TAB1 (owner: XXX, 4153778 rows) +[4] TABLE UL_TAB2 (owner: XXX, 140 rows) +... +[x] TABLE xx (owner: XXX, 125793 rows) +---------------------------------------------------------- +Total number of rows: 1873163888 + +Top 10 of tables sorted by number of rows: + [1] TABLE xx has 395364413 rows + [2] TABLE xx has 379561355 rows + [3] TABLE xx has 363704131 rows + [4] TABLE xx has 237709148 rows + [5] TABLE xx has 105294544 rows + [6] TABLE xx has 99877964 rows + [7] TABLE xx has 54815152 rows + [8] TABLE xx has 49803085 rows + [9] TABLE xx has 45016399 rows + [10] TABLE xx has 25595242 rows +``` + +我们可以排除上面最大的10个表来提升整体的搬迁效率,大表可以单独配置任务来处理。提前使用–oracle\_speed和–ora2pg\_speed参数可以用来测试速度而并不实际搬迁数据,以便预估时间。 + +下面使用COPY方式搬迁数据,并修改了每次内存中缓存的数据条数为10万。 + +``` +ora2pg --conf config/ora2pg.conf \ +--type COPY \ +--allow "T_TAB1,T_TAB2,UL_TAB1,UL_TAB2..." \ +--limit 100000 \ +--jobs 10 --copies 10 --parallel 10 +``` + +**7.使用pg\_dumpall导出文本SQL数据文件** + +在postgres用户下操作 + +``` +/opt/pg12/bin/pg_dumpall \ +--username mogdb \ +--data-only \ +--exclude-database=template0,template1,postgres \ +--file pg_mogdb.sql +``` + +**8.使用gsql导入数据文件到openGauss** + +导入前先删除下文件里的set语句,比如row\_security等参数,这些参数再openGauss里不兼容。 + +``` +su - omm +gsql -U mogdb -f pg_mogdb.sql +``` + +至此我们批量把oracle中部分表搬迁至openGauss数据,中间借助PostgreSQL数据库转储表数据。 + +如果要搬迁package包,可以按包名使用ora2pg进行批量转换,在postgres用户下操作。 + +``` +ora2pg --conf config/ora2pg.conf \ +--basedir ./schema/packages \ +--type PACKAGE \ +--allow "PKG_XX1,PKG_XX2,..." \ +--plsql \ +--out package.sql +``` + +使用ora2pg对oracle端的package包进行转换后,我们进行到转换后的包目录,然后可以直接在openGauss中进行修改调试。 + +``` +cd schema/packages/pkg_xx1 +cd schema/packages/pkg_xx2 ... 
+``` + +## 附录一 Ora2Pg安装 + +**安装Perl** + +``` +# yum install -y perl perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker +``` + +**安装DBI** + +``` +$ wget https://cpan.metacpan.org/authors/id/T/TI/TIMB/DBI-1.643.tar.gz +$ tar -zxvf DBI-1.643.tar.gz +$ cd DBI-1.643 +$ perl Makefile.PL +$ make && make install +``` + +**安装oracle客户端** + +``` +# yum localinstall oracle-instantclient-basic-10.2.0.5-1.x86_64.rpm +# yum localinstall oracle-instantclient-devel-10.2.0.5-1.x86_64.rpm +``` + +**安装DBD-Oracle** + +注意配置oracle客户端动态库,例如: + +``` +export LD_LIBRARY_PATH=/usr/lib/oracle/10.2.0.5/client64/lib:/usr/local/lib +$ wget https://cpan.metacpan.org/authors/id/M/MJ/MJEVANS/DBD-Oracle-1.80.tar.gz +$ tar -zxvf DBD-Oracle-1.80.tar.gz +$ cd DBD-Oracle-1.80 +$ perl Makefile.PL +$ make && make install +``` + +**安装DBD-PG** + +需要先安装并配置好PostgreSQL环境变量,如果本机没有安装PostgreSQL,请参考[源码编译安装PostgreSQL 12](https://www.modb.pro/db/13514) + +确保环境变量配置正确,例如: + +``` +export PATH=$PATH:/opt/pgsql/bin +$ wget https://cpan.metacpan.org/authors/id/T/TU/TURNSTEP/DBD-Pg-3.14.2.tar.gz +$ tar -zxvf DBD-Pg-3.14.2.tar.gz +$ cd DBD-Pg-3.14.2 +$ perl Makefile.PL +$ make && make install +``` + +**安装Ora2Pg** + +``` +$ wget https://github.com/darold/ora2pg/archive/refs/tags/v21.1.tar.gz +$ tar zxvf v21.1.tar.gz +$ cd ora2pg-21.1 +$ perl Makefile.PL +$ make # sudo make install +``` + +**检查版本** + +``` +$ cd ~ +$ ./perl5/bin/ora2pg --version +Ora2Pg v21.1 +``` + +## 附录二 Ora2Pg迁移前常见操作 + +**查看源端Oracle服务器版本** + +``` +ora2pg --conf config/ora2pg.conf \ +--type SHOW_VERSION +``` + +**查看有哪些schema模式(需要dba权限)** + +``` +ora2pg --conf config/ora2pg.conf \ +--type SHOW_SCHEMA +``` + +**查看表信息\(每个表的owner和行数以及表行top10\)** + +``` +ora2pg --conf config/ora2pg.conf \ +--type SHOW_TABLE +``` + +结果会输出表的数量及所有表的记录总数以及记录数最多的top10。 + +**查看列映射信息** + +``` +ora2pg --conf config/ora2pg.conf \ +--type SHOW_COLUMN > check_colum.txt +``` + +观察日志是否有错误警告等信息,找出对应的错误进行配置处理,例如Oracle端有关键字命中,可以在ora2pg.conf文件进行转换处理,使用配置参数REPLACE\_COLS 。 + +``` +REPLACE_COLS SCOTT.T_DEPT(isnull:isnulls) +REPLACE_COLS SCOTT.T_PERSON(using:usings) +``` + +上面SCOTT模式下T\_DEPT表的isnull我们映射到PostgreSQL改为isnulls。同理T\_PERSON表的using字段我们改为usings。 + +**生成迁移报告\(HTML\)** + +``` +ora2pg --conf config/ora2pg.conf \ +--estimate_cost --dump_as_html \ +--parallel 8 --jobs 2 \ +--type SHOW_REPORT > html_report.html +``` + +**迁移后验证** + +迁移测试,验证源端与目标端的表数据行数是否一致 + +``` +ora2pg --conf config/ora2pg.conf \ +--parallel 8 --jobs 2 \ +--count_rows \ +--type TEST > check_tables_count.txt +``` + diff --git "a/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\210\206\345\214\272\347\255\226\347\225\245\350\257\255\346\263\225\346\265\213\350\257\225.md" "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\210\206\345\214\272\347\255\226\347\225\245\350\257\255\346\263\225\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..f06af6204b75190626fb138d260ade1d31655940 --- /dev/null +++ "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\210\206\345\214\272\347\255\226\347\225\245\350\257\255\346\263\225\346\265\213\350\257\225.md" @@ -0,0 +1,378 @@ ++++ + +title = "openGauss与PostgreSQL分区策略语法测试" + +date = "2021-04-19" + +tags = ["openGauss与PostgreSQL对比"] + +archives = "2021-04" + +author = "多米爸比" + +summary = "openGauss与PostgreSQL分区策略语法测试" + +img = "/zh/post/duomibabi/title/img26.png" + +times = "16:30" + ++++ + +# openGauss与PostgreSQL分区策略语法测试 + +## 父子继承表 + +目前openGauss还不支持inherits继承特性。 + +``` +omm=# CREATE TABLE tab_t2(age int) inherits(tab_t1); +ERROR: CREATE 
TABLE ... INHERITS is not yet supported. +``` + +PostgreSQL支持继承,版本10之前的分区表都是通过继承特性来实现,每个分区实际上都是一个独立的表。数据更新可通过触发器trigger或者规则rule来实现 + +下面演示PostgreSQL中的继承特性: + +``` +CREATE TABLE tab_t1(id int primary key,name varchar(20) not null); +CREATE TABLE tab_t2(age int) inherits(tab_t1); +``` + +对父表增加字段 + +``` +alter table tab_t1 add create_date date; +``` + +查看表结构 + +``` +postgres=# \d tab_t1 + Table "public.tab_t1" + Column | Type | Collation | Nullable | Default +-------------+-----------------------+-----------+----------+--------- + id | integer | | not null | + name | character varying(20) | | not null | + create_date | date | | | +Indexes: + "tab_t1_pkey" PRIMARY KEY, btree (id) +Number of child tables: 1 (Use \d+ to list them.) + +postgres=# \d tab_t2 + Table "public.tab_t2" + Column | Type | Collation | Nullable | Default +-------------+-----------------------+-----------+----------+--------- + id | integer | | not null | + name | character varying(20) | | not null | + age | integer | | | + create_date | date | | | +Inherits: tab_t1 +``` + +我们不通过触发器或者规则路由数据,直接插入数据 + +``` +INSERT INTO tab_t1 VALUES (1,'data 1 in tab_t1',now()); +INSERT INTO tab_t1 VALUES (2,'data 2 in tab_t1',now()); +INSERT INTO tab_t2 VALUES (3,'data 3 in tab_t2',18,now()); +INSERT INTO tab_t2 VALUES (4,'data 4 in tab_t2',20,now()); +``` + +从父表中查询数据将显示父表及子表的所有数据 + +``` +postgres=# SELECT * from tab_t1; +id | name | create_date +----+------------------+------------- + 1 | data 1 in tab_t1 | 2021-04-11 + 2 | data 2 in tab_t1 | 2021-04-11 + 3 | data 3 in tab_t2 | 2021-04-11 + 4 | data 4 in tab_t2 | 2021-04-11 +(4 rows) +``` + +通过ONLY关键字实现只对父表的查询 + +``` +postgres=# SELECT * from ONLY tab_t1; + id | name | create_date +----+------------------+------------- + 1 | data 1 in tab_t1 | 2021-04-11 + 2 | data 2 in tab_t1 | 2021-04-11 +(2 rows) +``` + +从子表中查询只显示子表中的数据 + +``` +postgres=# select * from tab_t2; + id | name | age | create_date +----+------------------+-----+------------- + 3 | data 3 in tab_t2 | 18 | 2021-04-11 + 4 | data 4 in tab_t2 | 20 | 2021-04-11 +(2 rows) +``` + +继承特性使用注意点: + +- 子表并不能完全继承父表的所有属性,比如唯一约束、主键、外键,检查约束与非空约束可以继承。 +- 修改父表的结构,子表结构同时被修改。 +- 父表不存数据时,不建议在父表上创建索引和或唯一约束,应该在每个子表上分别创建。 + +## 声明式分区:范围分区 + +将数据基于范围映射到每一个分区,这个范围是由创建分区表时指定的分区键决定的。这种分区方式较为常用,并且分区键经常采用日期。 + +PostgreSQL从版本10开始支持,范围分区声明式语法分两步: + +1.通过指定PARTITION BY子句把表创建为分区表,包括分区方法以及用作分区键的column列表。 + +``` +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) PARTITION BY RANGE (logdate) +``` + +2.创建分区,每个分区的定义必须指定对应于父表的分区方法和分区键的边界。 + +``` +CREATE TABLE measurement_y2006m02 PARTITION OF measurement + FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); + +CREATE TABLE measurement_y2006m03 PARTITION OF measurement + FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); +... 
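+-- From PostgreSQL 11 onward, a DEFAULT partition can also be declared to
+-- catch rows that match no explicit range, e.g.:
+-- CREATE TABLE measurement_default PARTITION OF measurement DEFAULT;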
+``` + +openGauss范围分区声明式语法可以一步完成,范围分区从句语法有两种格式 + +- VALUES LESS THAN语法格式\(范围分区策略的分区键最多支持4列\) +- START END语法格式\(范围分区策略的分区键仅支持1列\) + +注意上面两种从句语法不能混用,START END语法格式使用gs\_dump时会转变为VALUES LESS THAN语法格式。 + +openGauss范围分区例子 + +``` +CREATE TABLE tab_part ( + id int not null, + create_date date not null +) PARTITION BY RANGE(create_date) +( +PARTITION p_20210401 VALUES LESS THAN(to_date('2021-04-01','yyyy-mm-dd')), +PARTITION p_20210402 VALUES LESS THAN(to_date('2021-04-02','yyyy-mm-dd')), +PARTITION p_max VALUES LESS THAN(MAXVALUE) +); +``` + +查看系统表可看到分区策略为“r”,range分区。 + +``` +omm=# select relname,partstrategy from pg_partition where relname='tab_part'; + relname | partstrategy +----------+-------------- + tab_part | r +(1 row) +``` + +查看分区及边界 + +``` +select relname,parttype,parentid,boundaries +from pg_partition +where parentid in(select oid from pg_class where relname='tab_part'); + relname | parttype | parentid | boundaries +------------+----------+----------+------------------------- + tab_part | r | 16412 | + p_20210401 | p | 16412 | {"2021-04-01 00:00:00"} + p_20210402 | p | 16412 | {"2021-04-02 00:00:00"} + p_max | p | 16412 | {NULL} +(4 rows) +``` + +接下来插入三条数据 + +``` +insert into tab_part values(1,'2021-03-31'); +insert into tab_part values(2,'2021-04-01'); +insert into tab_part values(3,'9999-12-31'); +``` + +查询分区,按分区名p\_20210402,也可以按分区边界值(PARTITION FOR) + +``` +omm=# select * from tab_part PARTITION (p_20210402); + id | create_date +----+--------------------- + 2 | 2021-04-01 00:00:00 +(1 row) +``` + +## 声明式分区:列表分区 + +通过显式地列出每一个分区中出现的键值来划分表。 + +与前面范围分区一样,PostgreSQL列表分区声明式语法也是两步,从版本10开始支持,openGauss只需一步完成,从版本1.1.0开始支持。 + +openGauss列表分区例子 + +``` +CREATE TABLE tab_list( + dept_no number, + part_no varchar2(20), + country varchar2(20), + dtime date, + amount number +) +PARTITION BY LIST(country)( + PARTITION europe VALUES('FRANCE', 'ITALY'), + PARTITION asia VALUES('INDIA', 'PAKISTAN'), + PARTITION americas VALUES('US', 'CANADA') +); +``` + +查看系统表可看到分区策略为“l”,list分区。 + +``` +omm=# select relname,partstrategy from pg_partition where relname='tab_list'; + relname | partstrategy +----------+-------------- + tab_list | l +(1 row) +``` + +查看分区及边界 + +``` +select relname,parttype,parentid,boundaries +from pg_partition +where parentid in(select oid from pg_class where relname='tab_list'); + relname | parttype | parentid | boundaries +----------+----------+----------+------------------ + tab_list | r | 16389 | + americas | p | 16389 | {US,CANADA} + asia | p | 16389 | {INDIA,PAKISTAN} + europe | p | 16389 | {FRANCE,ITALY} +(4 rows) +``` + +## 声明式分区:哈希分区 + +将数据通过哈希映射到每一个分区,每一个分区中存储了具有相同哈希值的记录。 + +PostgreSQL哈希分区声明式语法也是两步,从版本11开始支持,openGauss只需一步完成,从版本1.1.0开始支持。 + +openGauss哈希分区例子 + +``` +CREATE TABLE tab_hash( + dept_no number, + part_no varchar2(20), + country varchar2(20), + dtime date, + amount number +)PARTITION BY HASH(part_no)( + PARTITION p1, + PARTITION p2, + PARTITION p3 +); +``` + +查看系统表可看到分区策略为“h”,hash分区。 + +``` +omm=# select relname,partstrategy from pg_partition where relname='tab_hash'; + relname | partstrategy +----------+-------------- + tab_hash | h +(1 row) +``` + +查看分区及边界 + +``` +select relname,parttype,parentid,boundaries +from pg_partition +where parentid in(select oid from pg_class where relname='tab_hash'); + relname | parttype | parentid | boundaries +----------+----------+----------+------------ + tab_hash | r | 16405 | + p3 | p | 16405 | {2} + p2 | p | 16405 | {1} + p1 | p | 16405 | {0} +(4 rows) +``` + +## 基于范围分区的自动扩展间隔分区 + 
+间隔分区(Interval-Partition)是针对Range类型分区的一种功能拓展。对连续数据类型的Range分区,如果插入的新数据值与当前分区均不匹配,Interval-Partition特性可以实现自动的分区创建。分区字段必须是时间类型\(date或timestamp\)。 + +PostgreSQL目前还不支持该语法,openGauss从版本1.1.0开始支持。 + +openGauss间隔分区例子 + +``` +CREATE TABLE tab_range_interval ( + id int not null, + create_date date not null +) PARTITION BY RANGE(create_date) INTERVAL('1 month') +( +PARTITION p1 VALUES LESS THAN(to_date('2021-01-29','yyyy-mm-dd')) +); +``` + +查看系统表可看到分区策略为“i”,interval分区。 + +``` +omm=# select relname,partstrategy,interval from pg_partition where relname='tab_range_interval'; + relname | partstrategy | interval +----------+--------------+----------- + tab_part | i | {"1 month"} +(1 row) +``` + +接下来插入三条数据 + +``` +insert into tab_range_interval values(1,'2021-01-29'); +insert into tab_range_interval values(2,'2021-02-28'); +insert into tab_range_interval values(3,'2022-03-29'); +``` + +插入数据后检查是否自动创建了相应的分区 + +``` +omm=# select relname,parttype,parentid,boundaries +from pg_partition +where parentid in(select oid from pg_class where relname='tab_range_interval'); + relname | parttype | parentid | boundaries +--------------------+----------+----------+-------------- + tab_range_interval | r | 16572 | + p1 | p | 16572 | {2021-01-29} + sys_p1 | p | 16572 | {2021-02-28} + sys_p2 | p | 16572 | {2021-03-28} + sys_p3 | p | 16572 | {2022-04-28} +(5 rows) +``` + +可以看到sys\_p1,sys\_p2,sys\_p3为系统自动生成的分区,并且自动处理了月末问题。 + +注意: + +1.上面是在opengauss 1.1.0版本上测试的,从2.0.0版本开始,模板库默认字符集由SQL\_ASCII改为了UTF8,同时数据库兼容性由ORACLE改为PG,对本测试的影响是date数据类型。 + +2.目前只支持INTERVAL-RANGE,其它方式不支持。 + +3.间隔分区字段必须是时间类型\(date或timestamp\)。 + +## 总结 + +1.openGauss目前只支持声明式分区,支持范围分区、列表分区、哈希分区以及INTERVAL-RANGE的自动扩展间隔分区。PostgreSQL支持继承及声明式分区,不支持自动扩展间隔分区。 + +2.自动扩展间隔分区的分区字段目前只支持时间类型\(date或timestamp\)。 + +3.对于声明式分区的分区来说,分区必须具有和分区表正好相同的列集合,表结构必须严格一致,而在表继承中,子表可以有父表中没有出现过的额外列,同时表继承允许多继承。 + diff --git "a/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215CA\350\257\201\344\271\246\345\215\225\345\220\221\350\256\244\350\257\201\346\265\213\350\257\225.md" "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215CA\350\257\201\344\271\246\345\215\225\345\220\221\350\256\244\350\257\201\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..495b964dadb9d586b857b07ca0c973090904b843 --- /dev/null +++ "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215CA\350\257\201\344\271\246\345\215\225\345\220\221\350\256\244\350\257\201\346\265\213\350\257\225.md" @@ -0,0 +1,216 @@ ++++ + +title = "openGauss与PostgreSQL对比测试SSL之自签名CA证书单向认证测试" + +date = "2021-03-31" + +tags = ["openGauss与PostgreSQL对比"] + +archives = "2021-03" + +author = "多米爸比" + +summary = "openGauss与PostgreSQL对比测试SSL之自签名CA证书单向认证测试" + +img = "/zh/post/duomibabi/title/img26.png" + +times = "16:30" + ++++ + +# openGauss与PostgreSQL对比测试SSL之自签名CA证书单向认证测试 + +本文测试自签名CA证书的单向认证: 客户端只验证服务器证书的有效性,而服务器端不验证客户端证书的有效性。服务器加载证书信息并发送给客户端,客户端使用根证书来验证服务器端证书的有效性。 + +## 服务端证书的客户端认证模式 + +客户端SSLMODE设置为verify-ca仅校验数据库证书真伪。 + +客户端SSLMODE设置为verify-full校验数据库证书真伪及通用名CN匹配数据库连接的hostname。 + +## 自签名CA证书单向认证测试 + +**1.创建CA证书** + +CA证书用于给数据库服务器证书签名,同时需要把CA证书发送给数据库客户端,客户端使用CA证书验证数据库服务器证书。 + +``` +$ openssl req -new -x509 -days 365 -nodes \ +-config openssl.cnf \ +-out ca.crt -keyout ca.key -subj "/CN=FooCA" +``` 
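+
+Before distributing ca.crt, you can inspect what was generated (plain openssl x509 usage, nothing specific to this setup):
+
+```
+$ openssl x509 -in ca.crt -noout -subject -dates
+```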
+ +**2.生成数据库服务器证书请求文件** + +``` +$ openssl req -new -nodes -text \ +-config openssl.cnf \ +-out server.csr \ +-keyout server.key \ +-subj "/CN=192.168.137.5" +``` + +将证书请求文件\(包含用户信息\)和证书签名分开操作,证书请求文件可重用,因为后面可能需要重新生成签名信息。 + +**3.使用CA证书对证书请求文件签名** + +``` +$ openssl x509 -req -in server.csr -text -days 5 \ +-CA ca.crt \ +-CAkey ca.key \ +-CAcreateserial \ +-out server.crt +``` + +这里设置有效期为5天,可以观察在服务器证书有效期小于7天的时候,连接登录后会在日志中产生告警提醒。 + +**4.传输数据库服务器证书及未加密的私钥文件至数据库服务器** + +修改文件权限以符合安全设置。 + +``` +$ chmod 0600 server.crt server.key +``` + +传输文件到数据库服务器PGDATA目录。 + +``` +$ cp server.crt server.key $PGDATA +``` + +注意:如果PostgreSQL使用-g, --allow-group-access + +开启了组访问权限,则需要拷贝文件到PGDATA目录之外以符合安全设置。 + +**5.数据库SSL参数配置** + +pg\_hba.conf文件配置hostssl条目,认证方法保持md5或者scram不变。 + +``` +hostssl all all 0.0.0.0/0 md5 +``` + +说明:也可以按原来的host连接类型,同时支持非ssl和ssl连接,配置为hostssl只支持hostssl,这里配置为hostssl。 + +postgreql.conf文件配置参数 + +``` +ssl=on +ssl_cert_file= 'server.crt' +ssl_key_file= 'server.key' +``` + +然后重启数据库服务。 + +**6.发送CA证书到数据库客户端** + +本文数据库客户端使用linux下psql,证书文件的默认路径为$HOME/.postgresql/root.crt。 + +``` +cat ca.crt > ~/.postgresql/root.crt +chmod 0600 ~/.postgresql/root.crt +``` + +**测试一** + +数据库客户端未配置证书测试,删除上面第6步的文件。 + +openGauss + +``` +gsql "sslmode=verify-ca" -p6432 -h 192.168.137.5 -Upostgres +gsql: root certificate file "/home/omm/.postgresql/root.crt" does not exist +Either provide the file or change sslmode to disable server certificate verification. +``` + +PostgreSQL + +``` +psql "sslmode=verify-ca" -h192.168.137.11 +psql: error: root certificate file "/home/postgres/.postgresql/root.crt" does not exist +Either provide the file or change sslmode to disable server certificate verification. +``` + +可以看到设置sslmode=verify-ca后,客户端需要验证服务器证书,未配置默认root.crt问题,提示文件不存在,符合预期。 + +**测试二** + +人为修改数据库客户端证书内容。 + +openGauss + +``` +gsql "sslmode=verify-ca" -p6432 -h 192.168.137.5 -Upostgres +gsql: could not read root certificate file "/home/omm/.postgresql/root.crt": too long +gsql "sslmode=verify-ca" -p6432 -h 192.168.137.5 -Upostgres +gsql: could not read root certificate file "/home/omm/.postgresql/root.crt": wrong tag +``` + +PostgreSQL + +``` +psql "sslmode=verify-ca" -h192.168.137.11 +psql: error: could not read root certificate file "/home/postgres/.postgresql/root.crt": +bad base64 decode +psql "sslmode=verify-ca" -p7000 -h192.168.137.11 +psql: error: could not read root certificate file "/home/postgres/.postgresql/root.crt": too long +``` + +可以看到root.crt证书文件内容如果被篡改也是有相应的报错提示,符合预期。 + +**测试三** + +测试验证数据库服务器证书,将正确的证书文件发送至数据库客户端,参考上面第6步配置。 + +openGauss + +``` +gsql "sslmode=verify-ca" -p6432 -h 192.168.137.5 -Upostgres +Password for user postgres: +gsql ((GaussDB Kernel V500R001C20 build ) compiled at 2021-03-09 18:30:51 commit 0 last mr ) +SSL connection (cipher: DHE-RSA-AES128-GCM-SHA256, bits: 128) +Type "help" for help. + +postgres=> +``` + +PostgreSQL + +``` +psql "sslmode=verify-ca" -h192.168.137.11 +Password for user postgres: +psql (12.6) +SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off) +Type "help" for help. 
+ +postgres=# \q +``` + +使用sslmode=verify-ca仅验证服务器证书真伪,符合预期。 + +**测试四** + +测试数据库服务器证书设置的通用名CN是否匹配客户端连接的hostname。 + +openGauss + +``` +gsql "sslmode=verify-full" -p6432 -h opengauss1 -Upostgres +gsql: server common name "192.168.137.5" does not match host name "opengauss1" +``` + +PostgreSQL + +``` +psql "sslmode=verify-full" -hnode11 +psql: error: server certificate for "192.168.137.11" does not match host name "node11" +``` + +分别使用ip地址及主机名测试,与通用名CN匹配的ip地址可成功登录,使用主机名连接报错,报错提示如上,符合预期。 + +## 总结 + +1.数据库服务器证书的客户端认证需要在客户端配置服务器证书签名的CA证书,服务器设置支持hostssl连接,客户端使用sslmode连接参数。 + +2.sslmode连接参数设置为verify-ca仅校验数据库证书真伪,设置为verify-full校验数据库证书真伪及通用名CN匹配数据库连接的hostname。 + diff --git "a/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215CA\350\257\201\344\271\246\345\217\214\345\220\221\350\256\244\350\257\201\346\265\213\350\257\225.md" "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215CA\350\257\201\344\271\246\345\217\214\345\220\221\350\256\244\350\257\201\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..f6d12f1525bc79a9f8ad6eff3e8d443792e3a638 --- /dev/null +++ "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215CA\350\257\201\344\271\246\345\217\214\345\220\221\350\256\244\350\257\201\346\265\213\350\257\225.md" @@ -0,0 +1,367 @@ ++++ + +title = "openGauss与PostgreSQL对比测试SSL之自签名CA证书双向认证测试" + +date = "2021-03-31" + +tags = ["openGauss与PostgreSQL对比"] + +archives = "2021-03" + +author = "多米爸比" + +summary = "openGauss与PostgreSQL对比测试SSL之自签名CA证书双向认证测试" + +img = "/zh/post/duomibabi/title/img27.png" + +times = "16:30" + ++++ + +# openGauss与PostgreSQL对比测试SSL之自签名CA证书双向认证测试 + +本文测试自签名CA证书的双向认证: 客户端验证服务器证书的有效性,同时服务器端也要验证客户端证书的有效性,只有认证成功,连接才能建立。 + +## 服务端证书的客户端认证模式 + +1.客户端SSLMODE设置为verify-ca仅校验数据库证书真伪。 + +2.客户端SSLMODE设置为verify-full校验数据库证书真伪及通用名CN匹配数据库连接的hostname。 + +## 客户端证书的服务器认证模式 + +1.数据库认证文件pg\_hba.conf配置认证选项clientcert=verify-ca仅验证客户端证书真伪,认证方法可选。 + +2.数据库认证文件pg\_hba.conf配置认证选项clientcert=verify-full验证客户端证书真伪及CN匹配数据库连接用户名或映射匹配,认证方法可选。 + +3.数据库认证文件pg\_hba.conf配置认证方法cert,免密验证客户端证书真伪及CN匹配数据库连接用户名或映射匹配 + +## 自签名CA证书双向认证测试 + +**1.创建CA证书** + +用于给数据库服务器证书签名的CA证书:ca\_server.crt + +``` +$ openssl req -new -x509 -days 365 -nodes \ +-config openssl.cnf \ +-out ca_server.crt -keyout ca_server.key -subj "/CN=FooServerCA" +``` + +用于给客户端证书签名的CA证书:ca\_client.crt + +``` +$ openssl req -new -x509 -days 365 -nodes \ +-config openssl.cnf \ +-out ca_client.crt -keyout ca_client.key -subj "/CN=FooClientCA" +``` + +**2.生成数据库服务器证书请求文件并签名** + +``` +$ openssl req -new -nodes -text \ +-config openssl.cnf \ +-out server.csr \ +-keyout server.key \ +-subj "/CN=192.168.137.5" +``` + +将证书请求文件\(包含用户信息\)和证书签名分开操作,证书请求文件可重用,因为后面可能需要重新生成证书。 + +使用ca\_server.crt对证书请求文件签名 + +``` +$ openssl x509 -req -in server.csr -text -days 5 \ +-CA ca_server.crt \ +-CAkey ca_server.key \ +-CAcreateserial \ +-out server.crt +``` + +这里设置有效期为5天,可以观察在服务器证书有效期小于7天的时候,连接登录后会在日志中产生告警提醒。 + +**3.生成客户端证书请求文件并签名** + +``` +$ openssl req -new -nodes -text \ +-config openssl.cnf \ +-out client.csr \ +-keyout client.key \ +-subj "/CN=dbuser1" +``` + +将证书请求文件\(包含用户信息\)和证书签名分开操作,证书请求文件可重用,因为后面可能需要重新生成证书。 + +使用ca\_client.crt对证书请求文件签名 + +``` +$ openssl x509 -req -in client.csr -text -days 30 \ +-CA ca_client.crt \ +-CAkey 
ca_client.key \ +-CAcreateserial \ +-out client.crt +``` + +**4.传输数据库服务器证书及未加密的私钥文件至数据库服务器** + +修改文件权限以符合安全设置 + +``` +$ chmod 0600 ca_client.crt server.crt server.key +``` + +传输文件到数据库服务器PGDATA目录 + +``` +$ cp ca_client.crt server.crt server.key $PGDATA +``` + +注意:如果PostgreSQL使用-g, --allow-group-access + +开启了组访问权限,则需要拷贝文件到PGDATA目录之外以符合安全设置。 + +**5.数据库SSL参数配置** + +postgreql.conf文件配置参数 + +``` +ssl=on +ssl_cert_file= 'server.crt' +ssl_key_file= 'server.key' +ssl_ca_file='ca_client.crt' +``` + +然后重启数据库服务。 + +**6.配置数据库客户端** + +客户端使用linux下psql,证书文件的默认路径为$HOME/.postgresql/ + +``` +chmod 0600 client.key client.crt +cp client.crt client.key ~/.postgresql/ +cat ca_server.crt > ~/.postgresql/root.crt +chmod 0600 ~/.postgresql/root.crt +``` + +**测试一** + +pg\_hba.conf文件配置hostssl条目。 + +``` +hostssl all all 0.0.0.0/0 md5 +``` + +测试验证数据库服务器证书 + +openGauss数据库 + +``` +gsql "sslmode=verify-ca" -p6432 -h 192.168.137.5 -Upostgres +Password for user postgres: +gsql ((GaussDB Kernel V500R001C20 build ) compiled at 2021-03-09 18:30:51 commit 0 last mr ) +SSL connection (cipher: DHE-RSA-AES128-GCM-SHA256, bits: 128) +Type "help" for help. + +postgres=> +``` + +PostgreSQL数据库 + +``` +psql "sslmode=verify-ca" -h192.168.137.11 +Password for user postgres: +psql (12.6) +SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off) +Type "help" for help. + +postgres=# \q +``` + +使用sslmode=verify-ca仅验证服务器证书真伪,符合预期。 + +**测试二** + +测试数据库服务器证书设置的通用名CN是否匹配客户端连接的hostname + +openGauss数据库 + +``` +gsql "sslmode=verify-full" -p6432 -h opengauss1 -Upostgres +gsql: server common name "192.168.137.5" does not match host name "opengauss1" +``` + +PostgreSQL数据库 + +``` +psql "sslmode=verify-full" -hnode11 +psql: error: server certificate for "192.168.137.11" does not match host name "node11" +``` + +分别使用ip地址及主机名测试,与通用名CN匹配的ip地址可成功登录,使用主机名连接报错,报错提示如上,符合预期。 + +**测试三** + +测试验证客户端证书 + +pg\_hba.conf文件配置hostssl条目。 + +``` +hostssl all all 0.0.0.0/0 md5 clientcert=verify-ca +``` + +此时数据库连接使用ip地址或者hostname均可连接。 + +openGauss数据库 + +``` +gsql "sslcert=/home/omm/.postgresql/client.crt sslkey=/home/omm/.postgresql/client.key" +-h192.168.137.5 -p6432 -Upostgres +Password for user postgres: +Warning: The client certificate will expire in 29 days. +gsql ((GaussDB Kernel V500R001C20 build ) compiled at 2021-03-09 18:30:51 commit 0 last mr ) +SSL connection (cipher: DHE-RSA-AES128-GCM-SHA256, bits: 128) +Type "help" for help. + +postgres=> +``` + +使用hostname也可连接 + +``` +gsql "sslcert=/home/omm/.postgresql/client.crt sslkey=/home/omm/.postgresql/client.key" +-hopengauss1 -p6432 -Upostgres +``` + +如果使用不正确的客户端证书,比如手工修改client.crt内容,测试会失败。 + +PostgreSQL数据库 + +``` +psql "sslcert=/home/postgres/.postgresql/client.crt sslkey=/home/postgres/.postgresql/client.key" -h192.168.137.11 +Password for user postgres: +psql (12.6) +SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off) +Type "help" for help. + +postgres=# \q +``` + +使用hostname也可连接。 + +``` +psql "sslcert=/home/postgres/.postgresql/client.crt sslkey=/home/postgres/.postgresql/client.key" -hnode11 +Password for user postgres: +psql (12.6) +SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off) +Type "help" for help. 
+
+**Test 4**
+
+Test verification of the client certificate together with the CN match against the user or a user mapping.
+
+Configure a hostssl entry in the pg\_hba.conf file.
+
+```
+hostssl all all 0.0.0.0/0 md5 clientcert=verify-full
+```
+
+Now the database connection must use the name configured in the CN, dbuser1
+
+openGauss
+
+```
+gsql "dbname=postgres sslcert=/home/omm/.postgresql/client.crt sslkey=/home/omm/.postgresql/client.key" -h192.168.137.5 -p6432 -Udbuser1
+```
+
+Above, logging in as dbuser1 succeeds; however, logging in as other users succeeds as well.
+
+PostgreSQL
+
+```
+psql "dbname=postgres sslcert=/home/postgres/.postgresql/client.crt sslkey=/home/postgres/.postgresql/client.key" -h192.168.137.11 -p6000 -Udbuser1
+```
+
+Above, logging in as dbuser1 succeeds; using another user such as postgres produces the following error.
+
+```
+psql: error: FATAL: password authentication failed for user "postgres"
+FATAL: no pg_hba.conf entry for host "192.168.137.11", user "postgres", database "postgres", SSL off
+```
+
+**Test 5**
+
+Test passwordless cert authentication: verify the client certificate and the CN match against the user or a user mapping
+
+Configure a hostssl entry in the pg\_hba.conf file.
+
+```
+hostssl all all 0.0.0.0/0 cert
+```
+
+Now the database connection must use the name configured in the CN, dbuser1, and no password is required.
+
+openGauss
+
+```
+gsql "dbname=postgres sslcert=/home/omm/.postgresql/client.crt sslkey=/home/omm/.postgresql/client.key" -h192.168.137.5 -p6432 -Udbuser1
+Warning: The client certificate will expire in 29 days.
+gsql ((GaussDB Kernel V500R001C20 build ) compiled at 2021-03-09 18:30:51 commit 0 last mr )
+SSL connection (cipher: DHE-RSA-AES128-GCM-SHA256, bits: 128)
+Type "help" for help.
+
+postgres=>
+```
+
+Above, user dbuser1 logs in directly without entering a password; using another user such as postgres produces the following error.
+
+```
+Warning: The client certificate will expire in 29 days.
+gsql: FATAL: certificate authentication failed for user "postgres"
+FATAL: no pg_hba.conf entry for host "192.168.137.5", user "postgres", database "postgres", SSL off
+```
+
+PostgreSQL
+
+```
+psql "dbname=postgres sslcert=/home/postgres/.postgresql/client.crt sslkey=/home/postgres/.postgresql/client.key"
+-h192.168.137.11 -Udbuser1
+psql (12.6)
+SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off)
+Type "help" for help.
+
+postgres=> \q
+```
+
+Above, user dbuser1 logs in directly without entering a password; using another user such as postgres produces the following error.
+
+```
+psql: error: FATAL: certificate authentication failed for user "postgres"
+FATAL: no pg_hba.conf entry for host "192.168.137.11", user "postgres", database "postgres", SSL off
+```
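+
+The "user mapping" mentioned above is configured in pg_ident.conf. A minimal sketch of mapping a certificate CN onto database users (the map name certmap is hypothetical; this is standard PostgreSQL behavior, and openGauss support should be verified separately):
+
+```
+# pg_hba.conf: cert authentication consulting a user-name map
+hostssl all all 0.0.0.0/0 cert map=certmap
+
+# pg_ident.conf: MAPNAME  SYSTEM-USERNAME(cert CN)  PG-USERNAME
+certmap  dbuser1  dbuser1
+certmap  dbuser1  readonly_user
+```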
+
+## Summary
+
+1. Setting the sslmode connection parameter to verify-ca only verifies the authenticity of the database certificate; setting it to verify-full also checks that the common name (CN) matches the hostname of the database connection.
+
+2. Setting the clientcert authentication option to verify-ca only verifies the authenticity of the client certificate; setting it to verify-full also checks that the CN matches the database user or a user mapping.
+
+3. With the clientcert authentication option, the connection type can be set to hostssl; the host type, which covers both hostssl and hostnossl connections, also accepts it.
+
+4. The cert authentication method can only be used with the hostssl connection type.
+
+5. For client-certificate authentication with clientcert=verify-full, openGauss behaves differently from PostgreSQL; see Test 4.
+
diff --git "a/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215\347\247\201\346\234\211\350\257\201\344\271\246\346\265\213\350\257\225.md" "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215\347\247\201\346\234\211\350\257\201\344\271\246\346\265\213\350\257\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..d9fde25a41b6b70dee5f00eca3d15c9939a824ff
--- /dev/null
+++ "b/content/zh/post/duomibabi/openGauss\344\270\216PostgreSQL\345\257\271\346\257\224\346\265\213\350\257\225SSL\344\271\213\350\207\252\347\255\276\345\220\215\347\247\201\346\234\211\350\257\201\344\271\246\346\265\213\350\257\225.md"
@@ -0,0 +1,162 @@
++++
+
+title = "openGauss vs. PostgreSQL SSL Comparison: Self-Signed Private Certificate Test"
+
+date = "2021-03-29"
+
+tags = ["openGauss vs. PostgreSQL comparison"]
+
+archives = "2021-03"
+
+author = "多米爸比"
+
+summary = "openGauss vs. PostgreSQL SSL comparison: self-signed private certificate test"
+
+img = "/zh/post/duomibabi/title/img31.png"
+
+times = "16:30"
+
++++
+
+# openGauss vs. PostgreSQL SSL Comparison: Self-Signed Private Certificate Test
+
+## A brief introduction to SSL transport encryption
+
+SSL authentication uses SSL certificates so that the client checks the server certificate and/or the server checks the client certificate. Besides encrypting data, SSL authentication verifies the identity of the peer, preventing man-in-the-middle impersonation attacks.
+
+## One-way versus mutual authentication
+
+**One-way authentication**
+
+One-way authentication generally means that only the client verifies the validity of the server certificate, while the server does not verify the client certificate. The server loads its certificate and sends it to the client, and the client validates the server certificate against the root certificate.
+
+**Mutual authentication**
+
+Mutual authentication means that the client verifies the validity of the server certificate and the server also verifies the validity of the client certificate; the connection is established only when both ends authenticate successfully.
+
+The client can verify the server certificate in two modes:
+
+1. Setting the client connection parameter SSLMODE to verify-ca only verifies the authenticity of the database certificate.
+2. Setting the client connection parameter SSLMODE to verify-full verifies the authenticity of the database certificate and checks that the common name (CN) matches the hostname of the database connection.
+
+The server can verify the client certificate in three modes:
+
+1. Configuring the authentication option clientcert=verify-ca in pg\_hba.conf only verifies the authenticity of the client certificate; the authentication method can be chosen freely.
+2. Configuring the authentication option clientcert=verify-full in pg\_hba.conf verifies the authenticity of the client certificate and checks that the CN matches the database user name or a configured mapping; the authentication method can be chosen freely.
+3. Configuring the cert authentication method in pg\_hba.conf verifies, without a password, the authenticity of the client certificate and that the CN matches the database user name or a configured mapping.
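+
+Side by side, the three server-side modes correspond to pg_hba.conf entries like these (a sketch; md5 is just one possible method choice):
+
+```
+# verify client certificate authenticity only; password still required
+hostssl all all 0.0.0.0/0 md5 clientcert=verify-ca
+# additionally require CN = database user (or mapping); password still required
+hostssl all all 0.0.0.0/0 md5 clientcert=verify-full
+# cert method: CN must match the user; no password prompt
+hostssl all all 0.0.0.0/0 cert
+```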
+
+cert authentication is in effect trust-method authentication combined with the clientcert=verify-full option.
+
+## SSL build support
+
+PostgreSQL must be built with the following options, and openssl must be installed.
+
+```
+--with-openssl
+--with-includes=/usr/include/openssl
+```
+
+When building openGauss from source, these two options must not be enabled, or the build fails.
+
+For the psql or gsql client, check whether libpq links against the SSL shared library.
+
+```
+$ ldd /opt/pgsql/lib/libpq.so |grep libssl
+ libssl.so.10 => /usr/lib64/libssl.so.10 (0x00007f7f29cc4000)
+$ ldd /opt/og/lib/libpq.so |grep libssl
+ libssl.so.1.1 => /opt/og/lib/libssl.so.1.1 (0x00007f9d39dd2000)
+```
+
+## Self-signed private certificate test
+
+In a test environment you can use a self-signed private certificate, which only provides transport encryption without identity verification; a self-signed CA certificate can also be used for both encryption and identity verification. A real production environment should use digital certificates issued by an authoritative CA.
+
+This article starts with the simplest case, a self-signed private certificate.
+
+**Generate the private key**
+
+```
+$ openssl genrsa -out server.key 2048
+```
+
+**Generate the certificate request file**
+
+```
+$ openssl req -new \
+-config openssl.cnf \
+-key server.key \
+-subj "/CN=foo" \
+-out server.csr
+```
+
+Note that openGauss requires the openssl.cnf file to be specified with -config (the default path on CentOS 7 is /etc/pki/tls/openssl.cnf); PostgreSQL does not need the -config option.
+
+**Self-sign the certificate**
+
+```
+$ openssl x509 -req -in server.csr -days 365 \
+-extfile openssl.cnf \
+-extensions v3_ca \
+-signkey server.key \
+-out server.crt
+```
+
+PostgreSQL does not need the -config option.
+
+**Copy the certificate and key files to the server**
+
+```
+$ chmod 0600 server.crt server.key
+$ cp server.crt server.key $PGDATA
+```
+
+PGDATA is the actual openGauss or PostgreSQL data directory.
+
+**Database configuration**
+
+Configure a hostssl entry in the pg\_hba.conf file; the authentication method can be chosen freely.
+
+```
+hostssl all all 0.0.0.0/0 md5
+```
+
+Set the following parameters in the postgresql.conf file.
+
+```
+ssl=on
+ssl_cert_file= 'server.crt'
+ssl_key_file= 'server.key'
+```
+
+Restart the database service, then run the tests.
+
+First test openGauss; an SSL connection is established.
+
+```
+$ gsql -h 192.168.137.5 -p6432 -Upostgres postgres
+Password for user postgres:
+gsql ((GaussDB Kernel V500R001C20 build ) compiled at 2021-03-09 18:30:51 commit 0 last mr )
+SSL connection (cipher: DHE-RSA-AES128-GCM-SHA256, bits: 128)
+Type "help" for help.
+postgres=>
+```
+
+Then test PostgreSQL.
+
+```
+$ psql -h192.168.137.11
+Password for user postgres:
+psql (12.6)
+SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off)
+Type "help" for help.
+postgres=#
+```
+
+## Summary
+
+1. PostgreSQL must be compiled with openssl support, while openGauss ships with it built in.
+
+2. openGauss does not pick up the operating system's default openssl configuration file; it must be specified explicitly, or an error is reported that the configuration file cannot be found.
+
diff --git "a/content/zh/post/enmo/9\344\270\252MogDB\345\255\230\345\202\250\350\277\207\347\250\213\347\244\272\344\276\213.md" "b/content/zh/post/enmo/9\344\270\252MogDB\345\255\230\345\202\250\350\277\207\347\250\213\347\244\272\344\276\213.md"
new file mode 100644
index 0000000000000000000000000000000000000000..8c64a07a35585b4f548f5770faaba277f3967fe9
--- /dev/null
+++ "b/content/zh/post/enmo/9\344\270\252MogDB\345\255\230\345\202\250\350\277\207\347\250\213\347\244\272\344\276\213.md"
@@ -0,0 +1,376 @@
++++
+
+title = "9 MogDB Stored Procedure Examples"
+
+date = "2022-05-12"
+
+tags = ["9 MogDB stored procedure examples"]
+
+archives = "2022-05"
+
+author = "云和恩墨交付团队"
+
+summary = "9 MogDB stored procedure examples"
+
+img = "/zh/post/enmo/title/img6.png"
+
+times = "10:20"
++++
+
+# 9 MogDB Stored Procedure Examples
+
+Source: [https://www.modb.pro/db/400634](https://www.modb.pro/db/400634)
+
+A stored procedure is a structured group of queries and statements, such as control statements and declarations. This article presents nine stored procedure examples that are useful in different situations.
+
+Create a test table:
+
+```
+create table public.test1(id int,name varchar(10));
+```
+
+## 1. Inserting data with a stored procedure
+
+```
+CREATE OR REPLACE PROCEDURE genre_insert_data(GenreId integer, Name character varying)
+AS
+begin
+    INSERT INTO public.test1 VALUES (GenreId, Name);
+end;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE genre_insert_data(GenreId integer, Name character varying)
+openGauss-# AS
+openGauss$# begin
+openGauss$#     INSERT INTO public.test1 VALUES (GenreId, Name);
+openGauss$# end;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# call genre_insert_data(1,'aaa');
+ genre_insert_data
+-------------------
+
+(1 row)
+
+openGauss=# select * from test1;
+ id | name
+----+------
+  1 | aaa
+(1 row)
+```
+
+## 2. Displaying a message on screen
+
+```
+CREATE OR REPLACE PROCEDURE display_message (INOUT msg TEXT)
+AS
+BEGIN
+    RAISE NOTICE 'Procedure Parameter: %', msg ;
+END ;
+```
+
+Test:
+
+```
+openGauss=# call display_message('hello MogDB');
+NOTICE:  Procedure Parameter: hello MogDB
+ display_message
+-----------------
+ hello MogDB
+(1 row)
+```
+
+## 3. Using transaction control
+
+```
+CREATE OR REPLACE PROCEDURE control_transaction()
+AS
+DECLARE
+BEGIN
+    CREATE TABLE test2 (id int);
+    INSERT INTO test2 VALUES (1);
+    COMMIT;
+    CREATE TABLE test3 (id int);
+    INSERT INTO test2 VALUES (1);
+    ROLLBACK;
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE control_transaction()
+openGauss-# AS
+openGauss$# DECLARE
+openGauss$# BEGIN
+openGauss$# CREATE TABLE test2 (id int);
+openGauss$# INSERT INTO test2 VALUES (1);
+openGauss$# COMMIT;
+openGauss$# CREATE TABLE test3 (id int);
+openGauss$# INSERT INTO test2 VALUES (1);
+openGauss$# ROLLBACK;
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# select * from test2;
+ERROR: relation "test2" does not exist on dn_6001
+LINE 1: select * from test2;
+                      ^
+openGauss=# call control_transaction();
+ control_transaction
+---------------------
+
+(1 row)
+openGauss=# select * from test2;
+ id
+----
+  1
+(1 row)
+openGauss=# select * from test3;
+ERROR: relation "test3" does not exist on dn_6001
+LINE 1: select * from test3;
+                      ^
+```
+
+Here we can see that the data committed before the COMMIT is available, while the changes that were not committed, or were rolled back, are removed from the database.
+
+## 4. Using a column's data type
+
+```
+CREATE OR REPLACE PROCEDURE genre_id_max() AS
+DECLARE
+    Genreid test1.Id%type;
+BEGIN
+    select max(Id) into Genreid from public.test1;
+    RAISE NOTICE 'Maximum of GenreId is : %', Genreid ;
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE genre_id_max() AS
+openGauss$# DECLARE
+openGauss$# Genreid test1.Id%type;
+openGauss$# BEGIN
+openGauss$# select max(Id) into Genreid from public.test1;
+openGauss$# RAISE NOTICE 'Maximum of GenreId is : %', Genreid ;
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# call genre_id_max();
+NOTICE: Maximum of GenreId is : 1
+ genre_id_max
+--------------
+
+(1 row)
+```
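+
+The same idea extends from a single column to a whole row via %rowtype. A minimal sketch (the procedure name is hypothetical; the same test1 table is assumed):
+
+```
+CREATE OR REPLACE PROCEDURE genre_show_first() AS
+DECLARE
+    rec test1%rowtype;  -- rec gets fields id and name, matching test1
+BEGIN
+    select * into rec from public.test1 order by id limit 1;
+    RAISE NOTICE 'First row: id=% name=%', rec.id, rec.name;
+END;
+```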
+
+## 5. Raising NOTICE, WARNING, and INFO messages
+
+```
+CREATE OR REPLACE PROCEDURE raise_warning() AS
+DECLARE
+    warn INT := 10;
+BEGIN
+    RAISE NOTICE 'value of warn : % at %: ', warn, now();
+    warn := warn + 10;
+    RAISE WARNING 'value of warn : % at %: ', warn, now();
+    warn := warn + 10;
+    RAISE INFO 'value of warn : % at %: ', warn, now();
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE raise_warning() AS
+openGauss$# DECLARE
+openGauss$# warn INT := 10;
+openGauss$# BEGIN
+openGauss$# RAISE NOTICE 'value of warn : % at %: ', warn, now();
+openGauss$# warn := warn + 10;
+openGauss$# RAISE WARNING 'value of warn : % at %: ', warn, now();
+openGauss$# warn := warn + 10;
+openGauss$# RAISE INFO 'value of warn : % at %: ', warn, now();
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# call raise_warning();
+NOTICE: value of warn : 10 at 2022-05-07 14:35:24.810364+08:
+WARNING: value of warn : 20 at 2022-05-07 14:35:24.810364+08:
+INFO: value of warn : 30 at 2022-05-07 14:35:24.810364+08:
+ raise_warning
+---------------
+
+(1 row)
+```
+
+## 6. Raising an exception
+
+```
+CREATE OR REPLACE PROCEDURE genre_id_exception() AS
+DECLARE
+    Genreid test1.Id%type ;
+BEGIN
+    select max(Id) into Genreid from public.test1;
+    RAISE EXCEPTION 'Maximum of GenreId is : %', Genreid USING HINT = 'Test For Raising exception.';
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE genre_id_exception() AS
+openGauss$# DECLARE
+openGauss$# Genreid test1.Id%type ;
+openGauss$# BEGIN
+openGauss$# select max(Id) into Genreid from public.test1;
+openGauss$# RAISE EXCEPTION 'Maximum of GenreId is : %', Genreid USING HINT = 'Test For Raising exception.';
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# call genre_id_exception();
+ERROR: Maximum of GenreId is : 1
+```
+
+## 7. Iterating over table data with a FOR loop
+
+```
+CREATE OR REPLACE PROCEDURE genre_traverse() AS
+DECLARE
+    genre_rec record;
+BEGIN
+    for genre_rec in (select Id,Name from public.test1 order by 1)
+    loop
+        RAISE NOTICE 'Id is : % , Name is : %', genre_rec.Id,genre_rec.Name;
+    end loop;
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE genre_traverse() AS
+openGauss$# DECLARE
+openGauss$# genre_rec record;
+openGauss$# BEGIN
+openGauss$# for genre_rec in (select Id,Name from public.test1 order by 1)
+openGauss$# loop
+openGauss$# RAISE NOTICE 'Id is : % , Name is : %', genre_rec.Id,genre_rec.Name;
+openGauss$# end loop;
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# call genre_traverse();
+NOTICE: Id is : 1 , Name is : aaa
+NOTICE: Id is : 1 , Name is :
+ genre_traverse
+----------------
+
+(1 row)
+```
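+
+Example 6 raises an exception; the complementary pattern is catching one with an EXCEPTION block. A minimal sketch (the procedure name is hypothetical; SQLERRM holds the message of the caught error):
+
+```
+CREATE OR REPLACE PROCEDURE genre_id_catch() AS
+DECLARE
+    Genreid test1.Id%type;
+BEGIN
+    select max(Id) into Genreid from public.test1;
+    RAISE EXCEPTION 'Maximum of GenreId is : %', Genreid;
+EXCEPTION
+    WHEN OTHERS THEN
+        RAISE NOTICE 'caught: %', SQLERRM;  -- the call completes instead of erroring out
+END;
+```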
+
+## 8. Using SECURITY INVOKER
+
+SECURITY INVOKER indicates that the procedure will execute with the privileges of the user who calls it. This is the default.
+
+```
+CREATE OR REPLACE PROCEDURE genre_traverse() SECURITY INVOKER
+AS
+DECLARE
+    genre_rec record;
+BEGIN
+    for genre_rec in (select Id,Name from public.test1 order by 1)
+    loop
+        RAISE NOTICE 'Genre Id is : % , Name is : %', genre_rec.Id,genre_rec.Name;
+    end loop;
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE genre_traverse() SECURITY INVOKER
+openGauss-# AS
+openGauss$# DECLARE
+openGauss$# genre_rec record;
+openGauss$# BEGIN
+openGauss$# for genre_rec in (select Id,Name from public.test1 order by 1)
+openGauss$# loop
+openGauss$# RAISE NOTICE 'Genre Id is : % , Name is : %', genre_rec.Id,genre_rec.Name;
+openGauss$# end loop;
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# \c - test
+Password for user test:
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "postgres" as user "test".
+openGauss=> call genre_traverse();
+ERROR: permission denied for relation test1
+DETAIL: N/A
+CONTEXT: PL/pgSQL function genre_traverse() line 4 at FOR over SELECT rows
+```
+
+## 9. Using SECURITY DEFINER
+
+SECURITY DEFINER specifies that the procedure will execute with the privileges of the user who owns it. A SECURITY DEFINER procedure cannot execute transaction control statements (for example, COMMIT and ROLLBACK, depending on the language).
+
+In this example, we create the stored procedure as user "postgres" and call it as user "test", who has no privileges on the table.
+
+```
+CREATE OR REPLACE PROCEDURE genre_traverse() SECURITY DEFINER
+AS
+DECLARE
+    genre_rec record;
+BEGIN
+    for genre_rec in (select Id,Name from public.test1 order by 1)
+    loop
+        RAISE NOTICE 'Genre Id is : % , Name is : %', genre_rec.Id,genre_rec.Name;
+    end loop;
+END;
+```
+
+Test:
+
+```
+openGauss=# CREATE OR REPLACE PROCEDURE genre_traverse() SECURITY DEFINER
+openGauss-# AS
+openGauss$# DECLARE
+openGauss$# genre_rec record;
+openGauss$# BEGIN
+openGauss$# for genre_rec in (select Id,Name from public.test1 order by 1)
+openGauss$# loop
+openGauss$# RAISE NOTICE 'Genre Id is : % , Name is : %', genre_rec.Id,genre_rec.Name;
+openGauss$# end loop;
+openGauss$# END;
+openGauss$# /
+CREATE PROCEDURE
+openGauss=# \c - test
+Password for user test:
+FATAL: Invalid username/password,login denied.
+Previous connection kept
+openGauss=# \c - test
+Password for user test:
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "postgres" as user "test".
+
+openGauss=> call genre_traverse();
+NOTICE: Genre Id is : 1 , Name is : aaa
+NOTICE: Genre Id is : 1 , Name is :
+ genre_traverse
+----------------
+
+(1 row)
+```
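+
+SECURITY DEFINER is one way around the permission error seen in example 8; the more conventional alternative is to grant the privilege directly. A sketch, run as the table owner:
+
+```
+GRANT SELECT ON public.test1 TO test;
+-- after this, the SECURITY INVOKER version from example 8 also works for user "test"
+```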
diff --git "a/content/zh/post/enmo/DBEAVER\350\277\236\346\216\245MogDB.md" "b/content/zh/post/enmo/DBEAVER\350\277\236\346\216\245MogDB.md"
new file mode 100644
index 0000000000000000000000000000000000000000..40cd2a075d490c130e19c338fef4b4abd3bf031b
--- /dev/null
+++ "b/content/zh/post/enmo/DBEAVER\350\277\236\346\216\245MogDB.md"
@@ -0,0 +1,46 @@
++++
+
+title = "Connecting to MogDB with DBeaver"
+
+date = "2022-05-05"
+
+tags = ["Connecting to MogDB with DBeaver"]
+
+archives = "2022-05"
+
+author = "云和恩墨"
+
+summary = "Connecting to MogDB with DBeaver"
+
+img = "/zh/post/enmo/title/img6.png"
+
+times = "10:20"
++++
+
+# Connecting to MogDB with DBeaver
+
+Source: [https://www.modb.pro/db/77704](https://www.modb.pro/db/77704)
+
+Driver download:
+[https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html)
+![image.png](../images/20210629-f0069048-4ae1-4de4-9891-bc626fbfc170.png)
+DBeaver download:
+[https://dbeaver.io/download/](https://dbeaver.io/download/)
+![image.png](../images/20210629-e42f737e-188e-407f-8b72-fe1315f8d044.png)
+
+Configure the connection
+![image.png](../images/20210629-377faf98-48c6-45e2-a255-1885be07b69f.png)
+![image.png](../images/20210629-ba33f624-8d97-4dfc-b0bc-5fdcb589e4bc.png)
+Unzip the driver into a directory of your choice, then add that folder under Libraries
+![image.png](../images/20210629-d00d94bf-ee48-44f2-b3ae-57ab3220f9b2.png)
+Click Find Class; the driver class name is detected automatically
+![image.png](../images/20210629-1d1c8bb8-e200-42a0-a0bb-6910a99bf953.png)
+Back in Settings, note that the class name is the driver class found above; the URL template can be filled in the same way as for PostgreSQL
+![image.png](../images/20210629-2c2280b0-1281-4b6e-8f2f-7127449786eb.png)
+Create a new connection
+![image.png](../images/20210629-2a61c1f1-65da-4afa-abe9-70b493568e02.png)
+Search for mogdb
+![image.png](../images/20210629-d2a7ddda-b79e-4702-a44c-7b11d54d9570.png)
+Fill in the connection details and you are done
+![image.png](../images/20210629-9e45d800-570d-49fb-89d8-8ba12492a0de.png)
+![image.png](../images/20210629-b61ad3f2-4e49-407a-a35f-b19286dcbcba.png)
diff --git "a/content/zh/post/enmo/Jmeter\345\216\213\346\265\213Mogdb\344\275\277\347\224\250\346\214\207\345\215\227.md" "b/content/zh/post/enmo/Jmeter\345\216\213\346\265\213Mogdb\344\275\277\347\224\250\346\214\207\345\215\227.md"
new file mode 100644
index 0000000000000000000000000000000000000000..6df2a36459fec4976ee7cfe3bcdf7469a4ead2d3
--- /dev/null
+++ "b/content/zh/post/enmo/Jmeter\345\216\213\346\265\213Mogdb\344\275\277\347\224\250\346\214\207\345\215\227.md"
@@ -0,0 +1,169 @@
++++
+
+title = "A Guide to Load Testing MogDB with JMeter"
+
+date = "2022-05-18"
+
+tags = ["A guide to load testing MogDB with JMeter"]
+
+archives = "2022-05"
+
+author = "云和恩墨"
+
+summary = "A guide to load testing MogDB with JMeter"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# A Guide to Load Testing MogDB with JMeter
+
+Source: [https://www.modb.pro/db/388141](https://www.modb.pro/db/388141)
+
+## 1. About JMeter
+
+```
+Apache JMeter is a Java-based load-testing tool developed by the Apache organization. Originally designed for testing web applications, it has since expanded to other testing areas. JMeter can simulate heavy load against a server, network, or object to test their resilience under different load categories and analyze overall performance. It can also be used for functional/regression testing, creating scripts with assertions to verify that your program returns the expected results.
+```
+
+## 2. Installing JMeter
+
+### (1) Set up the Java environment
+
+```
+Download the JDK matching your system from https://www.oracle.com/java/technologies/downloads/#java8; the details are not covered here.
+mac@xiaofandemac bin % java -version
+openjdk version "17.0.1" 2021-10-19
+OpenJDK Runtime Environment (build 17.0.1+12-39)
+OpenJDK 64-Bit Server VM (build 17.0.1+12-39, mixed mode, sharing)
+```
+
+### (2) Download JMeter
+
+```
+Download from https://jmeter.apache.org/download_jmeter.cgi; the binary build can be used directly without compiling.
+```
+
+![image20220330145857684.png](../images/20220331-23563656-7787-4400-8913-e05f579d5469.png)
+
+### (3) Unpack and use JMeter
+
+```
+zhangfan@xiaofandemac Downloads % cd apache-jmeter-5.4.3
+zhangfan@xiaofandemac apache-jmeter-5.4.3 % ls
+LICENSE README.md bin extras licenses
+NOTICE backups docs lib printable_docs
+zhangfan@xiaofandemac apache-jmeter-5.4.3 % cd bin
+zhangfan@xiaofandemac bin % ./jmeter -v
+WARNING: package sun.awt.X11 not in java.desktop
+ _ ____ _ ____ _ _ _____ _ __ __ _____ _____ _____ ____
+ / \ | _ \ / \ / ___| | | | ____| | | \/ | ____|_ _| ____| _ \
+ / _ \ | |_) / _ \| | | |_| | _| _ | | |\/| | _| | | | _| | |_) |
+ / ___ \| __/ ___ \ |___| _ | |___ | |_| | | | | |___ | | | |___| _ <
+/_/ \_\_| /_/ \_\____|_| |_|_____| \___/|_| |_|_____| |_| |_____|_| \_\ 5.4.3
+
+Copyright (c) 1999-2021 The Apache Software Foundation
+Note: the JMeter environment variables are not configured here, so the command must be run with an absolute path. You can also configure the environment variables if you prefer; the settings are:
+export JMETER_HOME=/Users/zhangfan/Downloads/apache-jmeter-5.4.3 (JMeter installation path)
+export PATH=${PATH}:${JMETER_HOME}/bin
+```
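+
+One step that is easy to miss: JMeter can only reach MogDB if the JDBC driver jar is on its classpath. A sketch, assuming the jar downloaded from the openGauss site (the file name varies by version):
+
+```
+cp opengauss-jdbc-3.0.0.jar $JMETER_HOME/lib/
+# older gsjdbc4 driver: class org.postgresql.Driver, URL jdbc:postgresql://host:port/db
+# newer driver:         class org.opengauss.Driver,  URL jdbc:opengauss://host:port/db
+```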
+
+## 3. Using JMeter from the GUI
+
+### (1) Launch the GUI
+
+```
+Run ./jmeter.sh to bring up the GUI: zhangfan@xiaofandemac bin % ./jmeter.sh
+```
+
+### (2) Load testing MogDB with JMeter
+
+Test scenario: stress MogDB with JMeter at 100 concurrent threads for 5 minutes against a single SQL statement, and observe how the database performs
+
+#### 1. Create a test plan
+
+![image20220330152908832.png](../images/20220331-98ff026f-be85-4123-8a9a-6f9a4034010b.png)
+
+#### 2. Create a thread group
+
+![image20220330153325297.png](../images/20220331-0288c587-ab25-49e4-8f50-d5c7a1993ab2.png)
+
+![image20220330152546058.png](../images/20220331-998406c3-61ab-436f-9687-383df5f1f6cf.png)
+
+```
+The setUp thread group and tearDown thread group differ from an ordinary thread group in that setUp threads are triggered automatically before the ordinary threads run, while the tearDown thread group runs after the main threads finish. The setUp thread group runs before the test ThreadGroup and is typically used for initialization, such as establishing database connections. The tearDown thread group runs after the test thread groups finish and is typically used for cleanup: removing test data, logging out, releasing resources such as database connections.
+```
+
+#### 3. Edit the JDBC connection settings
+
+![image20220330153441728.png](../images/20220331-d5b6a331-96f3-4230-8764-96362c0445c4.png)
+
+Fill in the database connection details for your environment
+
+![image20220330152736465.png](../images/20220331-7bfba1cd-8252-4498-a4e3-cfc32bd04b29.png)
+
+#### 4. Create a JDBC Request
+
+```
+This sampler sends a JDBC request (a SQL statement) to the database; it usually needs to be paired with a JDBC Connection Configuration element.
+```
+
+![image20220330151943119.png](../images/20220331-a7515c05-6c23-4454-9f15-75589a95006b.png)
+
+#### 5. Create a summary report
+
+![image20220330154037344.png](../images/20220331-e0ce0a32-6f6d-4e7c-bbe0-f5c7a82c096e.png)
+
+The summary report gives an overall view of the test results
+
+![image20220330154131596.png](../images/20220331-94e3e60e-7b75-4555-9bcf-5354270d8af4.png)
+
+#### 6. Create a View Results Tree
+
+![image20220330154613244.png](../images/20220331-7df540f1-ed3e-4233-8a53-f7b8cffc8152.png)
+
+## 4. Using JMeter from the command line
+
+Save the test plan created in the GUI and copy the .jmx file to the load-testing server, where JMeter is already installed
+
+![image20220330154854160.png](../images/20220331-99b1aab5-504d-4993-883e-0c82164d18ba.png)
+
+#### (1) JMeter command-line options
+
+Command: jmeter -n -t <testplan filename> -l <listener filename>
+
+```
+ -n: run JMeter in non-GUI mode
+ -t: name of the test plan to run, path + xxx.jmx
+ -l: JTL file to save results to, path + xxx.jtl
+ -r: execute remotely
+ -j: save the execution log
+ -H: proxy host name or IP address
+ -P: proxy port
+ -e: generate a report after the test completes
+```
+
+#### (2) Running from the command line
+
+![image20220331102415824.png](../images/20220331-cc3d3109-87fb-4563-bd9d-e09f700f6ce2.png)
+
+#### (3) Interpreting the results
+
+```
+summary + 311412 in 00:00:20 = 15321.6/s Avg: 6 Min: 0 Max: 9920 Err: 0 (0.00%) Active: 100 Started: 100Finished: 0
+summary + is the data for this 5-minute window; summary = is the cumulative data up to the current moment.
+311412 is the number of requests sent, 00:00:20 the time taken, 15321 the requests per second (the throughput); Avg, Min, Max are the average, minimum, and maximum response times, all in ms; the figures after Err are the error count and error rate.
+```
+
+#### (4) Report overview
+
+```
+After the test, report files are generated in the specified folder; pack them up, download them to your own machine, and open them in a browser.
+[root@oracle zhuyp]# cd db1/
+[root@oracle db1]# ls
+content index.html sbadmin2-1.0.7 statistics.json
+[root@oracle db1]#
+```
+
+![image20220331104524196.png](../images/20220331-e07e51f9-87b0-4372-aa5c-c7f63c2dc68d.png)
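+
+To recap the command-line run shown only in screenshots above, a typical invocation would look like this (file and directory names are hypothetical):
+
+```
+jmeter -n -t mogdb_test.jmx -l result.jtl -j run.log -e -o report_dir
+```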
diff --git "a/content/zh/post/enmo/MogDB - \345\244\226\351\224\256\347\272\246\346\235\237\347\256\241\347\220\206\345\210\240\351\231\244\345\244\226\351\224\256\345\205\263\350\201\224\347\232\204\344\270\273\350\241\250\346\225\260\346\215\256\346\255\245\351\252\244.md" "b/content/zh/post/enmo/MogDB - \345\244\226\351\224\256\347\272\246\346\235\237\347\256\241\347\220\206\345\210\240\351\231\244\345\244\226\351\224\256\345\205\263\350\201\224\347\232\204\344\270\273\350\241\250\346\225\260\346\215\256\346\255\245\351\252\244.md"
new file mode 100644
index 0000000000000000000000000000000000000000..d895fef7644f137d207bf766291487e835f0215f
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB - \345\244\226\351\224\256\347\272\246\346\235\237\347\256\241\347\220\206\345\210\240\351\231\244\345\244\226\351\224\256\345\205\263\350\201\224\347\232\204\344\270\273\350\241\250\346\225\260\346\215\256\346\255\245\351\252\244.md"
@@ -0,0 +1,143 @@
++++
+
+title = "MogDB: Managing Foreign Key Constraints / Steps to Delete Parent-Table Data Referenced by Foreign Keys"
+
+date = "2022-04-18"
+
+tags = ["MogDB: managing foreign key constraints / deleting parent-table data referenced by foreign keys"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "MogDB: managing foreign key constraints / steps to delete parent-table data referenced by foreign keys"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# MogDB: Managing Foreign Key Constraints / Steps to Delete Parent-Table Data Referenced by Foreign Keys
+
+Source: https://www.modb.pro/db/391471
+
+## Common foreign key operations in MogDB:
+
+### Version
+
+This article applies to MogDB V2.0.1 and later environments
+
+### Creating foreign keys
+
+```sql
+--create the parent table
+create table t_primary (std_id int primary key,name varchar);
+
+--create the child tables
+create table t_refer1 (std_id int,class_no varchar);
+create table t_refer2 (name varchar,in_date timestamp default sysdate,std_id int);
+
+--create the foreign key constraints
+alter table t_refer1 add constraint fk_refer1 foreign key(std_id) references t_primary(std_id);
+
+alter table t_refer2 add constraint fk_refer2 foreign key(std_id) references t_primary(std_id);
+
+--insert data into the parent table (std_id=1 .. 10)
+insert into t_primary select generate_series(1,10),'test';
+
+--foreign key test: valid data
+insert into t_refer1 values(1,'class 3');
+insert into t_refer2 values('test1',sysdate,1);
+
+--foreign key test: invalid data
+insert into t_refer1 values(11,'class 3');
+insert into t_refer2 values('test1',sysdate,11);
+```
+
+![图片.png](../images/20220408-11f29897-a4b6-4f2e-a8c2-f679957fe730.png)
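+
+The screenshot captures the failing inserts; for reference, the PostgreSQL-style error for the invalid rows reads roughly as follows (the exact wording in MogDB may differ slightly):
+
+```
+ERROR:  insert or update on table "t_refer1" violates foreign key constraint "fk_refer1"
+DETAIL:  Key (std_id)=(11) is not present in table "t_primary".
+```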
+
+### Disabling foreign keys
+
+```sql
+--disable the foreign keys
+--this can only be done by disabling all triggers on the table
+alter table t_refer1 disable trigger all;
+alter table t_refer2 disable trigger all;
+
+--test: insert the invalid data again
+insert into t_refer1 values(11,'class 3');
+insert into t_refer2 values('test1',sysdate,11);
+```
+
+![图片.png](../images/20220408-0e8866ab-114c-42ab-9bb6-234f8a40acd3.png)
+
+### Enabling foreign keys
+
+```sql
+--enable the foreign keys
+alter table t_refer1 enable trigger all;
+alter table t_refer2 enable trigger all;
+
+--test: insert invalid data again
+insert into t_refer1 values(12,'class 3');
+insert into t_refer2 values('test1',sysdate,12);
+```
+
+![图片.png](../images/20220408-855d3678-23bd-4f69-bdaf-71f840096cd3.png)
+
+### Querying foreign keys
+
+```sql
+--look up all foreign key constraints referencing a given parent table
+select conname as constrain_name,
+       contype as constrain_type,
+       confrelid ::regclass as primary_tab_name,
+       conrelid ::regclass as foreign_tab_name,
+       convalidated as constraint_status,
+       conkey as constraint_col_position,
+       confkey as primary_tab_col_position
+  from pg_constraint
+ where contype = 'f'
+   and confrelid = 't_primary' ::regclass;
+
+--query a table's triggers
+select t.tgrelid::regclass as table_name,
+       t.tgname as trigger_name,
+       t.tgenabled as trigger_status,
+       t.tgconstrrelid::regclass as priamry_tab_name,
+       t.tgconstrindid::regclass as primary_index_name,
+       c.conname as constraint_name
+  from pg_trigger t,pg_constraint c
+ where t.tgconstraint = c.oid
+   and t.tgrelid = 't_refer2'::regclass
+   and c.contype = 'f';
+
+```
+
+![图片.png](../images/20220408-75524765-1188-47d0-b432-664e001638a5.png)
+
+![图片.png](../images/20220408-685e0f51-303d-416a-970c-d73aea1bb48f.png)
+
+## Steps for common foreign key scenarios:
+
+### 1. Deleting parent-table data referenced by foreign keys while avoiding cascaded deletes
+
+```
+--stop the application workload
+
+--back up the table data
+create table b0_t_primary (like t_primary including all);
+insert into b0_t_primary select * from t_primary;
+
+--set the parameter
+set session_replication_role to replica;
+
+--delete the parent table data
+delete from t_primary;
+
+--restore the parameter
+set session_replication_role to origin;
+
+--restart the application workload
+```
+
diff --git "a/content/zh/post/enmo/MogDB 2.1.1 \345\210\235\345\247\213\345\214\226\345\217\202\346\225\260\346\246\202\350\246\201\350\257\264\346\230\216.md" "b/content/zh/post/enmo/MogDB 2.1.1 \345\210\235\345\247\213\345\214\226\345\217\202\346\225\260\346\246\202\350\246\201\350\257\264\346\230\216.md"
new file mode 100644
index 0000000000000000000000000000000000000000..98cb460ea096cf1d28b2fbf51425562a53ba7698
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB 2.1.1 \345\210\235\345\247\213\345\214\226\345\217\202\346\225\260\346\246\202\350\246\201\350\257\264\346\230\216.md"
@@ -0,0 +1,250 @@
++++
+
+title = "An Overview of MogDB 2.1.1 Initialization Parameters"
+
+date = "2022-05-18"
+
+tags = ["An overview of MogDB 2.1.1 initialization parameters"]
+
+archives = "2022-05"
+
+author = "云和恩墨"
+
+summary = "An overview of MogDB 2.1.1 initialization parameters"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# An Overview of MogDB 2.1.1 Initialization Parameters
+
+Source: [https://www.modb.pro/db/394787](https://www.modb.pro/db/394787)
+
+After a MogDB installation completes, the official documentation provides a script that applies a recommended set of parameters; running it is the suggested way to initialize parameter settings.
+
+This article adds brief explanations on top of the official script so that newcomers can get a rough idea of what each parameter does.
+
+- [Standard installation of MogDB 2.1.1 on CentOS 7.7](https://cdn.modb.pro/db/394547)
+- [Official script](https://docs.mogdb.io/zh/mogdb/v2.1/7-recommended-parameter-settings)
+
+### 1. Notes
+
+- It is recommended to copy the shell script into a text editor such as Notepad++ and set the language to shell for easier reading
+- 
刷新的参数都会写到配置文件/mogdb/data/db1/postgresql.conf中(按照官方文档标准安装是该路径) +- gs_guc set -I all -N all -c 是openGuass/MogDB设置参数的方法,说明见:https://www.modb.pro/db/30065 +- 部分参数设置仅适合测试环境使用 +- 在MogDB中查询参数级别及简单描述 + +``` +\pset pager select context,name,short_desc from pg_settings order by context,category,name; +``` + +- 想进一步了解参数详细,建议查询官方文档:https://opengauss.org/zh/docs/3.0.0/docs/BriefTutorial/BriefTutorial.html + +### 二、脚本 + +``` +#!/bin/bash + +source ~/.bashrc + +##获取当前机器的内存大小,单位KB +memory=`free|awk '{print $2}' |sed -n 2p` +##判断内存是否小于4G +if [[ $memory -lt 4*1024*1024 ]] +##内存小于4G,则设置max_process_memory为2G +then + ##max_process_memory 数据库可用的最大物理内存 + max_process_memory=2GB + ##shared_buffers 共享内存大小 + shared_buffers=128MB + ##max_connections 最大连接数 + max_connections=500 + ##work_mem 内部排序操作和Hash表在开始写入临时磁盘文件之前使用的内存大小 + work_mem=4MB + ##maintenance_work_mem 设置在维护性操作可使用的最大的内存 + maintenance_work_mem=256MB + ##应该是考虑到有物理内存本来就小于2G的情况,可以进一步减小max_process_memory和shared_buffers的值 + echo "If the database fails to start, lower the parameters max_process_memory and shared_buffers" +##判断内存大于4G小于等于8G,按下面的值刷参数 +elif [[ $memory -gt 4*1024*1024 ]] && [[ $memory -lt 8*1024*1024 ]] +then + max_process_memory=5GB + shared_buffers=1GB + max_connections=1000 + work_mem=16MB + maintenance_work_mem=1GB +##大于8G的情况按如下公式计算得出 +else + max_process_memory=$((memory*6/10/1024/1024))GB + shared_buffers=$((memory*3/10/1024/1024))GB + max_connections=3000 + work_mem=64MB + maintenance_work_mem=2GB +fi + +##内存相关参数 +gs_guc set -I all -N all -c "max_process_memory=${max_process_memory}" +gs_guc set -I all -N all -c "shared_buffers=${shared_buffers}" +gs_guc set -I all -N all -c "work_mem=${work_mem}" +gs_guc set -I all -N all -c "maintenance_work_mem=${maintenance_work_mem}" +##cstore_buffers 列存所使用的共享缓冲区的大小,对比O从12c起在SGA中也有类似区域,关于og列式存储的介绍:https://blog.csdn.net/GaussDB/article/details/116017248 +gs_guc set -I all -N all -c "cstore_buffers=16MB" +##wal_buffers 用于存放WAL数据的共享内存空间的XLOG_BLCKSZ数,wal相当于O的redo,wal_buffers看样子类似于SGA中的redo log buffer,但是og的这个内存区域远大于o,可以研究下机制 +gs_guc set -I all -N all -c "wal_buffers=1GB" +##local_syscache_threshold 控制session动态内存大小 +gs_guc set -I all -N all -c "local_syscache_threshold=32MB" +##standby_shared_buffers_fraction 备库所在服务器使用shared_buffers内存缓冲区大小的比例,1代表100% +gs_guc set -I all -N all -c "standby_shared_buffers_fraction=1" + +##连接访问相关参数 +gs_guc set -I all -N all -c "max_connections=${max_connections}" +##max_prepared_transactions 同时处于"预备"状态的事务的最大数目 +gs_guc set -I all -N all -c "max_prepared_transactions=${max_connections}" +##listen_addresses 远程客户端连接使用的数据库主节点ip或者主机名,参考:https://www.modb.pro/db/30200 +gs_guc set -I all -N all -c "listen_addresses = '*'" +##远程连接的读取模式,当前设置为无验证 +gs_guc set -I all -N all -c "remote_read_mode=non_authentication" +##password_encryption_type 加密算法设置,1代表采用sha256和md5方式对密码加密,参考:https://www.modb.pro/db/30252 +gs_guc set -I all -N all -c "password_encryption_type=1" +##password_reuse_time 对新密码进行可重用天数检查 +gs_guc set -I all -N all -c "password_reuse_time=0" +##password_lock_time 密码锁定时间,设置为0时表示即使超过密码错误次数限制导致帐户锁定,也会在短时间内自动解锁 +gs_guc set -I all -N all -c "password_lock_time=0" +##password_effect_time 密码有效期,0表示不开启有效期限制功能,单位为天 +gs_guc set -I all -N all -c "password_effect_time=0" +##session_timeout 开启自动断开功能,0为不开启 +gs_guc set -I all -N all -c "session_timeout=0" + +##wal相关参数 +##wal_level 写入WAL信息量的级别(minimal、archive、hot_standby、logical) +gs_guc set -I all -N all -c "wal_level=logical" +##full_page_writes 在检查点之后对页面的第一次修改时,是否将每个磁盘页面的全部内容写到WAL日志中 +gs_guc set -I all -N all -c "full_page_writes=off" +##wal_log_hints 
检查点之后对页面的第一次修改为页面上元组hint bits的修改时,是否将整个页面的全部内容写到WAL日志中 +gs_guc set -I all -N all -c "wal_log_hints=off" +##xloginsert_locks 并发写预写式日志锁的个数 +gs_guc set -I all -N all -c "xloginsert_locks=48" +##advance_xlog_file_num 在后台周期性地提前初始化xlog文件的数目 +gs_guc set -I all -N all -c "advance_xlog_file_num=10" + +##复制相关参数 +##synchronous_commit 当前事务的同步方式,说明:https://blog.csdn.net/Hehuyi_In/article/details/103449611 +gs_guc set -I all -N all -c "synchronous_commit=on" +##wal_keep_segments Xlog日志文件段数量,“pg_xlog”目录下保留事务日志文件的最小数目 +gs_guc set -I all -N all -c "wal_keep_segments=1024" +##max_wal_senders 事务日志发送进程的并发连接最大数量,不可大于等于max_connections +gs_guc set -I all -N all -c "max_wal_senders=16" +##recovery_max_workers 最大并行回放线程个数,关于什么是并行回放可查看:https://zhuanlan.zhihu.com/p/390307047 +gs_guc set -I all -N all -c "recovery_max_workers=4" +##most_available_sync 在备机同步失败时,是否阻塞主机,on为启用,类似O的DG最大可用模式,正常是sync同步,当备库断异常时切换为async,在备库恢复时,切换回sync +gs_guc set -I all -N all -c "most_available_sync=on" +##max_size_for_xlog_prune xlog最大值的阈值,单位KB +gs_guc set -I all -N all -c "max_size_for_xlog_prune=104857600" +##catchup2normal_wait_time 单同步备机情况下,控制备机数据追赶(catchup)阻塞主机的最长时间,https://gitee.com/opengauss/openGauss-server/issues/I23SAM +gs_guc set -I all -N all -c "catchup2normal_wait_time=0" +##enable_slot_log 是否开启逻辑复制槽主备同步特性 +gs_guc set -I all -N all -c "enable_slot_log=on" +##max_replication_slots 当前物理流复制槽数+所需的逻辑复制槽数 +gs_guc set -I all -N all -c "max_replication_slots=32" +##wal_receiver_timeout 从主机接收数据的最大等待时间,中止处于非活动状态超过指定时间的复制连接 +gs_guc set -I all -N all -c "wal_receiver_timeout=60s" +##sync_config_strategy 主机、备机和级联备之间配置文件的同步策略,主机配置为none_node时,表示不允许主机向任何备机主动同步配置文件 +gs_guc set -I all -N all -c "sync_config_strategy=none_node" + +##日志相关参数 +##logging_collector 控制开启后端日志收集进程logger进行日志收集 +gs_guc set -I all -N all -c "logging_collector=on" +##log_duration 记录每个已完成SQL语句的执行时间 +gs_guc set -I all -N all -c "log_duration=on" +##log_line_prefix 每条日志信息的前缀格式 +gs_guc set -I all -N all -c "log_line_prefix='%m %u %d %r %p %S'" +##log_checkpoints 在服务器日志中记录检查点和重启点的信息 +gs_guc set -I all -N all -c "log_checkpoints=on" +##plog_merge_age 控制性能日志数据输出的周期,即多久进行一次性能日志汇聚,单位为毫秒,0是不启用 +gs_guc set -I all -N all -c "plog_merge_age=0" + +##性能统计相关参数 +##vacuum_cost_limit 设置清理进程休眠的开销限制 +gs_guc set -I all -N all -c "vacuum_cost_limit=1000" +##autovacuum_max_workers 能同时运行的自动清理线程的最大数量 +gs_guc set -I all -N all -c "autovacuum_max_workers=10" +##autovacuum_naptime 两次自动清理操作的时间间隔 +gs_guc set -I all -N all -c "autovacuum_naptime=20s" +##autovacuum_vacuum_cost_delay 自动VACUUM操作里使用的开销延迟数值 +gs_guc set -I all -N all -c "autovacuum_vacuum_cost_delay=10" +##autovacuum_vacuum_scale_factor 触发一个VACUUM时增加到autovacuum_vacuum_threshold的表大小的缩放系数 +gs_guc set -I all -N all -c "autovacuum_vacuum_scale_factor=0.05" +##autovacuum_analyze_scale_factor 触发一个ANALYZE时增加到autovacuum_analyze_threshold的表大小的缩放系数 +gs_guc set -I all -N all -c "autovacuum_analyze_scale_factor=0.02" +##autovacuum_vacuum_threshold 触发VACUUM的阈值 +gs_guc set -I all -N all -c "autovacuum_vacuum_threshold=200" +##autovacuum_analyze_threshold 触发ANALYZE操作的阈值 +gs_guc set -I all -N all -c "autovacuum_analyze_threshold=200" +##autovacuum_io_limits 控制autovacuum进程每秒触发IO的上限 +gs_guc set -I all -N all -c "autovacuum_io_limits=104857600" +##instr_unique_sql_count 系统中unique sql信息实时收集功能 +gs_guc set -I all -N all -c "instr_unique_sql_count=20000" +##enable_save_datachanged_timestamp 确定是否收集insert/update/delete, exchange/truncate/drop partition操作对表数据改动的时间 +gs_guc set -I all -N all -c "enable_save_datachanged_timestamp=off" +##track_sql_count 
控制对每个会话中当前正在执行的SELECT、INSERT、UPDATE、DELETE、MERGE INTO语句进行计数的统计数据 +gs_guc set -I all -N all -c "track_sql_count=off" +##enable_instr_rt_percentile 开启计算系统中80%和95%的SQL响应时间的功能 +gs_guc set -I all -N all -c "enable_instr_rt_percentile=off" +##enable_instance_metric_persistent 开启实例资源监控转存功能 +gs_guc set -I all -N all -c "enable_instance_metric_persistent=off" +##enable_logical_io_statistics 开启资源监控逻辑IO统计功能 +gs_guc set -I all -N all -c "enable_logical_io_statistics=off" +##enable_user_metric_persistent 开启用户历史资源监控转存功能 +gs_guc set -I all -N all -c "enable_user_metric_persistent=off" +##enable_mergejoin 优化器对融合连接规划类型的使用 +gs_guc set -I all -N all -c "enable_mergejoin=on" +##enable_nestloop 优化器对内表全表扫描嵌套循环连接规划类型的使用 +gs_guc set -I all -N all -c "enable_nestloop=on" +##enable_pbe_optimization 对以PBE(Parse Bind Execute)形式执行的语句进行查询计划的优化 +gs_guc set -I all -N all -c "enable_pbe_optimization=off" +##enable_resource_track 是否开启资源实时监控功能,on表示打开资源监控;off表示关闭资源监控 +gs_guc set -I all -N all -c "enable_resource_track=on" +##enable_wdr_snapshot 数据库监控快照功能 +gs_guc set -I all -N all -c "enable_wdr_snapshot=on" +##instr_unique_sql_count 系统中unique sql信息实时收集功能 +gs_guc set -I all -N all -c "instr_unique_sql_count=5000" + +##客户端白名单 +gs_guc set -I all -N all -h "host all all 0.0.0.0/0 md5" + +##其他参数 +##checkpoint_segments checkpoint_timeout周期内所保留的最少WAL日志段文件数量 +gs_guc set -I all -N all -c "checkpoint_segments=1024" +##checkpoint_completion_target 检查点完成的目标,0.8表示每个checkpoint需要在checkpoints间隔时间的80%内完成。 +gs_guc set -I all -N all -c "checkpoint_completion_target=0.8" +##pagewriter_sleep 设置用于增量检查点打开后,pagewrite线程每隔pagewriter_sleep的时间刷一批脏页下盘。 +gs_guc set -I all -N all -c "pagewriter_sleep=200" +##enable_alarm 告警检测线程,检测数据库中可能的错误场景 +gs_guc set -I all -N all -c "enable_alarm=off" +##enable_codegen 标识是否允许开启代码生成优化,目前代码生成使用的是LLVM优化 +gs_guc set -I all -N all -c "enable_codegen=off" +##audit_enabled 审计进程的开启和关闭 +gs_guc set -I all -N all -c "audit_enabled=off" +##没找到这个参数的说明 +gs_guc set -I all -N all -c "enable_asp=off" +##lc_messages 信息显示的语言 +gs_guc set -I all -N all -c "lc_messages='en_US.UTF-8'" +##lc_monetary 货币值的显示格式 +gs_guc set -I all -N all -c "lc_monetary='en_US.UTF-8'" +##lc_numeric 数值的显示格式 +gs_guc set -I all -N all -c "lc_numeric='en_US.UTF-8'" +##lc_time 时间和区域的显示格式 +gs_guc set -I all -N all -c "lc_time='en_US.UTF-8'" +##update_lockwait_timeout 并发更新参数开启情况下,该参数控制并发更新同一行时单个锁的最长等待时间 +gs_guc set -I all -N all -c "update_lockwait_timeout=1min" +##lockwait_timeout 单个锁的最长等待时间 +gs_guc set -I all -N all -c "lockwait_timeout=1min" +##max_files_per_process 设置每个服务器进程允许同时打开的最大文件数目 +gs_guc set -I all -N all -c "max_files_per_process=100000" +##behavior_compat_options 数据库兼容性行为配置项,该参数的值由若干个配置项用逗号隔开构成,display_leading_zero表示浮点数显示,不配置的话则0.25显示为.25 +gs_guc set -I all -N all -c "behavior_compat_options='display_leading_zero'" +##enable_thread_pool 控制是否使用线程池功能 +gs_guc set -I all -N all -c "enable_thread_pool=off" +``` diff --git "a/content/zh/post/enmo/MogDB openGauss \345\235\217\345\235\227\346\265\213\350\257\225-\345\257\271\345\220\257\345\212\250\347\232\204\345\275\261\345\223\215-\346\265\213\350\257\225\347\254\224\350\256\2601.md" "b/content/zh/post/enmo/MogDB openGauss \345\235\217\345\235\227\346\265\213\350\257\225-\345\257\271\345\220\257\345\212\250\347\232\204\345\275\261\345\223\215-\346\265\213\350\257\225\347\254\224\350\256\2601.md" new file mode 100644 index 0000000000000000000000000000000000000000..abdba4b112247366a6ed4b56f045b58dc2e6b50d --- /dev/null +++ "b/content/zh/post/enmo/MogDB openGauss 
\345\235\217\345\235\227\346\265\213\350\257\225-\345\257\271\345\220\257\345\212\250\347\232\204\345\275\261\345\223\215-\346\265\213\350\257\225\347\254\224\350\256\2601.md" @@ -0,0 +1,392 @@ ++++ + +title = "MogDB/openGauss 坏块测试-对启动的影响-测试笔记1" + +date = "2022-05-18" + +tags = ["MogDB/openGauss 坏块测试-对启动的影响-测试笔记1"] + +archives = "2022-05" + +author = "云和恩墨-范计杰" + +summary = "MogDB/openGauss 坏块测试-对启动的影响-测试笔记1" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# MogDB/openGauss 坏块测试-对启动的影响-测试笔记1 + +本文出处:[https://www.modb.pro/db/398511](https://www.modb.pro/db/398511) + +在UPDATE操作提交后,脏块落盘前kill掉mogdb数据库,然后对UPDATE修改的坏进行以下破坏操作,仍然能够启动数据库,数据未丢失。 + +1、用旧数据文件替换,可以启动 +2、修改成错误的checksum,可以启动 +3、数据块修改成错误的lsn,可以启动 +4、dd一个数据块为0,可以启动 + +full_page_writes 打开时,每次checkpoint后第一次修改的块,会在wal中记录完整副本,recover时直接使用该副本重写数据文件中的块。 + +``` +[omm2@og01 ~]$ gsql +gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + + + +omm2=# drop table t; +DROP TABLE + + +omm2=# create table t(id numeric,c varchar(100)) with (fillfactor=50); +CREATE TABLE +omm2=# insert into t select i,'test'||i from generate_series(1,1000)i; +INSERT 0 100 +omm2=# checkpoint; +CHECKPOINT + + +select (select setting from pg_settings where name='data_directory')||'/'||pg_relation_filepath('t'); + + +omm2=# select (select setting from pg_settings where name='data_directory')||'/'||pg_relation_filepath('t'); + ?column? +---------------------------------- + /home/omm2/data/base/16385/73775 +(1 row) + + + +[omm2@og01 ~]$ cp /home/omm2/data/base/16385/73775 /home/omm2/data/base/16385/73775_old +[omm2@og01 ~]$ + + + +omm2=# select ctid,id from t; + ctid | id +--------+----- + (0,1) | 1 + (0,2) | 2 + (0,3) | 3 + (0,4) | 4 + (0,5) | 5 + (0,6) | 6 + (0,7) | 7 + (0,8) | 8 + (0,9) | 9 + (0,10) | 10 + (0,11) | 11 + (0,12) | 12 + (0,13) | 13 + (0,14) | 14 + (0,15) | 15 + + +omm2=# update t set c='a' where id<3; +UPDATE 2 +omm2=# +omm2=# checkpoint; +CHECKPOINT + + +[omm2@og01 ~]$ ps -ef |grep mogdb +omm2 25231 1 3 15:17 pts/0 00:00:04 /home/omm2/mogdb210/bin/mogdb +omm2 26265 20027 0 15:19 pts/1 00:00:00 grep --color=auto mogdb + + + + +omm2=# update t set c='b' where id<500; +UPDATE 499 + + +[omm2@og01 ~]$ kill -9 25231 + + +---用旧数据文件替换,可以启动 + +cp /home/omm2/data/base/16385/73775 /home/omm2/data/base/16385/73775_bak + +cp /home/omm2/data/base/16385/73775_old /home/omm2/data/base/16385/73775 + + +gs_ctl start + + + +2022-04-26 15:21:26.028 [unknown] [unknown] localhost 139810735888128 0[0:0#0] 0 [BACKEND] LOG: undo launcher started +2022-04-26 15:21:26.066 omm2 postgres localhost 139810614208256 0[0:0#0] 0 [BACKEND] LOG: instrumention percentile started +2022-04-26 15:21:26.067 [unknown] [unknown] localhost 139810414786304 0[0:0#0] 0 [UNDO] LOG: [UndoRecycleMain:362]undo recycle started. +2022-04-26 15:21:26.069 omm2 postgres localhost 139810580645632 0[0:0#0] 0 [BACKEND] LOG: statement flush thread start +2022-04-26 15:21:26.069 omm2 postgres localhost 139810479863552 0[0:0#0] 0 [BACKEND] LOG: process wlm thread starting up. +2022-04-26 15:21:26.069 omm2 postgres localhost 139810479863552 0[0:0#0] 0 [BACKEND] LOG: build user data finished +2022-04-26 15:21:26.070 omm2 postgres localhost 139810463082240 0[0:0#0] 0 [BACKEND] LOG: WLMmonitor thread is starting up. +2022-04-26 15:21:26.070 omm2 postgres localhost 139810446300928 0[0:0#0] 0 [BACKEND] LOG: WLMarbiter thread is starting up. 
+2022-04-26 15:21:26.076 omm2 postgres localhost 139810597426944 0[0:0#0] 0 [BACKEND] LOG: ASP thread start +2022-04-26 15:21:40.996 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] LOG: database first startup and recovery finish,so do checkpointer +2022-04-26 15:21:40.997 [MOT] [INFO] Creating MOT checkpoint snapshot: id: 1650957700 +2022-04-26 15:21:40.997 [MOT] [INFO] MOT snapshot ready. id: 1650957700, lsn: 0 +2022-04-26 15:21:40.997 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_csnlog +2022-04-26 15:21:40.997 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] LOG: truncate CSN log oldestXact 46536, next xid 46539 +2022-04-26 15:21:41.000 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_multixact/offsets +2022-04-26 15:21:41.000 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_multixact/members +2022-04-26 15:21:41.005 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [UNDO] LOG: [CheckPointUndoSystemMeta:353]undo metadata checkPointRedo = 349161880. +2022-04-26 15:21:41.009 [MOT] [INFO] MOT begin checkpoint capture. id: 1650957700, lsn: 349161880 +2022-04-26 15:21:41.010 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:21:41.011 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:21:41.113 [MOT] [INFO] Checkpoint [1650957700] completed +2022-04-26 15:21:41.112 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMinLSN is InvalidXLogRecPtr!!! +2022-04-26 15:21:41.112 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMaxLSN is InvalidXLogRecPtr!!! 
+2022-04-26 15:21:41.113 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] LOG: CreateCheckPoint PrintCkpXctlControlFile: [checkPoint] oldCkpLoc:0/14CA2AC8, oldRedo:0/14CA2A48, newCkpLoc:0/14CFCA18, newRedo:0/14CFC998, preCkpLoc:0/14CA2AC8 +2022-04-26 15:21:41.113 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] LOG: will update control file (create checkpoint), shutdown:0 +2022-04-26 15:21:41.115 [unknown] [unknown] localhost 139810836576000 0[0:0#0] 0 [BACKEND] LOG: attempting to remove WAL segments older than log file 000000010000000000000002 +2022-04-26 15:22:29.772 omm2 postgres localhost 139810250028800 0[0:0#0] 0 [BACKEND] LOG: clean statement thread start + + +omm2=# select * from t where id<10; + id | c +----+--- + 3 | b + 4 | b + 5 | b + 6 | b + 7 | b + 8 | b + 9 | b + 1 | b + 2 | b +(9 rows) + + +omm2=# vacuum t; +VACUUM + + +omm2=# checkpoint; +CHECKPOINT + +---修改成错误的checksum,可以启动 + + +[omm2@og01 dump]$ ./dump -file /home/omm2/data/base/16385/73775 -bs 8192 -n 0 -decoder pg_header +{0 [] 24 map[Pd_checksum:0xc0000682d0 Pd_flags:0xc000068330 Pd_lower:0xc000068390 Pd_lsn:0xc000068240 Pd_pagesize_version:0xc0000684b0 Pd_prune_xid:0xc000068510 Pd_special:0xc000068450 Pd_upper:0xc0000683f0] [0xc000068240 0xc0000682d0 0xc000068330 0xc000068390 0xc0000683f0 0xc000068450 0xc0000684b0 0xc000068510]} +======read and dump block 0======= +@0 Pd_lsn size:8 hex:0x14d4834000000000 val:1500968886722363392 +@8 Pd_checksum size:2 hex:0x4263 val:16995 +@10 Pd_flags size:2 hex:0x45 val:69 +@12 Pd_lower size:2 hex:0x310 val:784 +@14 Pd_upper size:2 hex:0x1480 val:5248 +@16 Pd_special size:2 hex:0x2000 val:8192 +@18 Pd_pagesize_version size:2 hex:0x2006 val:8198 +@20 Pd_prune_xid size:4 hex:0x0 val:0 +[omm2@og01 dump]$ + + +omm2=# update t set c='c' where id<500; +UPDATE 499 + + +[omm2@og01 dump]$ kill -9 27074 + +./dump -file /home/omm2/data/base/16385/73775 -bs 8192 -n 0 -decoder pg_header +{0 [] 24 map[Pd_checksum:0xc00008c2d0 Pd_flags:0xc00008c330 Pd_lower:0xc00008c390 Pd_lsn:0xc00008c240 Pd_pagesize_version:0xc00008c4b0 Pd_prune_xid:0xc00008c510 Pd_special:0xc00008c450 Pd_upper:0xc00008c3f0] [0xc00008c240 0xc00008c2d0 0xc00008c330 0xc00008c390 0xc00008c3f0 0xc00008c450 0xc00008c4b0 0xc00008c510]} +======read and dump block 0======= +@0 Pd_lsn size:8 hex:0x14d4834000000000 val:1500968886722363392 +@8 Pd_checksum size:2 hex:0x4263 val:16995 +@10 Pd_flags size:2 hex:0x45 val:69 +@12 Pd_lower size:2 hex:0x310 val:784 +@14 Pd_upper size:2 hex:0x1480 val:5248 +@16 Pd_special size:2 hex:0x2000 val:8192 +@18 Pd_pagesize_version size:2 hex:0x2006 val:8198 +@20 Pd_prune_xid size:4 hex:0x0 val:0 +[omm2@og01 dump]$ + + +./dump -file /home/omm2/data/base/16385/73775 -bs 8192 -n 0 -decoder pg_header -setname Pd_checksum -setval uint16#10 + +[omm2@og01 dump]$ ./dump -file /home/omm2/data/base/16385/73775 -bs 8192 -n 0 -decoder pg_header -setname Pd_checksum -setval uint16#10 +{0 [] 24 map[Pd_checksum:0xc00008e2d0 Pd_flags:0xc00008e330 Pd_lower:0xc00008e390 Pd_lsn:0xc00008e240 Pd_pagesize_version:0xc00008e4b0 Pd_prune_xid:0xc00008e510 Pd_special:0xc00008e450 Pd_upper:0xc00008e3f0] [0xc00008e240 0xc00008e2d0 0xc00008e330 0xc00008e390 0xc00008e3f0 0xc00008e450 0xc00008e4b0 0xc00008e510]} +======read and dump block 0======= +@0 Pd_lsn size:8 hex:0x14d4834000000000 val:1500968886722363392 +@8 Pd_checksum size:2 hex:0x4263 val:16995 +@10 Pd_flags size:2 hex:0x45 val:69 +@12 Pd_lower size:2 hex:0x310 val:784 +@14 Pd_upper size:2 hex:0x1480 val:5248 +@16 Pd_special size:2 hex:0x2000 
val:8192 +@18 Pd_pagesize_version size:2 hex:0x2006 val:8198 +@20 Pd_prune_xid size:4 hex:0x0 val:0 +===============after modify================ +@0 Pd_lsn size:8 hex:0x14d4834000000000 val:1500968886722363392 +@8 Pd_checksum size:2 hex:0xa val:10 +@10 Pd_flags size:2 hex:0x45 val:69 +@12 Pd_lower size:2 hex:0x310 val:784 +@14 Pd_upper size:2 hex:0x1480 val:5248 +@16 Pd_special size:2 hex:0x2000 val:8192 +@18 Pd_pagesize_version size:2 hex:0x2006 val:8198 +@20 Pd_prune_xid size:4 hex:0x0 val:0 +[omm2@og01 dump]$ + +gs_ctl start + + +2022-04-26 15:29:50.790 [unknown] [unknown] localhost 139845378766592 0[0:0#0] 0 [UNDO] LOG: [CheckPointUndoSystemMeta:353]undo metadata checkPointRedo = 350150888. +2022-04-26 15:29:50.794 [MOT] [INFO] MOT begin checkpoint capture. id: 1650958190, lsn: 350150888 +2022-04-26 15:29:50.795 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:29:50.797 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:29:50.897 [MOT] [INFO] Checkpoint [1650958190] completed +2022-04-26 15:29:50.897 [unknown] [unknown] localhost 139845378766592 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMinLSN is InvalidXLogRecPtr!!! +2022-04-26 15:29:50.897 [unknown] [unknown] localhost 139845378766592 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMaxLSN is InvalidXLogRecPtr!!! +2022-04-26 15:29:50.898 [unknown] [unknown] localhost 139845378766592 0[0:0#0] 0 [BACKEND] LOG: CreateCheckPoint PrintCkpXctlControlFile: [checkPoint] oldCkpLoc:0/14D7B110, oldRedo:0/14D7B090, newCkpLoc:0/14DEE168, newRedo:0/14DEE0E8, preCkpLoc:0/14D7B110 +2022-04-26 15:29:50.898 [unknown] [unknown] localhost 139845378766592 0[0:0#0] 0 [BACKEND] LOG: will update control file (create checkpoint), shutdown:0 +2022-04-26 15:29:50.900 [unknown] [unknown] localhost 139845378766592 0[0:0#0] 0 [BACKEND] LOG: attempting to remove WAL segments older than log file 000000010000000000000002 + + +omm2=# select * from t where id<10; + id | c +----+--- + 3 | c + 4 | c + 5 | c + 6 | c + 7 | c + 8 | c + 9 | c + 1 | c + 2 | c +(9 rows) + + +[omm2@og01 dump]$ ./dump -file /home/omm2/data/base/16385/73775 -bs 8192 -n 0 -decoder pg_header +{0 [] 24 map[Pd_checksum:0xc00008c2d0 Pd_flags:0xc00008c330 Pd_lower:0xc00008c390 Pd_lsn:0xc00008c240 Pd_pagesize_version:0xc00008c4b0 Pd_prune_xid:0xc00008c510 Pd_special:0xc00008c450 Pd_upper:0xc00008c3f0] [0xc00008c240 0xc00008c2d0 0xc00008c330 0xc00008c390 0xc00008c3f0 0xc00008c450 0xc00008c4b0 0xc00008c510]} +======read and dump block 0======= +@0 Pd_lsn size:8 hex:0x14dc3c0000000000 val:1503142346332569600 +@8 Pd_checksum size:2 hex:0x2269 val:8809 +@10 Pd_flags size:2 hex:0x41 val:65 +@12 Pd_lower size:2 hex:0x478 val:1144 +@14 Pd_upper size:2 hex:0x900 val:2304 +@16 Pd_special size:2 hex:0x2000 val:8192 +@18 Pd_pagesize_version size:2 hex:0x2006 val:8198 +@20 Pd_prune_xid size:4 hex:0x2ad val:68 + + + +omm2=# vacuum t; +VACUUM +omm2=# checkpoint; +CHECKPOINT +omm2=# + + +omm2=# update t set c='d' where id<500; +UPDATE 499 + + +[omm2@og01 dump]$ kill -9 30834 + + +---数据块修改成错误的lsn,可以启动 + +./dump -file /home/omm2/data/base/16385/73775 -bs 8192 -n 0 -decoder pg_header -setname Pd_lsn -setval uint64#10 + +gs_ctl start + + +2022-04-26 15:34:02.760 omm2 postgres localhost 140324014712576 0[0:0#0] 0 [BACKEND] LOG: instrumention percentile started +2022-04-26 15:34:02.765 omm2 postgres localhost 140323895105280 0[0:0#0] 0 [BACKEND] LOG: process 
wlm thread starting up. +2022-04-26 15:34:02.766 omm2 postgres localhost 140323895105280 0[0:0#0] 0 [BACKEND] LOG: build user data finished +2022-04-26 15:34:02.766 omm2 postgres localhost 140323856250624 0[0:0#0] 0 [BACKEND] LOG: WLMmonitor thread is starting up. +2022-04-26 15:34:02.767 [unknown] [unknown] localhost 140323790059264 0[0:0#0] 0 [UNDO] LOG: [UndoRecycleMain:362]undo recycle started. +2022-04-26 15:34:02.770 omm2 postgres localhost 140323981149952 0[0:0#0] 0 [BACKEND] LOG: statement flush thread start +2022-04-26 15:34:02.772 omm2 postgres localhost 140323997931264 0[0:0#0] 0 [BACKEND] LOG: ASP thread start +2022-04-26 15:34:02.778 omm2 postgres localhost 140323832067840 0[0:0#0] 0 [BACKEND] LOG: WLMarbiter thread is starting up. +2022-04-26 15:34:17.694 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] LOG: database first startup and recovery finish,so do checkpointer +2022-04-26 15:34:17.694 [MOT] [INFO] Creating MOT checkpoint snapshot: id: 1650958457 +2022-04-26 15:34:17.694 [MOT] [INFO] MOT snapshot ready. id: 1650958457, lsn: 0 +2022-04-26 15:34:17.694 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_csnlog +2022-04-26 15:34:17.694 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] LOG: truncate CSN log oldestXact 47616, next xid 47619 +2022-04-26 15:34:17.698 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_multixact/offsets +2022-04-26 15:34:17.698 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_multixact/members +2022-04-26 15:34:17.707 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [UNDO] LOG: [CheckPointUndoSystemMeta:353]undo metadata checkPointRedo = 350849296. +2022-04-26 15:34:17.713 [MOT] [INFO] MOT begin checkpoint capture. id: 1650958457, lsn: 350849296 +2022-04-26 15:34:17.715 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:34:17.816 [MOT] [INFO] Checkpoint [1650958457] completed +2022-04-26 15:34:17.816 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMinLSN is InvalidXLogRecPtr!!! +2022-04-26 15:34:17.816 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMaxLSN is InvalidXLogRecPtr!!! 
+2022-04-26 15:34:17.817 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] LOG: CreateCheckPoint PrintCkpXctlControlFile: [checkPoint] oldCkpLoc:0/14E41FC8, oldRedo:0/14E41F48, newCkpLoc:0/14E98990, newRedo:0/14E98910, preCkpLoc:0/14E41FC8 +2022-04-26 15:34:17.817 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] LOG: will update control file (create checkpoint), shutdown:0 +2022-04-26 15:34:17.819 [unknown] [unknown] localhost 140324246648576 0[0:0#0] 0 [BACKEND] LOG: attempting to remove WAL segments older than log file 000000010000000000000002 + + +omm2=# select * from t where id<10; + id | c +----+--- + 3 | d + 4 | d + 5 | d + 6 | d + 7 | d + 8 | d + 9 | d + 1 | d + 2 | d +(9 rows) + + +omm2=# update t set c='e' where id<500; +UPDATE 499 + +[omm2@og01 dump]$ kill -9 499 + +---dd一个数据块为0,可以启动 + +dd if=/dev/zero of=/home/omm2/data/base/16385/73775 conv=notrunc bs=8192 count=1 +记录了1+0 的读入 +记录了1+0 的写出 +8192字节(8.2 kB)已复制,0.00013172 秒,62.2 MB/秒 + + +2022-04-26 15:39:24.369 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_csnlog +2022-04-26 15:39:24.369 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [BACKEND] LOG: truncate CSN log oldestXact 48052, next xid 48055 +2022-04-26 15:39:24.373 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_multixact/offsets +2022-04-26 15:39:24.373 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [SLRU] LOG: remove old segments(<0) under pg_multixact/members +2022-04-26 15:39:24.379 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [UNDO] LOG: [CheckPointUndoSystemMeta:353]undo metadata checkPointRedo = 351319280. +2022-04-26 15:39:24.384 [MOT] [INFO] MOT begin checkpoint capture. id: 1650958764, lsn: 351319280 +2022-04-26 15:39:24.386 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:39:24.387 [MOT] [INFO] GC PARAMS: isGcEnabled = true, limboSizeLimit = 524288, limboSizeLimitHigh = 8388608, rcuFreeCount = 8192 +2022-04-26 15:39:24.488 [MOT] [INFO] Checkpoint [1650958764] completed +2022-04-26 15:39:24.487 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMinLSN is InvalidXLogRecPtr!!! +2022-04-26 15:39:24.487 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [BACKEND] WARNING: replicationSlotMaxLSN is InvalidXLogRecPtr!!! +2022-04-26 15:39:24.488 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [BACKEND] LOG: CreateCheckPoint PrintCkpXctlControlFile: [checkPoint] oldCkpLoc:0/14E98990, oldRedo:0/14E98910, newCkpLoc:0/14F0B570, newRedo:0/14F0B4F0, preCkpLoc:0/14E98990 +2022-04-26 15:39:24.488 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [BACKEND] LOG: will update control file (create checkpoint), shutdown:0 +2022-04-26 15:39:24.490 [unknown] [unknown] localhost 140529590335232 0[0:0#0] 0 [BACKEND] LOG: attempting to remove WAL segments older than log file 000000010000000000000002 +[omm2@og01 pg_log]$ + + +omm2=# select * from t where id<10; + id | c +----+--- + 3 | e + 4 | e + 5 | e + 6 | e + 7 | e + 8 | e + 9 | e + 1 | e + 2 | e +(9 rows) + + +omm2=# select (select setting from pg_settings where name='data_directory')||'/'||pg_relation_filepath('t'); + ?column? 
+----------------------------------
+ /home/omm2/data/base/16385/73775
+
+-- When full_page_writes is on, the first change to a block after each checkpoint records a full copy of the block in the WAL; during recovery that copy is simply written back over the block in the data file.
+[omm2@og01 pg_log]$ gsql -c "show all"|grep full_page_writes
+ full_page_writes                    | on
+```
diff --git "a/content/zh/post/enmo/MogDB openGauss\345\255\246\344\271\240\347\254\224\350\256\260-\350\216\267\345\217\226\345\257\271\350\261\241DDL.md" "b/content/zh/post/enmo/MogDB openGauss\345\255\246\344\271\240\347\254\224\350\256\260-\350\216\267\345\217\226\345\257\271\350\261\241DDL.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f8ec3f3344e67ae3fa28de5a906b223aa4c581df
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB openGauss\345\255\246\344\271\240\347\254\224\350\256\260-\350\216\267\345\217\226\345\257\271\350\261\241DDL.md"
@@ -0,0 +1,135 @@
++++
+
+title = "MogDB/openGauss Study Notes: Getting Object DDL"
+
+date = "2022-05-18"
+
+tags = ["MogDB/openGauss Study Notes: Getting Object DDL"]
+
+archives = "2022-05"
+
+author = "云和恩墨-范计杰"
+
+summary = "MogDB/openGauss Study Notes: Getting Object DDL"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# MogDB/openGauss Study Notes: Getting Object DDL
+
+Source: [https://www.modb.pro/db/399230](https://www.modb.pro/db/399230)
+
+## Built-in functions
+
+```
+omm2=# \df *def
+                                              List of functions
+   Schema   |         Name         | Result data type |                 Argument data types                      |  Type  | fencedmode | propackage | prokind
+------------+----------------------+------------------+----------------------------------------------------------+--------+------------+------------+---------
+ pg_catalog | pg_get_constraintdef | text             | oid                                                      | normal | f          | f          | f
+ pg_catalog | pg_get_constraintdef | text             | oid, boolean                                             | normal | f          | f          | f
+ pg_catalog | pg_get_functiondef   | record           | funcid oid, OUT headerlines integer, OUT definition text | normal | f          | f          | f
+ pg_catalog | pg_get_indexdef      | text             | oid                                                      | normal | f          | f          | f
+ pg_catalog | pg_get_indexdef      | text             | oid, boolean                                             | normal | f          | f          | f
+ pg_catalog | pg_get_indexdef      | text             | oid, integer, boolean                                    | normal | f          | f          | f
+ pg_catalog | pg_get_ruledef       | text             | oid                                                      | normal | f          | f          | f
+ pg_catalog | pg_get_ruledef       | text             | oid, boolean                                             | normal | f          | f          | f
+ pg_catalog | pg_get_tabledef      | text             | regclass                                                 | normal | f          | f          | f
+ pg_catalog | pg_get_triggerdef    | text             | oid                                                      | normal | f          | f          | f
+ pg_catalog | pg_get_triggerdef    | text             | oid, boolean                                             | normal | f          | f          | f
+ pg_catalog | pg_get_viewdef       | text             | oid                                                      | normal | f          | f          | f
+ pg_catalog | pg_get_viewdef       | text             | oid, boolean                                             | normal | f          | f          | f
+ pg_catalog | pg_get_viewdef       | text             | oid, integer                                             | normal | f          | f          | f
+ pg_catalog | pg_get_viewdef       | text             | text                                                     | normal | f          | f          | f
+ pg_catalog | pg_get_viewdef       | text             | text, boolean                                            | normal | f          | f          | f
+(16 rows)
+```
+
+## Examples
+
+### Getting a table's DDL
+
+```
+omm2=# select pg_get_tabledef('t');
+                    pg_get_tabledef
+--------------------------------------------------------
+ SET search_path = public;                              +
+ CREATE TABLE t (                                       +
+     id numeric,                                        +
+     c character varying(100)                           +
+ )                                                      +
+ WITH (orientation=row, fillfactor=50, compression=no);
+(1 row)
+
+omm2=# \x
+Expanded display is on.
+omm2=# select pg_get_tabledef('t');
+-[ RECORD 1 ]---+-------------------------------------------------------
+pg_get_tabledef | SET search_path = public;
+                | CREATE TABLE t (
+                |     id numeric,
+                |     c character varying(100)
+                | )
+                | WITH (orientation=row, fillfactor=50, compression=no);
+```
+
+### Getting an index's DDL
+
+```
+omm2=# select pg_get_indexdef('idx_t_id'::regclass);
+                          pg_get_indexdef
+-------------------------------------------------------------------
+ CREATE INDEX idx_t_id ON t USING btree (id) TABLESPACE pg_default
+(1 row)
+
+-- You can also query the pg_indexes view directly:
+omm2=# \x
+Expanded display is on.
+omm2=# select * from pg_indexes where indexname='idx_t_id';
+-[ RECORD 1 ]-----------------------------------------------------------------
+schemaname | public
+tablename  | t
+indexname  | idx_t_id
+tablespace |
+indexdef   | CREATE INDEX idx_t_id ON t USING btree (id) TABLESPACE pg_default
+```
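+
+### Getting a view's DDL
+
+The function list above also contains pg_get_viewdef. A quick sketch of the same pattern, run here against the built-in pg_indexes view so it can be tried on any instance (output omitted):
+
+```
+omm2=# select pg_get_viewdef('pg_indexes'::regclass);
+```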
+
+## Generating DDL with gs_dump
+
+gs_dump can also produce the table definition together with the definitions of its indexes and so on.
+
+```
+$ gs_dump -t t --section pre-data omm2
+
+SET statement_timeout = 0;
+SET xmloption = content;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+SET search_path = public;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: t; Type: TABLE; Schema: public; Owner: omm2; Tablespace:
+--
+
+CREATE TABLE t (
+    id numeric,
+    c character varying(100)
+)
+WITH (orientation=row, fillfactor=50, compression=no);
+
+
+ALTER TABLE public.t OWNER TO omm2;
+```
+
+```
+$ gs_dump -t t --section post-data omm2
+
+SET statement_timeout = 0;
+SET xmloption = content;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+SET search_path = public;
+
+SET default_tablespace = '';
+
+--
+-- Name: idx_t_id; Type: INDEX; Schema: public; Owner: omm2; Tablespace:
+--
+
+CREATE INDEX idx_t_id ON t USING btree (id) TABLESPACE pg_default;
+```
diff --git "a/content/zh/post/enmo/MogDB \345\210\206\345\214\272\350\241\250\345\205\250\346\223\215\344\275\234\351\200\237\346\237\245\344\275\277\347\224\250\350\204\232\346\234\254\345\256\236\347\216\260RANGE\345\210\206\345\214\272\345\256\232\346\227\266\350\207\252\345\212\250\345\210\240\351\231\244.md" "b/content/zh/post/enmo/MogDB \345\210\206\345\214\272\350\241\250\345\205\250\346\223\215\344\275\234\351\200\237\346\237\245\344\275\277\347\224\250\350\204\232\346\234\254\345\256\236\347\216\260RANGE\345\210\206\345\214\272\345\256\232\346\227\266\350\207\252\345\212\250\345\210\240\351\231\244.md"
new file mode 100644
index 0000000000000000000000000000000000000000..9da0f0cd9236f0bbb243cc64950a4c4bb3f0fc78
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB \345\210\206\345\214\272\350\241\250\345\205\250\346\223\215\344\275\234\351\200\237\346\237\245\344\275\277\347\224\250\350\204\232\346\234\254\345\256\236\347\216\260RANGE\345\210\206\345\214\272\345\256\232\346\227\266\350\207\252\345\212\250\345\210\240\351\231\244.md"
@@ -0,0 +1,390 @@
++++
+
+title = "MogDB Partitioned Tables: Quick Reference for All Operations / Scheduled Automatic Dropping of RANGE Partitions with a Script"
+
+date = "2022-04-14"
+
+tags = ["MogDB Partitioned Tables: Quick Reference for All Operations / Scheduled Automatic Dropping of RANGE Partitions with a Script"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "MogDB Partitioned Tables: Quick Reference for All Operations / Scheduled Automatic Dropping of RANGE Partitions with a Script"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# MogDB Partitioned Tables: Quick Reference for All Operations / Scheduled Automatic Dropping of RANGE Partitions with a Script
+
+Source: https://www.modb.pro/db/237966
+
+**All operations in this post are based on MogDB 2.0.1.**
+
+## Partition types
+
+- RANGE partitioning: partitions by value ranges. The most common type in production, especially suitable for systems that query by date.
+- INTERVAL partitioning: an enhanced form of RANGE partitioning; on insert, a new partition is created automatically in real time according to the partition key value. Very practical.
+- LIST partitioning: partitions by enumerated values. The current MogDB version (2.0.1) does not support a default partition, so if an inserted row's partition key value is not enumerated anywhere, the SQL statement fails.
+- HASH partitioning: usually used as a subpartition; rarely used on its own.
+- SUB-PARTITION: adds a second level of partitioning under a partition, usually to speed up SQL execution, e.g. RANGE-HASH subpartitioning. Subpartitions are not covered in this post.
+
+## Partition operations
+
+### Creating partitioned tables
+
+**RANGE partitioning**
+
+- The partition key may consist of multiple columns.
+- A partition whose key is maxvalue can hold values that fit no other partition.
+- Note that a partition's range is [ ): a partition does not store its upper boundary value. For example, with partition p3 values less than('2021-10-01'), p3 does not store rows with '2021-10-01'; they go into the next partition.
+
+```sql
+-- SQL
+CREATE TABLE part_range (
+    id int not null,
+    remark varchar(8),
+    db_insert_time date
+) PARTITION BY RANGE (db_insert_time)
+(
+    partition p1 values less than('2021-04-01'),
+    partition p2 values less than('2021-07-01'),
+    partition p3 values less than('2021-10-01'),
+    partition p4 values less than(maxvalue)
+);
+```
+
+**INTERVAL partitioning**
+
+- The interval can be a day, a week, a month, or a year; the smallest unit is one day.
+
+```sql
+-- SQL
+create table part_interval(
+    id serial,
+    remark varchar(16),
+    db_insert_time date
+) partition by range(db_insert_time) interval('1 day')
+(
+    partition p1 values less than('2020-12-08'),
+    partition p2 values less than('2020-12-09')
+ );
+```
+
+**LIST partitioning**
+
+- A default partition is not supported yet; if an inserted value falls outside all partitions' value lists, the SQL statement fails.
+- At most 64 partitions.
+- Each partition key value must not exceed 1048575.
+
+```sql
+-- SQL
+CREATE TABLE part_list (
+    id serial not null,
+    remark varchar(16),
+    province_name varchar(16)
+) PARTITION BY LIST(province_name)
+(
+    PARTITION p1 values('harbin','shenyang','changchun'),
+    PARTITION p2 values('beijing','shenzhen','tianjing')
+);
+```
+
+**HASH partitioning**
+
+- The recommended number of partitions is power(2,N); the partition count cannot be changed after CREATE TABLE.
+- At most 1048575 partitions.
+
+```sql
+-- SQL
+create table part_hash(
+    id int not null,
+    name varchar(16)
+) partition by hash(id)
+(
+    partition p1,
+    partition p2,
+    partition p3,
+    partition p4
+);
+```
+
+### Querying partition information
+
+```sql
+-- SQL
+select rel_table.relname table_name, rel_partition.*
+  from (select relname, parentid from pg_partition where parttype = 'r') rel_table,
+       (select relname,
+               parentid,
+               relfilenode,
+               parttype,
+               partstrategy,
+               boundaries,
+               interval
+          from pg_partition
+         where parttype = 'p') rel_partition
+ where rel_table.parentid = rel_partition.parentid
+   and rel_table.relname in
+       ('part_range', 'part_list', 'part_hash', 'part_interval')
+ order by 1, 2;
+```
+
+### Renaming partitions
+
+**RANGE partitioning**
+
+```sql
+-- SQL
+alter table part_range rename partition p1 to p_1;
+```
+
+**INTERVAL partitioning**
+
+```sql
+-- SQL
+alter table part_interval rename partition p1 to p_1;
+```
+
+**LIST partitioning**
+
+```sql
+-- SQL
+alter table part_list rename partition p1 to p_1;
+```
+
+**HASH partitioning**
+
+```sql
+-- SQL
+alter table part_hash rename partition p1 to p_1;
+```
+
+### Adding partitions
+
+**RANGE partitioning**
+
+- If a maxvalue partition exists, drop it first (or use the split command instead to add partitions).
+- Multiple partitions can be added in one statement.
+
+```sql
+-- SQL
+alter table part_range drop partition p4;
+alter table part_range add partition p5 values less than('2022-02-01'),add partition p6 values less than('2022-02-02');
+```
+
+**INTERVAL partitioning**
+
+- No manual work is needed; a partition is added automatically when data is inserted.
+
+```
+-- SQL
+insert into part_interval values(1,null,now());
+```
+
+--Before the insert
+
+![img](https://oss-emcsprod-public.modb.pro/image/editor/20220209-1519ffcf-3086-4a81-bd90-0eba6592138f.png)
+
+--After the insert
+
+![img](https://oss-emcsprod-public.modb.pro/image/editor/20220209-17160237-a0f1-4952-9072-2ec56ca3b6fd.png)
+
+**LIST partitioning**
+
+- Multiple partitions can be added in one statement.
+
+```sql
+-- SQL
+alter table part_list add partition p3 values('taiwan'),add partition p4 values('aomen'),add partition p5 values('xiangguang');
+```
+
+**HASH partitioning**
+
+- Adding partitions is not supported yet.
+
+### Dropping partitions
+
+**RANGE partitioning**
+
+- Multiple partitions can be dropped in one statement.
+
+```sql
+-- SQL
+alter table part_range drop partition p5,drop partition p6;
+```
+
+**INTERVAL partitioning**
+
+- See the automatic partition-dropping script below.
+
+**LIST partitioning**
+
+- Multiple partitions can be dropped in one statement.
+
+```sql
+-- SQL
+alter table part_list drop partition p3,drop partition p4,drop partition p5;
+```
+
+**HASH partitioning**
+
+- Dropping partitions is not supported yet.
+
+### Splitting partitions
+
+**RANGE partitioning**
+
+- The split point must lie between the previous partition's boundary and the boundary of the partition being split; it cannot span multiple partitions.
+- Partition names cannot be reused.
+
+```sql
+-- SQL
+alter table part_range split partition p3 at ('2021-08-01') into (partition p_3,partition p_4);
+```
+
+**INTERVAL partitioning**
+
+- Same as RANGE partitioning.
+
+**LIST partitioning**
+
+- Splitting partitions is not supported yet.
+
+**HASH partitioning**
+
+- Splitting partitions is not supported yet.
+
+### Truncating partitions
+
+**RANGE partitioning**
+
+```sql
+-- SQL
+alter table part_range truncate partition p_1;
+```
+
+**INTERVAL partitioning**
+
+- Same as RANGE partitioning.
+
+**LIST partitioning**
+
+- Truncating partitions is not supported yet.
+
+**HASH partitioning**
+
+- Truncating partitions is not supported yet.
+
+### Exchanging partitions
+
+**(converting an ordinary table into a partition, or a partition into an ordinary table)**
+
+- The column structures, indexes, and constraints must match.
+- The ordinary table's data must fall within the partition key range of the partition being exchanged. This suits cases where the change window is short and the data volume is large (bearing in mind that global indexes must be rebuilt).
+
+**RANGE partitioning**
+
+```sql
+-- Create an ordinary table
+CREATE TABLE t_exchange_range (
+    id int not null,
+    remark varchar(8),
+    db_insert_time date
+);
+
+-- Insert data into the ordinary table
+insert into t_exchange_range values(3,null,date '2021-08-01'),(3,null,date '2021-08-15'),(3,null,date '2021-09-01'),(3,null,date '2021-09-15');
+
+-- Exchange the partition
+alter table part_range EXCHANGE partition (p_4) with table t_exchange_range;
+
+-- Query the partition
+select * from part_range partition(p_4);
+
+-- Output:
+
+```
+
+**INTERVAL partitioning**
+
+- Same as RANGE partitioning.
+
+**LIST partitioning**
+
+- Exchanging partitions is not supported yet.
+
+**HASH partitioning**
+
+- Exchanging partitions is not supported yet.
+
+## Automatic partition-dropping script:
+
+The script works against the PG_PARTITION system catalog, whose columns are:
+
+| Name | Type | Description |
+| :-- | :-- | :-- |
+| oid | oid | Row identifier (hidden attribute; must be selected explicitly). |
+| relname | name | Name of the partitioned table, partition, toast table on a partition, or partition index. |
+| parttype | "char" | Object type: 'r' = partitioned table, 'p' = table partition, 'x' = index partition, 't' = toast table. |
+| parentid | oid | For a partitioned table or a partition, the OID of the partitioned table in PG_CLASS; for an index partition, the OID of the partitioned index it belongs to. |
+| rangenum | integer | Reserved field. |
+| intervalnum | integer | Reserved field. |
+| partstrategy | "char" | Partitioning strategy; currently supported: 'r' = range, 'v' = numeric, 'i' = interval, 'l' = list, 'h' = hash, 'n' = invalid. |
+| relfilenode | oid | Physical storage location of the table partition, index partition, or toast table on a partition. |
+| reltablespace | oid | OID of the tablespace that the table partition, index partition, or toast table on a partition belongs to. |
+| relpages | double precision | Statistics: number of data pages of the table or index partition. |
+| reltuples | double precision | Statistics: number of tuples of the table or index partition. |
+| relallvisible | integer | Statistics: number of visible data pages of the table or index partition. |
+| reltoastrelid | oid | OID of the toast table that belongs to the table partition. |
+| reltoastidxid | oid | OID of the index of the toast table that belongs to the table partition. |
+| indextblid | oid | OID of the table partition that an index partition belongs to. |
+| indisusable | Boolean | Whether the partition index is usable. |
+| reldeltarelid | oid | OID of the delta table. |
+| reldeltaidx | oid | OID of the delta table's index. |
+| relcudescrelid | oid | OID of the CU description table. |
+| relcudescidx | oid | OID of the CU description table's index. |
+| relfrozenxid | xid32 | Frozen transaction ID. Kept for backward compatibility; relfrozenxid64 now records this information. |
+| intspnum | integer | Number of tablespaces that an interval partition belongs to. |
+| partkey | int2vector | Column numbers of the partition key. |
+| intervaltablespace | oidvector | Tablespaces that interval partitions belong to; interval partitions land in them round-robin. |
+| interval | text[] | Interval value of an interval partition. |
+| boundaries | text[] | Upper boundaries of range and interval partitions. |
+| transit | text[] | Transit points of interval partitions. |
+| reloptions | text[] | Storage options of the partition, "keyword=value" strings in the same form as pg_class.reloptions; currently used to collect information for online scale-out. |
+| relfrozenxid64 | xid | Frozen transaction ID. |
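+
+Based on the parttype, partstrategy, and boundaries columns above, here is a minimal sketch of such a script (assumptions: the table is RANGE-partitioned on a date/time column with a single-column upper boundary per partition; the procedure and variable names are illustrative, not from the original post; schedule the call from cron via gsql):
+
+```sql
+CREATE OR REPLACE PROCEDURE drop_expired_partitions(p_table name, p_keep_days int)
+AS
+BEGIN
+    FOR v_part IN
+        SELECT p.relname
+          FROM pg_partition p
+         WHERE p.parttype = 'p'             -- table partitions only
+           AND p.partstrategy = 'r'         -- range strategy
+           AND p.parentid = (SELECT oid FROM pg_class WHERE relname = p_table)
+           AND p.boundaries[1] IS NOT NULL  -- skip a MAXVALUE partition,
+           AND lower(p.boundaries[1]) <> 'maxvalue'  -- whose boundary is not a date
+           AND p.boundaries[1]::date < current_date - p_keep_days
+    LOOP
+        EXECUTE IMMEDIATE 'alter table ' || quote_ident(p_table)
+                       || ' drop partition ' || quote_ident(v_part.relname);
+    END LOOP;
+END;
+/
+
+-- Example: keep 90 days of data in part_range.
+call drop_expired_partitions('part_range', 90);
+```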
"b/content/zh/post/enmo/MogDB-BRM\345\267\245\345\205\267\345\244\207\344\273\275\345\217\212\345\274\202\346\234\272\346\201\242\345\244\215\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..6dc2f326ea66b05fc2d392fb51ed2c6a7b6793ea --- /dev/null +++ "b/content/zh/post/enmo/MogDB-BRM\345\267\245\345\205\267\345\244\207\344\273\275\345\217\212\345\274\202\346\234\272\346\201\242\345\244\215\346\265\213\350\257\225.md" @@ -0,0 +1,368 @@ ++++ + +title = "MogDB-BRM工具备份及异机恢复测试" + +date = "2022-05-18" + +tags = ["MogDB-BRM工具备份及异机恢复测试"] + +archives = "2022-05" + +author = "云和恩墨" + +summary = "MogDB-BRM工具备份及异机恢复测试" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# MogDB-BRM工具备份及异机恢复测试 + +本文出处:[https://www.modb.pro/db/336213](https://www.modb.pro/db/336213) + +### BRM工具简介 + +BRM备份恢复工具全称为:Backup and Recovery Manager,面向MogDB数据库实现备份和恢复运维管理工作。 + +BRM工具支持的备份方式:数据库全备,增量备份以及归档备份。 + +BRM工具支持的恢复方式:基于时间点恢复,以及事务ID恢复以及基于备份id恢复。 + +下面绍使用BRM工具进行全备和归档备份并在异机实现恢复数据库到指定时间点的例子。 + +### 解压BRM工具 + +登录数据库主机,解压安装介质并授权 + +``` +# cd /home/omm/software/brm +# unzip brm_0.0.12_linux_amd64.zip +# chown omm: -R /home/omm/software/brm +``` + +### 配置brm工具参数文件 + +/home/omm/下创建.brm目录并将配置文件复制到/home/omm/.brm目录下 + +``` +# su - omm +$ mkdir /home/omm/.brm +$ cp /home/omm/software/conf/brm.yaml /home/omm/.brm/ +``` + +修改BRM配置文件brm.yaml,指定备份文件存放路径和日志存放路径,存放路径需具有omm用户创建目录权限。 + +``` +# 没有启用 +backup_user: omm +# 备份文件存放目录 +backup_home: /home/omm/brm +# 日志目录 +log_file: /home/omm/log/brm/brm.log +# 日志级别 +log_level: DEBUG +# 没有启用 +lock_directory: /home/omm/log/run +# wal全局备份几次 default 1 +wal_retention_redundancy: 1 +# 是否允许主库进行备份. default:false +no_allow_primary_backup: false +network_limit_rate: 10m +# default on +enable_backup_wal_file_check: on +## 启动备份文件中心同步 +#enable_backup_push: on +## 当前Brm节点名称,当enable_backup_center_push为on时此项为必填 +#brm_node_name: 127.0.0.1:5434 +## brm 备份文件集中存储配置 +#backup_center: +# - host: 127.0.0.1 +# port: 44332 +# - host: 127.0.0.1 +# port: 44333 +``` + +### BRM工具验证 + +使用omm用户执行如下验证工具安装成功 + +``` +$ /home/omm/software/brm/brm version + +time="2022-02-18 11:21:36.291979" level=info msg="Using config file:/home/omm2/.brm/brm.yaml" + +Release version: 0.0.12 + +Git Commit hash: 8bcf8b7 + +Git Tag : v0.0.12 + +Build timestamp: 2021-11-15T05:00:20ZZ +``` + +### 数据库开启归档 + +使用omm用户登录主库确认归档是否开启 + +``` +postgres=# show archive_mode; archive_mode -------------- on (1 row) postgres=# show archive_dest; archive_dest -------------- /home/omm/arch (1 row) +``` + +如主库归档未开启,通过如下方式登录主库开启归档 + +``` +postgres=# show archive_mode; + archive_mode +-------------- + on +(1 row) + +postgres=# show archive_dest; + archive_dest +-------------- + /home/omm/arch +(1 row) +``` + +### BRM创建备份服务 + +使用omm用户创建备份服务 + +``` +$ ./brm add-server --instance=dn_6001 --pgdata=/home/omm/mogdata/db1 --retention-redundancy=3 --retention-window=3 --pgdatabase=postgres --pgport=26000 --archive-timeout 2min --archive-dir=/home/omm/arch\ +time="2022-02-18 13:44:42.180559" level=info msg="Using config file:/home/omm/.brm/brm.yaml" +time="2022-02-18 13:44:42.180836" level=info msg="add server begin" +time="2022-02-18 13:44:42.181034" level=info msg="the gs_probackup path /home/omm/mogdb/app/bin/gs_probackup" +time="2022-02-18 13:44:42.185983" level=info msg="the gs_probackup version 2.1.0" +time="2022-02-18 13:44:42.186054" level=info msg="the gs_ctl path /home/omm/mogdb/app/bin/gs_ctl" +time="2022-02-18 13:44:42.190809" level=info msg="gs_ctl version 9.2.4 " +time="2022-02-18 13:44:42.196368" level=info 
msg="Instance 'dn_6001' version 2.1.0" +time="2022-02-18 13:44:42.200305" level=info msg="Instance 'dn_6001' XLogSegSize 16777216" +time="2022-02-18 13:44:42.218946" level=info msg="Check params archive_dest" +time="2022-02-18 13:44:42.221207" level=info msg="add server end " +``` + +### 数据库全备 + +使用omm用户进行数据库全备操作 + +``` +$ ./brm add-server --instance=dn_6001 --pgdata=/home/omm/mogdata/db1 +``` + +### 数据库进行修改数据操作 + +全备后,登录数据库进行创建表和插入数据的操作 + +``` +$ gsql -p26000 -Uusername dbname +Password for user username: +gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:34 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +dbname=> create table test_P as select * from P; +INSERT 0 1056321 + +dbname=> \c - omm +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "dbname" as user "omm". + +dbname=# select pg_switch_xlog(); + pg_switch_xlog +---------------- + 3/37807ED0 +(1 row) + +dbname=# \c - username +Password for user username: +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "dbname" as user "username". + +dbname=> select count(*) from test_P; + count +--------- + 1056321 +(1 row) + +dbname=> \q + +``` + +### 数据库归档备份 + +使用omm用户进行备份数据库归档文件操作 + +``` +$ ./brm backup-wal --instance=dn_6001 --clear=on --delete-wal -j 4 +``` + +### 再次修改数据库数据 + +登录数据库多测进行数据修改操作 + +``` +$ gsql -p26000 -Uusername dbname +Password for user username: +gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:34 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. +...... + +dbname=> select count(*) from test_P; + count +--------- + 1056312 +(1 row) + +dbname=> select sysdate from dual; --有少数据 + sysdate +--------------------- + 2022-02-18 16:48:22 +(1 row) + +dbname=> delete from test_P; +DELETE 1056312 +dbname-> \q + +[omm@HDTYV-testdb-1T ~]$ gsql -p26000 -Uusername dbname +Password for user username: +gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:34 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +dbname=> select count(*) from test_P; + count +------- + 0 +(1 row) + +openGauss=# select pg_switch_xlog(); --没数据 + pg_switch_xlog +---------------- + 3/54605338 +(1 row) + +openGauss=# select sysdate; + sysdate +--------------------- + 2022-02-18 16:50:48 +(1 row) +..... + +dbname=> select count(*) from test_P; + count +--------- + 1056321 +(1 row) + +dbname=> select sysdate;--有数据 + sysdate +--------------------- + 2022-02-18 17:15:04 +(1 row) + +dbname=> + +openGauss=# select pg_switch_xlog();--有数据 + pg_switch_xlog +---------------- + 3/6A21D228 +(1 row) + +openGauss=# select sysdate; + sysdate +--------------------- + 2022-02-18 17:15:20 +(1 row) +``` + +### 数据库归档备份 + +使用omm用户再次进行备份数据库归档文件操作 + +``` +$ ./brm backup-wal --instance=dn_6001 --clear=on --delete-wal -j 4 +``` + +### 数据库恢复到指定时间点 + +将/home/omm/backup/brm文件夹拷贝到进行恢复数据库的主机 + +注:恢复数据库的主机要提前安装过mogdb数据库并配置好BRM工具;如果使用数据库归档备份文件恢复数据库,要求数据库安装用户和源库相同,且源库使用的端口号在目标库不能被占用。 + +恢复主机上BRM工具配置文件内容: + +``` +$ cat ~/.brm/brm.yaml +# 没有启用 +backup_user: omm +# 备份文件存放目录 +backup_home: /home/omm/backup/brm +# 日志目录 +log_file: /home/omm/backup/log/brm/brm.log +# 日志级别 +log_level: DEBUG +# 没有启用 +lock_directory: /home/omm/backup/lib/run +# wal全局备份几次 default 1 +wal_retention_redundancy: 1 +# 是否允许主库进行备份. 
+# whether backing up from the primary is forbidden, default false
+no_allow_primary_backup: false
+network_limit_rate: 10m
+# default on
+enable_backup_wal_file_check: on
+## enable pushing backup files to the backup center
+#enable_backup_push: on
+## name of this BRM node; required when enable_backup_center_push is on
+#brm_node_name: 127.0.0.1:5434
+## central storage configuration for BRM backup files
+#backup_center:
+#  - host: 127.0.0.1
+#    port: 44332
+#  - host: 127.0.0.1
+#    port: 44333
+```
+
+Copy the source host's BRM backup files into the BRM backup directory on the target server:
+
+```
+$ pwd
+/home/omm/backup
+$ scp omm@:/home/omm/brm.tar ./
+brm.tar                  100%   15GB 196.0MB/s   01:17
+$ tar -xf brm.tar
+$ ll
+total 15513360
+drwx------ 4 omm2 dbgrp          43 Feb 18 14:14 .
+drwx------ 8 omm2 dbgrp         297 Feb 18 14:11 ..
+drwx------ 4 omm2 dbgrp          32 Feb 18 14:04 brm
+-rw------- 1 omm2 dbgrp 15885680640 Feb 18 14:12 brm.tar
+drwx------ 3 omm2 dbgrp          17 Feb 18 11:21 log
+$ rm brm.tar
+```
+
+Restore the database to a point in time:
+
+```
+$ ./brm restore -i dn_6001 -D /home/omm/mogdata/db5 --recovery-target-time='2022-02-18 16:47:22' -j 4
+```
+
+### Check the data in the restored database
+
+After the restore completes, check the database data and confirm that it matches the source database as of the restore point:
+
+```
+$ gsql -p26000 dbname -Uusername
+Password for user username:
+gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:34 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+dbname=> select count(*) from test_P;
+  count
+---------
+ 1056312
+(1 row)
+```
diff --git "a/content/zh/post/enmo/MogDB2.1\346\225\260\346\215\256\345\272\223\346\226\260\347\211\271\346\200\247\344\271\213\357\274\232CREATE PACKAGE.md" "b/content/zh/post/enmo/MogDB2.1\346\225\260\346\215\256\345\272\223\346\226\260\347\211\271\346\200\247\344\271\213\357\274\232CREATE PACKAGE.md"
new file mode 100644
index 0000000000000000000000000000000000000000..30c94e52bf0cea0e4575d2bed71fdddf17023696
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB2.1\346\225\260\346\215\256\345\272\223\346\226\260\347\211\271\346\200\247\344\271\213\357\274\232CREATE PACKAGE.md"
@@ -0,0 +1,149 @@
++++
+
+title = "New in MogDB 2.1: CREATE PACKAGE"
+
+date = "2022-04-15"
+
+tags = ["New in MogDB 2.1: CREATE PACKAGE"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "New in MogDB 2.1: CREATE PACKAGE"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# New in MogDB 2.1: CREATE PACKAGE
+
+Source: https://www.modb.pro/db/390543
+
+MogDB supports creating PACKAGEs as of version 2.1.
+
+### Notes on using PACKAGE:
+
+- Because of upgrade restrictions, stored procedures with the same name and the same parameters cannot be created, even in different packages.
+- PACKAGE is supported only in the centralized deployment; it cannot be used in the distributed one.
+- Every function or stored procedure declared in the package specification must have a definition in the package body.
+- During instantiation, stored procedures that contain commit/rollback cannot be called.
+- Package functions cannot be called in triggers.
+- Package variables cannot be used directly in external SQL.
+- A package's private variables and stored procedures cannot be called from outside the package.
+- Usage that is unsupported in ordinary stored procedures remains unsupported in packages; for example, if commit/rollback is not allowed in a function, a package function equally cannot call commit/rollback.
+- A schema cannot have the same name as a package.
+- Only A-style stored procedure and function definitions are supported.
+- Duplicate variable names inside a package, including duplicate parameter names, are not supported.
+- Package global variables are session-level; package variables are not shared across sessions.
+- When a package calls a function with an autonomous transaction, public variables, and functions that use public variables recursively, are not allowed.
+- Declaring a ref cursor type inside a package is not supported.
+
+### CREATE PACKAGE syntax:
+
+**CREATE PACKAGE SPECIFICATION syntax**
+
+```sql
+CREATE [ OR REPLACE ] PACKAGE [ schema ] package_name
+    [ invoker_rights_clause ] { IS | AS } item_list_1 END package_name;
+
+```
+
+invoker_rights_clause can be declared as AUTHID DEFINER or AUTHID INVOKER, i.e. definer rights or invoker rights.
+item_list_1 can be declared variables as well as stored procedures and functions.
+The PACKAGE SPECIFICATION declares the package's public variables, functions, exceptions, and so on, which external functions or stored procedures can call. In the specification, stored procedures and functions can only be declared, not defined.
+
+**CREATE PACKAGE BODY syntax**
+
+```sql
+CREATE [ OR REPLACE ] PACKAGE BODY [ schema ] package_name
+    { IS | AS } declare_section [ initialize_section ] END package_name;
+```
+
+The PACKAGE BODY defines the package's private variables, functions, and so on. A variable or function that was not declared in the PACKAGE SPECIFICATION is private.
+The package body may also declare an instantiation section used to initialize the package.
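+
+As a quick illustration of invoker_rights_clause before the tests below (a sketch: the package name and function are made up, and a matching package body would still be required before the function could be called):
+
+```sql
+CREATE OR REPLACE PACKAGE pkg_demo AUTHID INVOKER IS
+    FUNCTION add1(n int) RETURN int;
+END pkg_demo;
+/
+```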
"a/content/zh/post/enmo/MogDBopenGauss\345\205\263\344\272\216PLSQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" "b/content/zh/post/enmo/MogDBopenGauss\345\205\263\344\272\216PLSQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..47c7765957fd188ef7346de18f0f88406c99c6cc --- /dev/null +++ "b/content/zh/post/enmo/MogDBopenGauss\345\205\263\344\272\216PLSQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" @@ -0,0 +1,209 @@ ++++ + +title = "MogDB/openGauss关于PL/SQL匿名块调用测试" + +date = "2022-04-11" + +tags = ["MogDB/openGauss关于PL/SQL匿名块调用测试"] + +archives = "2022-04" + +author = "恩墨交付团队" + +summary = "MogDB/openGauss关于PL/SQL匿名块调用测试" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" + ++++ + +# MogDB/openGauss关于PL/SQL匿名块调用测试 + +## 一、原理介绍 + +PL/SQL(Procedure Language/Structure Query Language)是标准SQL语言添加了过程化功能的一门程序设计语言。 + +单一的SQL语句只能进行数据操作,没有流程控制,无法开发复杂的应用。PL/SQL语言是结合了结构化查询与数据库自身过程控制为一体的强大语言。 + +### 1.PL/SQL原理 + +PL/SQL是一种块结构的语言,它将一组语句放在一个块中,一次性发送给服务器。 + +PL/SQL引擎分析收到PL/SQL语句块中的内容,把其中的过程控制语句由PL/SQL引擎自身去执行,把PL/SQL块中的SQL语句交给服务器的SQL语句执行器执行。 + +PL/SQL块发送给服务器后,先被编译然后执行,对于有名称的PL/SQL块(如子程序)可以单独编译,永久的存储在数据库中,随时准备执行。 + +PL/SQL是一种块结构的语言,一个PL/SQL程序包含了一个或者多个逻辑块,逻辑块中可以声明变量,变量在使用之前必须先声明。 + +### 2.PL/SQL特点 + +–与SQL紧密结合 +–支持面向对象编程 +–更好的性能 +–可移植性 +–安全性 + +### 3.语法结构 + +除了正常的执行程序外,PL/SQL还提供了专门的异常处理部分进行异常处理 + +```plsql +[DECLARE + --declaration statements] ① +BEGIN + --executable statements ② +[EXCEPTION + --exception statements] ③ +END; + +``` + +语法解析 +①声明部分:声明部分包含了变量和常量的定义。在此声明PL/SQL用到的变量,类型及游标,以及局部的存储过程和函数, +这个部分由关键字DECLARE开始,如果不声明变量或者常量,可以省略这部分。 +②执行部分:执行部分是 PL/SQL块的指令部分,由关键字BEGIN开始,关键字END结尾。 +所有的可执行PL/SQL语句都放在这一部分,该部分执行命令并操作变量。其他的PL/SQL块可以作为子块嵌套在该部分。 +PL/SQL块的执行部分是必选的。注意END关键字后面用分号结尾。 +③异常处理部分:该部分是可选的,该部分用EXCEPTION关键字把可执行部分分成两个小部分,之前的程序是正常运行的程序, +一旦出现异常就跳转到异常部分执行。 + +### 4.PL/SQL语句块的类型 + +1、匿名块 +2、命名块 +–①procedure 存储过程 +–②function 函数 +–③package 包 +–④trigger 触发器 + +原本大家可能一提到PL/SQL就会想到ORACLE,ORACLE的PL/SQL很强大,它的匿名块调用以及有名块调用可以解决很多问题,在MOGDB/openGauss中,其实也有这样的功能,如下,是我针对MOGDB/openGauss匿名块的一些测试。 + +## 二、匿名块测试 + +### 1.普通匿名块调用 + +```plsql +openGauss=# create table t1(a int ,b text); +CREATE TABLE + +openGauss=# DECLARE +openGauss-# PRAGMA AUTONOMOUS_TRANSACTION; +openGauss-# BEGIN +openGauss$# raise notice 'Normal anonymous block printing.'; +openGauss$# insert into t1 values(1,'I am lmj!'); +openGauss$# END; +openGauss$# / +NOTICE: Normal anonymous block printing. + +ANONYMOUS BLOCK EXECUTE +openGauss=# select * from t1; + a | b +---+----------- + 1 | I am lmj! +(1 row) + +``` + +### 2.匿名块和事务影响 + +启动一个事务后,执行一个自治事务匿名块,如果事务回滚,则匿名块不回滚。 + +```plsql +openGauss=# truncate table t1; +TRUNCATE TABLE + +openGauss=# START TRANSACTION; +START TRANSACTION +openGauss=# DECLARE +openGauss-# PRAGMA AUTONOMOUS_TRANSACTION; +openGauss-# BEGIN +openGauss$# raise notice 'an autonomous transaction anonymous block.'; +openGauss$# insert into t1 values(1,'it will commit!'); +openGauss$# END; +openGauss$# / +NOTICE: an autonomous transaction anonymous block. + +ANONYMOUS BLOCK EXECUTE +openGauss=# insert into t1 values(1,'you will rollback!'); +INSERT 0 1 +openGauss=# rollback; +ROLLBACK +openGauss=# select * from t1; + a | b +---+----------------- + 1 | it will commit! 
+
+When PL/SQL comes up, most people think of Oracle. Oracle's PL/SQL is powerful, and its anonymous and named block calls solve many problems; MogDB/openGauss actually offers the same capability. Below are my tests of anonymous blocks on MogDB/openGauss.
+
+## 2. Anonymous block tests
+
+### 1. Plain anonymous block call
+
+```plsql
+openGauss=# create table t1(a int ,b text);
+CREATE TABLE
+
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# BEGIN
+openGauss$# raise notice 'Normal anonymous block printing.';
+openGauss$# insert into t1 values(1,'I am lmj!');
+openGauss$# END;
+openGauss$# /
+NOTICE: Normal anonymous block printing.
+
+ANONYMOUS BLOCK EXECUTE
+openGauss=# select * from t1;
+ a |     b
+---+-----------
+ 1 | I am lmj!
+(1 row)
+
+```
+
+### 2. Anonymous blocks and transactions
+
+After starting a transaction, execute an autonomous-transaction anonymous block; if the transaction rolls back, the anonymous block does not:
+
+```plsql
+openGauss=# truncate table t1;
+TRUNCATE TABLE
+
+openGauss=# START TRANSACTION;
+START TRANSACTION
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# BEGIN
+openGauss$# raise notice 'an autonomous transaction anonymous block.';
+openGauss$# insert into t1 values(1,'it will commit!');
+openGauss$# END;
+openGauss$# /
+NOTICE: an autonomous transaction anonymous block.
+
+ANONYMOUS BLOCK EXECUTE
+openGauss=# insert into t1 values(1,'you will rollback!');
+INSERT 0 1
+openGauss=# rollback;
+ROLLBACK
+openGauss=# select * from t1;
+ a |        b
+---+-----------------
+ 1 | it will commit!
+(1 row)
+```
+
+### 3. Outer and inner anonymous blocks
+
+Here the outer anonymous block is an ordinary one while the inner block is an autonomous-transaction block; compare the transaction and anonymous-block rollback behaviour with example 2:
+
+```plsql
+openGauss=# truncate table t1;
+TRUNCATE TABLE
+
+openGauss=# DECLARE
+openGauss-# BEGIN
+openGauss$# DECLARE
+openGauss$# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss$# BEGIN
+openGauss$# raise notice 'just use call.';
+openGauss$# insert into t1 values(1,'can you rollback!');
+openGauss$# END;
+openGauss$# insert into t1 values(2,'I will rollback!');
+openGauss$# rollback;
+openGauss$# END;
+openGauss$# /
+NOTICE: just use call.
+ANONYMOUS BLOCK EXECUTE
+openGauss=# select * from t1;
+ a | b
+---+---
+(0 rows)
+```
+
+### 4. An anonymous block that directly executes an autonomous-transaction block and raises an exception
+
+```plsql
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# res int := 0;
+openGauss-# res2 int := 1;
+openGauss-# BEGIN
+openGauss$# raise notice 'just use call.';
+openGauss$# res2 = res2/res;
+openGauss$# END;
+openGauss$# /
+NOTICE: just use call.
+
+ERROR: ERROR: division by zero
+CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment
+```
+
+If the anonymous block fails during execution, the error is reported.
+
+### 5. Exception catching
+
+An exception raised during execution is caught by the anonymous block. As shown below, after the failing statement the 'autonomous throw exception' notice is raised:
+
+```plsql
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# res int := 0;
+openGauss-# res2 int := 1;
+openGauss-# BEGIN
+openGauss$# raise notice 'error catch.';
+openGauss$# res2 = res2/res;
+openGauss$# EXCEPTION
+openGauss$# WHEN division_by_zero THEN
+openGauss$# raise notice 'autonomous throw exception.';
+openGauss$# END;
+openGauss$# /
+NOTICE: error catch.
+
+NOTICE: autonomous throw exception.
+
+ANONYMOUS BLOCK EXECUTE
+```
diff --git "a/content/zh/post/enmo/MogDB\345\244\207\346\234\272\345\244\204\344\272\216standby need-repair(WAL)\347\212\266\346\200\201.md" "b/content/zh/post/enmo/MogDB\345\244\207\346\234\272\345\244\204\344\272\216standby need-repair(WAL)\347\212\266\346\200\201.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f93494f73530fc23212aa36a91a541e04e3e52be
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB\345\244\207\346\234\272\345\244\204\344\272\216standby need-repair(WAL)\347\212\266\346\200\201.md"
@@ -0,0 +1,42 @@
++++
+
+title = "MogDB Standby Stuck in standby need-repair(WAL) State"
+
+date = "2022-05-18"
+
+tags = ["MogDB Standby Stuck in standby need-repair(WAL) State"]
+
+archives = "2022-05"
+
+author = "云和恩墨"
+
+summary = "MogDB Standby Stuck in standby need-repair(WAL) State"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# MogDB Standby Stuck in standby need-repair(WAL) State
+
+Source: [https://www.modb.pro/db/402820](https://www.modb.pro/db/402820)
+
+## Symptom
+
+In a MogDB primary/standby environment, a check on the standby finds a Standby Need repair(WAL) fault.
+
+## Root cause
+
+A network fault, a full disk, or a similar problem breaks the connection between the primary and standby instances, their logs diverge, and the database cluster starts up abnormally.
+
+## Fix
+
+Rebuild the faulty node with the gs_ctl build -D command; for the exact procedure see the build options in the MogDB tool reference.
+
+## Primary/standby status check
+
+![img](../images/20220512-6b05767d-e69a-450b-89e2-ef3a98cc24dd.png)
+
+## Result after the repair
+
+![img](../images/20220512-e61af498-f6ce-4dd2-a96b-3b57e1500d96.png)
diff --git "a/content/zh/post/enmo/MogDB\345\256\236\344\276\213\347\232\204\345\210\240\351\231\244.md" "b/content/zh/post/enmo/MogDB\345\256\236\344\276\213\347\232\204\345\210\240\351\231\244.md"
new file mode 100644
index 0000000000000000000000000000000000000000..2713d436e679e1e80270337ed055a0b175aff0ad
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB\345\256\236\344\276\213\347\232\204\345\210\240\351\231\244.md"
@@ -0,0 +1,130 @@
++++
+
+title = "Deleting a MogDB Instance"
+
+date = "2022-04-25"
+
+tags = ["Deleting a MogDB Instance"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary ="Deleting a MogDB Instance"
+
+img = "/zh/post/enmo/title/img.png"
+
"10:20" ++++ + +# MogDB实例的删除 + +本文出处:[https://www.modb.pro/db/245460](https://www.modb.pro/db/245460) + +1 查询mogdb的实例进程 +![1.png](../images/20220206-c5a88d2a-a455-4473-ab40-4306878f1dd9.png) + +2 根据clusterconfig.xml文件可以看出gaussdbAppPath,gaussdbLogPath,gaussdbToolPath,corePath,clusterType以及dataNode1的位置。 + +- gaussdbAppPath:app的的安装路径 +- gaussdbLogPath:mogdb的数据库的日志目录 +- gaussdbToolPath:mogdb的tool的所在目录 +- corePath:coredump的目录 +- clusterType:集群类型,但实例和HA +- dataNode1:数据目录的路径 +- clusterName:集群的名称 +- nodeNames:节点的名称,其实就是主机名称 +- backIp1s:后端主机的ip地址 +- sn:device的设备号 +- dataPortBase:mogdb的对外提供服务的端口 + +3 卸载mogdb + +- 现正常关闭mogdb数据库 + + ``` + [omm@mogdb-001 mogdb]$ ps -ef |grep mogdb + omm 833 25700 0 17:45 pts/1 00:00:00 grep --color=auto mogdb + omm 28491 1 2 16:18 pts/1 00:01:46 /mogdb/mogdb/app/bin/mogdb -D /mogdb/data/db1 + + [omm@mogdb-001 mogdb]$ gs_om -t stop + + Stopping cluster. + ========================================= + + Successfully stopped cluster. + ========================================= + + End stop cluster. + [omm@mogdb-001 mogdb]$ ps -ef |grep mogdb + omm 1408 25700 0 17:45 pts/1 00:00:00 grep --color=auto mogdb + ``` + +- 先删除gaussdbAppPath,gaussdbLogPath,gaussdbToolPath,corePath路径里面的文件 + + ``` + [omm@mogdb-001 mogdb]$ cd /mogdb/ + [omm@mogdb-001 mogdb]$ ls + data mogdb + [omm@mogdb-001 mogdb]$ cd mogdb/ + [omm@mogdb-001 mogdb]$ ls + app app_01071903 corefile tools + [omm@mogdb-001 mogdb]$ rm -rf tools app_01071903 corefile app + [omm@mogdb-001 mogdb]$ ls + ``` + + 删除gaussdbLogPath的日志文件 + + ``` + [omm@mogdb-001 mogdb]$ ls -ld /var/log/mogdb/omm/* + drwx------ 3 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/asp_data + drwx------ 7 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/bin + drwx------ 3 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/gs_profile + drwx------ 2 omm dbgrp 4096 Feb 6 17:45 /var/log/mogdb/omm/om + drwx------ 3 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/pg_audit + drwx------ 3 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/pg_log + drwx------ 3 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/pg_perf + drwx------ 3 omm dbgrp 4096 Feb 6 16:18 /var/log/mogdb/omm/sql_monitor + [omm@mogdb-001 mogdb]$ rm -rf /var/log/mogdb/omm/* + [omm@mogdb-001 mogdb]$ ls -ld /var/log/mogdb/omm/* + ls: cannot access /var/log/mogdb/omm/*: No such file or directory + [omm@mogdb-001 mogdb]$ ls -ld /var/log/mogdb/omm/ + drwx------ 2 omm dbgrp 4096 Feb 6 17:49 /var/log/mogdb/omm/ + ``` + +> 注意:如果此时觉得卸载成功,那就高兴的太早了。因为在此安装的时候有一个步骤(set_finish_flag)会去检查环境变量,如果存在以前的会停止预安装,这样就导致安装失败,所以此时还需要清理一个文件,叫omm用户家目录下面的.bashrc文件,正常的文件为 + +``` +[omm@mogdb-001 ~]$ cat .bashrc + +# .bashrc + +# Source global definitions + +if [ -f /etc/bashrc ]; then +. 
+. /etc/bashrc
+fi
+```
+
+```
+# Uncomment the following line if you don't like systemctl's auto-paging feature:
+# export SYSTEMD_PAGER=
+
+# User specific aliases and functions
+export GPHOME=/mogdb/mogdb/tools
+export PATH=$GPHOME/script/gspylib/pssh/bin:$GPHOME/script:$PATH
+export LD_LIBRARY_PATH=$GPHOME/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=$GPHOME/lib
+export GAUSSHOME=/mogdb/mogdb/app
+export PATH=$GAUSSHOME/bin:$PATH
+export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
+export S3_CLIENT_CRT_FILE=$GAUSSHOME/lib/client.crt
+export GAUSS_VERSION=2.1.0
+export PGHOST=/mogdb/mogdb/tools/omm_mppdb
+export GAUSSLOG=/var/log/mogdb/omm
+umask 077
+export GAUSS_ENV=2
+export GS_CLUSTER_NAME=dbCluster
+```
+
+So as the last step of the uninstall, check this file and remove all of these export environment variables; a reinstall will then go through without any problem.
diff --git "a/content/zh/post/enmo/MogDB\346\212\245\351\224\231 - ERROR role cannot be dropped because some objects depend on it DETAIL 1 object in database\345\244\204\347\220\206.md" "b/content/zh/post/enmo/MogDB\346\212\245\351\224\231 - ERROR role cannot be dropped because some objects depend on it DETAIL 1 object in database\345\244\204\347\220\206.md"
new file mode 100644
index 0000000000000000000000000000000000000000..b91d9de82fc1cefc77dd09bc9f09591fd4ac6551
--- /dev/null
+++ "b/content/zh/post/enmo/MogDB\346\212\245\351\224\231 - ERROR role cannot be dropped because some objects depend on it DETAIL 1 object in database\345\244\204\347\220\206.md"
@@ -0,0 +1,100 @@
++++
+
+title = "Handling the MogDB Error: role cannot be dropped because some objects depend on it DETAIL: 1 object in database"
+
+date = "2022-04-13"
+
+tags = ["Handling the MogDB Error: role cannot be dropped because some objects depend on it DETAIL: 1 object in database"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "Handling the MogDB Error: role cannot be dropped because some objects depend on it DETAIL: 1 object in database"
+
+img = "/zh/post/enmo/title/img6.png"
+
+times = "10:20"
++++
+
+# Handling the MogDB Error: role cannot be dropped because some objects depend on it DETAIL: 1 object in database
+
+Source: https://www.modb.pro/db/336198
+
+Version: MogDB V2.0.1
+
+**Dropping the user fails with an error:**
+
+```sql
+postgres=# \dg
+                                                      List of roles
+ Role name |                                            Attributes                                                            | Member of
+-----------+------------------------------------------------------------------------------------------------------------------+-----------
+ itsm      | Create DB, Cannot login                                                                                          | {}
+ omm       | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {}
+
+postgres=# drop role itsm;
+ERROR: role "itsm" cannot be dropped because some objects depend on it
+DETAIL: 1 object in database itsm
+```
+
+**Check the workload processes; if sessions for the user exist, disconnect them:**
+
+```sql
+postgres=# select datname,usename,state,count(*)
+  from pg_stat_activity
+  group by datname,usename,state order by 4 desc;
+
+ datname  | usename | state  | count
+----------+---------+--------+-------
+ postgres | omm     | active |     3
+ postgres | omm     | idle   |     2
+ itsm     | omm     | active |     1
+(3 rows)
+```
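+
+A sketch of the disconnect step (assumptions: the pid column of pg_stat_activity and the pg_terminate_backend() function, both inherited from PostgreSQL, are available):
+
+```sql
+-- Force-disconnect every session connected to the itsm database or running as itsm.
+select pg_terminate_backend(pid)
+  from pg_stat_activity
+ where datname = 'itsm' or usename = 'itsm';
+```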
+
+**Check the objects of the itsm role:**
+
+```sql
+-- Check table ownership
+select relname,relnamespace,relkind from pg_class
+  where relowner=(select oid from pg_roles where rolname='itsm')
+  order by 3 desc;
+
+-- Check the user's system privileges
+SELECT * FROM pg_roles WHERE rolname='itsm';
+
+-- Check the user's table privileges
+select * from information_schema.table_privileges
+  where grantee='itsm';
+
+-- Check the user's usage privileges
+select * from information_schema.usage_privileges
+  where grantee='itsm';
+
+-- Check the user's execute privileges on stored procedures and functions
+select * from information_schema.routine_privileges
+  where grantee='itsm';
+
+-- Check the user's privileges on table columns
+select * from information_schema.column_privileges
+  where grantee='itsm';
+
+-- Check USAGE privileges granted on user-defined types
+select * from information_schema.udt_privileges
+  where grantee='itsm';
+```
+
+**After revoking the user's privileges, reconnecting to the database is sometimes needed for the change to take effect:**
+
+```sql
+postgres=# revoke all on database itsm from itsm;
+REVOKE
+postgres=# \c itsm
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "itsm" as user "omm".
+itsm=# drop user itsm;
+DROP ROLE
+```
+
+Summary: before dropping a user, disconnect all of its business connections. If the object's exact privileges are unclear, revoke all privileges on the object with "revoke all on [schema]/[database] from rolename;".
diff --git "a/content/zh/post/enmo/ODBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB openGauss.md" "b/content/zh/post/enmo/ODBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB openGauss.md"
new file mode 100644
index 0000000000000000000000000000000000000000..68178f22d5e5813faf699c15a96d198b7e051c9d
--- /dev/null
+++ "b/content/zh/post/enmo/ODBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB openGauss.md"
@@ -0,0 +1,130 @@
++++
+
+title = "Connecting to MogDB/openGauss with the ODBC Driver"
+
+date = "2022-04-07"
+
+tags = ["Connecting to MogDB/openGauss with the ODBC Driver"]
+
+archives = "2022-04"
+
+author = "云和恩墨交付战队"
+
+summary = "Connecting to MogDB/openGauss with the ODBC Driver"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
+
++++
+
+# Connecting to MogDB/openGauss with the ODBC Driver
+
+## 1. Environment
+
+```sql
+[root@node1 ~]# cat /etc/redhat-release
+CentOS Linux release 7.6.1810 (Core)
+```
+
+## 2. Installing unixODBC
+
+### With network access (otherwise skip to step 3)
+
+```sql
+yum install -y unixODBC.x86_64
+```
+
+### Without network access
+
+#### 1. Download and unpack the package
+
+```sql
+wget https://sourceforge.net/projects/unixodbc/files/unixODBC/2.3.7/unixODBC-2.3.7pre.tar.gz/download --no-check-certificate
+tar -zxvf unixODBC-2.3.7pre.tar.gz
+```
+
+#### 2. Build ODBC
+
+```sql
+# Edit the configure file: find LIB_VERSION and set it to "1:0:0" so the build produces *.so.1 shared libraries, matching the dependency of psqlodbcw.so.
+cd unixODBC-2.3.7pre/
+./configure --enable-gui=no
+make
+make install
+```
+
+## 3. Install the MogDB ODBC driver files on the client
+
+```sql
+wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.0.0-ODBC.tar.gz
+tar -zxvf openGauss-2.0.0-ODBC.tar.gz
+# Copy the files under the unpacked lib directory, and the lib under the odbc directory, into /usr/local/lib/.
+```
+
+## 4. Configure the data source
+
+```sql
+[root@node1 ~]# cat /usr/local/etc/odbc.ini
+[MGODBC]
+Driver=TEST
+Servername=8.131.53.xxx (database IP)
+Database=test_db (database name)
+Username=test_usr (database user)
+Password=test@123 (database password)
+Port=26000 (database port)
+Sslmode=allow
+
+[root@node1 ~]# tail -3 /usr/local/etc/odbcinst.ini
+[TEST]
+Driver64=/usr/local/lib/psqlodbcw.so
+setup=/usr/local/lib/psqlodbcw.so
+```
+
+## 5. Database configuration
+
+```sql
+# A simple way to configure it (guc parameters can also be used):
+[omm@node1 data]$ tail -5 postgresql.conf
+port=26000
+listen_addresses = '0.0.0.0'
+password_encryption_type = 0
+log_directory = 'pg_log'
+remote_read_mode=non_authentication
+[omm@node1 data]$ tail -1 pg_hba.conf
+host all all 0.0.0.0/0 md5
+# Restart the database
+gs_om -t stop
+gs_om -t start
+```
+
+## 6. Configure environment variables on the client
+
+```sql
+[root@node1 ~]# tail -3 .bashrc
+export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
+export ODBCSYSINI=/usr/local/etc
+export ODBCINI=/usr/local/etc/odbc.ini
+```
+
+## 7. Test the data source
+
+```sql
+[root@node1 ~]# isql -v MGODBC
++---------------------------------------+
+| Connected!                            |
+|                                       |
+| sql-statement                         |
+| help [tablename]                      |
+| quit                                  |
+|                                       |
++---------------------------------------+
+SQL>
+# The connection succeeded.
+```
+
+## 8. Summary
+
+```sql
+# This post shows how to connect to MogDB through ODBC; for more detail see the ODBC data source configuration in the official docs at https://docs.mogdb.io/
+```
diff --git "a/content/zh/post/enmo/Psycopg\350\277\236\346\216\245Mogdb openGauss.md" "b/content/zh/post/enmo/Psycopg\350\277\236\346\216\245Mogdb openGauss.md"
new file mode 100644
index 0000000000000000000000000000000000000000..11c430fed60c86f92764bea3e47f7512bcf3c2c2
--- /dev/null
+++ "b/content/zh/post/enmo/Psycopg\350\277\236\346\216\245Mogdb openGauss.md"
@@ -0,0 +1,164 @@
++++
+
+title = "Connecting to MogDB/openGauss with Psycopg"
+
+date = "2022-04-08"
+
+tags = ["Connecting to MogDB/openGauss with Psycopg"]
+
+archives = "2022-04"
+
+author = "云和恩墨交付团队"
+
+summary = "Connecting to MogDB/openGauss with Psycopg"
+
+img = "/zh/post/enmo/title/img6.png"
+
+times = "10:20"
+
++++
+
+# Connecting to MogDB/openGauss with Psycopg
+
+## 1. Introduction
+
+Psycopg is a Python API for executing SQL statements; it provides a unified access interface for PostgreSQL and GaussDB databases, on which applications can base their data operations. Psycopg2 is a wrapper around libpq, implemented mainly in C, making it both efficient and secure. It offers client-side and server-side cursors, asynchronous communication and notifications, and "COPY TO/COPY FROM" support. Many Python types are adapted to PostgreSQL data types out of the box, and the flexible object adaptation system can be extended and customized. Psycopg2 is compatible with Unicode and Python 3. MogDB supports the psycopg2 feature set, including connections in SSL mode.
+
+## 2. Environment
+
+```
+[root@mogdb-kernel-0004 Psycopg]# python3
+Python 3.6.8 (default, Nov 16 2020, 16:55:22)
+[root@mogdb-kernel-0004 Psycopg]# cat /etc/os-release
+[root@mogdb-kernel-0004 Psycopg]# cat /etc/redhat-release
+CentOS Linux release 7.6.1810 (Core)
+[root@mogdb-kernel-0004 Psycopg]# lscpu
+Architecture: x86_64
+CPU op-mode(s): 32-bit, 64-bit
+```
+
+## 3. Download the Python driver
+
+```
+[root@mogdb-kernel-0004 Psycopg]# wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.1.0/x86/openGauss-2.1.0-CentOS-x86_64-Python.tar.gz
+[root@mogdb-kernel-0004 Psycopg]# ls
+openGauss-2.1.0-CentOS-x86_64-Python.tar.gz
+[root@mogdb-kernel-0004 Psycopg]# tar -xf openGauss-2.1.0-CentOS-x86_64-Python.tar.gz
+[root@mogdb-kernel-0004 Psycopg]# ls
+lib openGauss-2.1.0-CentOS-x86_64-Python.tar.gz psycopg2
+[root@mogdb-kernel-0004 Psycopg]#
+```
+
+Note: download the driver from https://opengauss.org/zh/download.html, picking the build that matches your operating system version.
+
+![image20220330140613272.png](../images/20220330-26a4d82f-650b-49dc-a859-630df55c0aa2.png)
+
+## 4. Install the driver
+
+### (1) Find the Python installation
+
+```
+[root@mogdb-kernel-0004 Psycopg]# whereis python
+python: /usr/bin/python3.6 /usr/bin/python2.7 /usr/bin/python3.6m-config /usr/bin/python /usr/bin/python3.6m-x86_64-config /usr/bin/python2.7-config /usr/bin/python3.6-config /usr/bin/python3.6m /usr/lib/python3.6 /usr/lib/python2.7 /usr/lib64/python3.6 /usr/lib64/python2.7 /etc/python /usr/local/lib/python3.6 /usr/include/python2.7 /usr/include/python3.6m /usr/share/man/man1/python.1.gz
+```
+
+### (2) Copy the driver into Python's site-packages directory
+
+```
+[root@mogdb-kernel-0004 Psycopg]# ls
+lib openGauss-2.1.0-CentOS-x86_64-Python.tar.gz psycopg2
+[root@mogdb-kernel-0004 Psycopg]# cp -r psycopg2/ /usr/lib/python3.6/site-packages/
+```
+
+### (3) Create a database user for the connection
+
+```sql
+openGauss=# create database test_db;
+CREATE DATABASE
+openGauss=# create user test_usr password 'test@123';
+NOTICE: The encrypted password contains MD5 ciphertext, which is not secure.
+CREATE ROLE
+openGauss=# alter user test_usr sysadmin;
+ALTER ROLE
+```
+
+### (4) Write the Python file
+
+```sql
+import psycopg2
+conn=psycopg2.connect(database="test_db",user="test_usr",password="test@123",host="<local IP>",port=26000)
+print("Conn database successfully")
+cur=conn.cursor()
+cur.execute("CREATE TABLE student(id integer,name varchar,sex varchar);")
+cur.execute("INSERT INTO student(id,name,sex) VALUES(%s,%s,%s)",(1,'Aspirin','M'))
+cur.execute("INSERT INTO student(id,name,sex) VALUES(%s,%s,%s)",(2,'Taxol','F'))
+cur.execute('SELECT id,name,sex FROM student')
+results=cur.fetchall()
+print (results)
+conn.commit()
+cur.close()
+conn.close()
+```
+
+### (5) Test the connection
+
+```sql
+[root@mogdb-kernel-0004 Psycopg]# python3 conn.py
+Conn database successfully
+[(1, 'Aspirin', 'M'), (2, 'Taxol', 'F')]
+[root@mogdb-kernel-0004 Psycopg]#
+```
+
+## 5. Common errors
+
+### (1) Missing dependency
+
+```sql
+[root@node1 ~]# python3 conn.py
+Traceback (most recent call last):
+  File "conn.py", line 1, in <module>
+    import psycopg2
+  File "/root/psycopg2/__init__.py", line 51, in <module>
+    from psycopg2._psycopg import ( # noqa
+ImportError: libpq.so.5: cannot open shared object file: No such file or directory
+```
+
+Fix:
+
+```
+[root@mogdb-kernel-0004 ~]# yum install -y libpq.so.5*
+```
+
+### (2) libpq version too old
+
+```sql
+[root@mogdb-kernel-0004 Psycopg]# python3 conn.py
+Traceback (most recent call last):
+  File "conn.py", line 2, in <module>
+    conn=psycopg2.connect(database="test_db",user="test_usr",password="test@123",host="localhost",port=26000)
+  File "/root/Psycopg/psycopg2/__init__.py", line 122, in connect
+    conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
+psycopg2.OperationalError: SCRAM authentication requires libpq version 10 or above
+```
+
+Fix:
+
+```sql
+# The libpq version is too old, but "yum install postgresql-devel" only provides 9.2.24.
+# 1. Add the repository:
+rpm -Uvh https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
+[root@mogdb-kernel-0004 Psycopg]# rpm -Uvh https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
+Retrieving https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
+warning: /var/tmp/rpm-tmp.K5x7Bw: Header V4 DSA/SHA1 Signature, key ID 442df0f8: NOKEY
+Preparing... ################################# [100%]
+Updating / installing...
+   1:pgdg-redhat-repo-42.0-24 ################################# [100%]
+# 2. Install the newer version:
+yum install postgresql10-devel
+
+```
+
+### (3) Authentication failure
+
+```sql
+[root@mogdb-kernel-0004 Psycopg]# python3 conn.py
+Traceback (most recent call last):
+  File "conn.py", line 2, in <module>
+    conn=psycopg2.connect(database="test_db",user="test_usr",password="test@123",host="localhost",port=26000)
+  File "/root/Psycopg/psycopg2/__init__.py", line 122, in connect
+    conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
+psycopg2.OperationalError: none of the server's SASL authentication mechanisms are supported
+```
+
+Fix:
+
+```
+# The host cannot be resolved via localhost; change it to the machine's actual IP.
+conn=psycopg2.connect(database="test_db",user="test_usr",password="test@123",host="localhost",port=26000)
+# After the fix:
+conn=psycopg2.connect(database="test_db",user="test_usr",password="test@123",host="172.16.0.xxx",port=26000)
+```
diff --git "a/content/zh/post/enmo/go\350\257\255\350\250\200\350\277\236\346\216\245Mogdb.md" "b/content/zh/post/enmo/go\350\257\255\350\250\200\350\277\236\346\216\245Mogdb.md"
new file mode 100644
index 0000000000000000000000000000000000000000..9c8ccda58bf0d16c77576e9c39fa7a52dce0c905
--- /dev/null
+++ "b/content/zh/post/enmo/go\350\257\255\350\250\200\350\277\236\346\216\245Mogdb.md"
@@ -0,0 +1,119 @@
++++
+
+title = "Connecting to MogDB from Go"
+
+date = "2022-05-05"
+
+tags = ["Connecting to MogDB from Go"]
+
+archives = "2022-05"
+
+author = "云和恩墨"
+
+summary = "Connecting to MogDB from Go"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# Connecting to MogDB from Go
+
+Source: [https://www.modb.pro/db/388092](https://www.modb.pro/db/388092)
+
+## 1. Environment
+
+```
+[root@mogdb-kernel-0004 src]# go version    // go installed via yum
+go version go1.16.13 linux/amd64
+[root@mogdb-kernel-0004 src]# cat /etc/redhat-release
+CentOS Linux release 7.6.1810 (Core)
+```
+
+## 2. Get a pg driver
+
+```
+https://github.com/bmizerany/pq          supports database/sql, pure Go
+https://github.com/jbarham/gopgsqldriver supports database/sql, pure Go
+https://github.com/lxn/go-pgsql          supports database/sql, pure Go
+```
+
+### (1) Download the driver package automatically
+
+```
+go get github.com/bmizerany/pq
+go get github.com/jbarham/gopgsqldriver
+go get github.com/lxn/go-pgsql
+# Pick one of the three. Because of network issues the automatic download often fails, so manual installation is described below as well.
+```
+
+### (2) Install the driver package manually
+
+Download the driver package manually from GitHub and unpack it under the src folder of the golang installation directory:
+
+```
+[root@mogdb-kernel-0004 src]# ll pq-master.zip
+-rw-r--r-- 1 root root 17412 Mar 31 09:45 pq-master.zip
+[root@mogdb-kernel-0004 src]# pwd
+/usr/lib/golang/src
+[root@mogdb-kernel-0004 src]# tree pq-master
+pq-master
+├── buf.go
+├── conn.go
+├── conn_test.go
+├── encode.go
+├── encode_test.go
+├── error.go
+├── LICENSE.md
+├── README.md
+├── types.go
+├── url.go
+└── url_test.go
+0 directories, 11 files
+```
+
+![image20220331094632736.png](../images/20220331-af3ac54a-3daf-41db-b682-f65ac99c4596.png)
+
+I chose the github.com/bmizerany/pq driver, so the directory github.com/bmizerany/pq must be created under src:
+
+```
+[root@mogdb-kernel-0004 src]# mkdir -p github.com/bmizerany/pq
+[root@mogdb-kernel-0004 src]# cd github.com/bmizerany/pq/
+[root@mogdb-kernel-0004 pq]# mv /usr/lib/golang/src/pq-master/* ./    // move the pq-master driver files into github.com/bmizerany/pq
+[root@mogdb-kernel-0004 pq]# pwd
+/usr/lib/golang/src/github.com/bmizerany/pq
+[root@mogdb-kernel-0004 pq]# ll
+total 72
+-rw-r--r-- 1 root root  1295 Nov 29  2013 buf.go
+-rw-r--r-- 1 root root 12225 Nov 29  2013 conn.go
+-rw-r--r-- 1 root root  7939 Nov 29  2013 conn_test.go
+-rw-r--r-- 1 root root  2428 Nov 29  2013 encode.go
+-rw-r--r-- 1 root root  3417 Nov 29  2013 encode_test.go
+-rw-r--r-- 1 root root  1757 Nov 29  2013 error.go
+-rw-r--r-- 1 root root  1058 Nov 29  2013 LICENSE.md
+-rw-r--r-- 1 root root  2844 Nov 29  2013 README.md
+-rw-r--r-- 1 root root 16426 Nov 29  2013 types.go
+-rw-r--r-- 1 root root  1242 Nov 29  2013 url.go
+-rw-r--r-- 1 root root  1181 Nov 29  2013 url_test.go
+[root@mogdb-kernel-0004 pq]# go install github.com/bmizerany/pq    // manual install; no output means the install succeeded
+[root@mogdb-kernel-0004 pq]#
+```
+
+## 3. Create a simple test table
+
+```
+// Create the user
+openGauss=# create database test_db;
+CREATE DATABASE
+openGauss=# create user test_usr password 'test@123';
+NOTICE: The encrypted password contains MD5 ciphertext, which is not secure.
+CREATE ROLE
+openGauss=# alter user test_usr sysadmin;
+ALTER ROLE
+// Connect as the user and create the table
+[omm@mogdb-kernel-0004 ~]$ gsql -d test_db -p 26000 -r -U test_usr -W test@123
+gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+test_db=> create table student(id int,name varchar(20));
+CREATE TABLE
+```
+
+## 4. Write the Go file
+
+```
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "github.com/bmizerany/pq"
+)
+
+// database connection info
+const (
+	host     = "172.16.0.XXX"
+	port     = 26000
+	user     = "test_usr"
+	password = "test@123"
+	dbname   = "test_db"
+)
+
+// connect to the database
+func connectDB() *sql.DB {
+	psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
+		"password=%s dbname=%s sslmode=disable",
+		host, port, user, password, dbname)
+	db, err := sql.Open("postgres", psqlInfo)
+	if err != nil {
+		panic(err)
+	}
+	err = db.Ping()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("Successfully connected!")
+	return db
+}
+
+// insert data into the table
+func insertUser(db *sql.DB) {
+	stmt, err := db.Prepare("insert into student(id,name) values($1,$2)")
+	if err != nil {
+		log.Fatal(err)
+	}
+	_, err = stmt.Exec(1, "mgr")
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		fmt.Println("insert into student success!")
+	}
+}
+
+// query data
+func query(db *sql.DB) {
+	var id, name string
+	rows, err := db.Query(" select * from student where id=$1", "1")
+	if err != nil {
+		fmt.Println(err)
+	}
+	defer rows.Close()
+	for rows.Next() {
+		err := rows.Scan(&id, &name)
+		if err != nil {
+			fmt.Println(err)
+		}
+	}
+	err = rows.Err()
+	if err != nil {
+		fmt.Println(err)
+	}
+	fmt.Println(id, name)
+}
+
+func main() {
+	db := connectDB()
+	insertUser(db)
+	query(db)
+}
+```
+
+## 5. Test the connection
+
+```
+[root@mogdb-kernel-0004 src]# go run conn.go
+Successfully connected!
+insert into student success!
+1 mgr +测试成功 +``` + diff --git a/content/zh/post/enmo/images/20210603-9b70ba89-658c-4902-818a-099c359808b4.png b/content/zh/post/enmo/images/20210603-9b70ba89-658c-4902-818a-099c359808b4.png new file mode 100644 index 0000000000000000000000000000000000000000..6a9a66dc9b7b5631ae9fafda2e68431fd74a6c87 Binary files /dev/null and b/content/zh/post/enmo/images/20210603-9b70ba89-658c-4902-818a-099c359808b4.png differ diff --git a/content/zh/post/enmo/images/20210629-1d1c8bb8-e200-42a0-a0bb-6910a99bf953.png b/content/zh/post/enmo/images/20210629-1d1c8bb8-e200-42a0-a0bb-6910a99bf953.png new file mode 100644 index 0000000000000000000000000000000000000000..de84a1ae526e68a6f6a2da428b8a3964beab655c Binary files /dev/null and b/content/zh/post/enmo/images/20210629-1d1c8bb8-e200-42a0-a0bb-6910a99bf953.png differ diff --git a/content/zh/post/enmo/images/20210629-2a61c1f1-65da-4afa-abe9-70b493568e02.png b/content/zh/post/enmo/images/20210629-2a61c1f1-65da-4afa-abe9-70b493568e02.png new file mode 100644 index 0000000000000000000000000000000000000000..2bf8360eabe951d405a5bc313b83bb8e06fbe731 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-2a61c1f1-65da-4afa-abe9-70b493568e02.png differ diff --git a/content/zh/post/enmo/images/20210629-2c2280b0-1281-4b6e-8f2f-7127449786eb.png b/content/zh/post/enmo/images/20210629-2c2280b0-1281-4b6e-8f2f-7127449786eb.png new file mode 100644 index 0000000000000000000000000000000000000000..d270dc9c5da0053fec079431577ba54d6dc5ed96 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-2c2280b0-1281-4b6e-8f2f-7127449786eb.png differ diff --git a/content/zh/post/enmo/images/20210629-377faf98-48c6-45e2-a255-1885be07b69f.png b/content/zh/post/enmo/images/20210629-377faf98-48c6-45e2-a255-1885be07b69f.png new file mode 100644 index 0000000000000000000000000000000000000000..20e54fb88734e24d1145cb9f1b94cb02abf736a8 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-377faf98-48c6-45e2-a255-1885be07b69f.png differ diff --git a/content/zh/post/enmo/images/20210629-9e45d800-570d-49fb-89d8-8ba12492a0de.png b/content/zh/post/enmo/images/20210629-9e45d800-570d-49fb-89d8-8ba12492a0de.png new file mode 100644 index 0000000000000000000000000000000000000000..4fb5dbdbe6655758df7bc19763abfc8a8b316d76 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-9e45d800-570d-49fb-89d8-8ba12492a0de.png differ diff --git a/content/zh/post/enmo/images/20210629-b61ad3f2-4e49-407a-a35f-b19286dcbcba.png b/content/zh/post/enmo/images/20210629-b61ad3f2-4e49-407a-a35f-b19286dcbcba.png new file mode 100644 index 0000000000000000000000000000000000000000..d8418b2383cfb511a90c9697a50ffe9c2b412ba3 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-b61ad3f2-4e49-407a-a35f-b19286dcbcba.png differ diff --git a/content/zh/post/enmo/images/20210629-ba33f624-8d97-4dfc-b0bc-5fdcb589e4bc.png b/content/zh/post/enmo/images/20210629-ba33f624-8d97-4dfc-b0bc-5fdcb589e4bc.png new file mode 100644 index 0000000000000000000000000000000000000000..79a9216d20281ab7f40e7f43643d878528a6942c Binary files /dev/null and b/content/zh/post/enmo/images/20210629-ba33f624-8d97-4dfc-b0bc-5fdcb589e4bc.png differ diff --git a/content/zh/post/enmo/images/20210629-d00d94bf-ee48-44f2-b3ae-57ab3220f9b2.png b/content/zh/post/enmo/images/20210629-d00d94bf-ee48-44f2-b3ae-57ab3220f9b2.png new file mode 100644 index 0000000000000000000000000000000000000000..b2432325dd808242a1a8050aeaa68f9929002058 Binary files /dev/null and 
b/content/zh/post/enmo/images/20210629-d00d94bf-ee48-44f2-b3ae-57ab3220f9b2.png differ diff --git a/content/zh/post/enmo/images/20210629-d2a7ddda-b79e-4702-a44c-7b11d54d9570.png b/content/zh/post/enmo/images/20210629-d2a7ddda-b79e-4702-a44c-7b11d54d9570.png new file mode 100644 index 0000000000000000000000000000000000000000..84fe83e9f583f416e8aa2332cbaa3e54605a0bd6 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-d2a7ddda-b79e-4702-a44c-7b11d54d9570.png differ diff --git a/content/zh/post/enmo/images/20210629-e42f737e-188e-407f-8b72-fe1315f8d044.png b/content/zh/post/enmo/images/20210629-e42f737e-188e-407f-8b72-fe1315f8d044.png new file mode 100644 index 0000000000000000000000000000000000000000..0eff27ae527b9b2a4ecca8d64d2bd936d7935bb8 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-e42f737e-188e-407f-8b72-fe1315f8d044.png differ diff --git a/content/zh/post/enmo/images/20210629-f0069048-4ae1-4de4-9891-bc626fbfc170.png b/content/zh/post/enmo/images/20210629-f0069048-4ae1-4de4-9891-bc626fbfc170.png new file mode 100644 index 0000000000000000000000000000000000000000..bb5632a4823140e10831eadaf8cc2fcf37120fd5 Binary files /dev/null and b/content/zh/post/enmo/images/20210629-f0069048-4ae1-4de4-9891-bc626fbfc170.png differ diff --git a/content/zh/post/enmo/images/20211124-cc72a2d2-2d38-4bba-b79a-0e3b8ddbc7ff.png b/content/zh/post/enmo/images/20211124-cc72a2d2-2d38-4bba-b79a-0e3b8ddbc7ff.png new file mode 100644 index 0000000000000000000000000000000000000000..4c6f9868cc334c47a64291426d4862e93f447376 Binary files /dev/null and b/content/zh/post/enmo/images/20211124-cc72a2d2-2d38-4bba-b79a-0e3b8ddbc7ff.png differ diff --git a/content/zh/post/enmo/images/20220206-c5a88d2a-a455-4473-ab40-4306878f1dd9.png b/content/zh/post/enmo/images/20220206-c5a88d2a-a455-4473-ab40-4306878f1dd9.png new file mode 100644 index 0000000000000000000000000000000000000000..1259843698bef90bb257a42ace0c65a0b3b02edb Binary files /dev/null and b/content/zh/post/enmo/images/20220206-c5a88d2a-a455-4473-ab40-4306878f1dd9.png differ diff --git a/content/zh/post/enmo/images/20220330-26a4d82f-650b-49dc-a859-630df55c0aa2.png b/content/zh/post/enmo/images/20220330-26a4d82f-650b-49dc-a859-630df55c0aa2.png new file mode 100644 index 0000000000000000000000000000000000000000..90046de6188211440794c86573a99214c3e54103 Binary files /dev/null and b/content/zh/post/enmo/images/20220330-26a4d82f-650b-49dc-a859-630df55c0aa2.png differ diff --git a/content/zh/post/enmo/images/20220331-0288c587-ab25-49e4-8f50-d5c7a1993ab2.png b/content/zh/post/enmo/images/20220331-0288c587-ab25-49e4-8f50-d5c7a1993ab2.png new file mode 100644 index 0000000000000000000000000000000000000000..bad73e30a62c607f08ebdbb7f20c8ed12d67c278 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-0288c587-ab25-49e4-8f50-d5c7a1993ab2.png differ diff --git a/content/zh/post/enmo/images/20220331-23563656-7787-4400-8913-e05f579d5469.png b/content/zh/post/enmo/images/20220331-23563656-7787-4400-8913-e05f579d5469.png new file mode 100644 index 0000000000000000000000000000000000000000..04e02c9486f2cbd4dfa22bc9233ffa0c3d45aea7 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-23563656-7787-4400-8913-e05f579d5469.png differ diff --git a/content/zh/post/enmo/images/20220331-7bfba1cd-8252-4498-a4e3-cfc32bd04b29.png b/content/zh/post/enmo/images/20220331-7bfba1cd-8252-4498-a4e3-cfc32bd04b29.png new file mode 100644 index 0000000000000000000000000000000000000000..f539daf0794e0363e678a9456187bf9166b6118c Binary files /dev/null and 
b/content/zh/post/enmo/images/20220331-7bfba1cd-8252-4498-a4e3-cfc32bd04b29.png differ diff --git a/content/zh/post/enmo/images/20220331-7df540f1-ed3e-4233-8a53-f7b8cffc8152.png b/content/zh/post/enmo/images/20220331-7df540f1-ed3e-4233-8a53-f7b8cffc8152.png new file mode 100644 index 0000000000000000000000000000000000000000..6a4919a35242d345f2cec76f17c816dac7120de0 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-7df540f1-ed3e-4233-8a53-f7b8cffc8152.png differ diff --git a/content/zh/post/enmo/images/20220331-94e3e60e-7b75-4555-9bcf-5354270d8af4.png b/content/zh/post/enmo/images/20220331-94e3e60e-7b75-4555-9bcf-5354270d8af4.png new file mode 100644 index 0000000000000000000000000000000000000000..4efcd33b26cffbad40738598febc02cddb80742e Binary files /dev/null and b/content/zh/post/enmo/images/20220331-94e3e60e-7b75-4555-9bcf-5354270d8af4.png differ diff --git a/content/zh/post/enmo/images/20220331-98ff026f-be85-4123-8a9a-6f9a4034010b.png b/content/zh/post/enmo/images/20220331-98ff026f-be85-4123-8a9a-6f9a4034010b.png new file mode 100644 index 0000000000000000000000000000000000000000..80463a4fd24ea101bd2f1329437f177fecc1a711 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-98ff026f-be85-4123-8a9a-6f9a4034010b.png differ diff --git a/content/zh/post/enmo/images/20220331-998406c3-61ab-436f-9687-383df5f1f6cf.png b/content/zh/post/enmo/images/20220331-998406c3-61ab-436f-9687-383df5f1f6cf.png new file mode 100644 index 0000000000000000000000000000000000000000..d0ac54b606a7a3ede577961fc207935380907143 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-998406c3-61ab-436f-9687-383df5f1f6cf.png differ diff --git a/content/zh/post/enmo/images/20220331-99b1aab5-504d-4993-883e-0c82164d18ba.png b/content/zh/post/enmo/images/20220331-99b1aab5-504d-4993-883e-0c82164d18ba.png new file mode 100644 index 0000000000000000000000000000000000000000..ba1554ec6456791d2e8c475f0c9a5aa918ac34f7 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-99b1aab5-504d-4993-883e-0c82164d18ba.png differ diff --git a/content/zh/post/enmo/images/20220331-a7515c05-6c23-4454-9f15-75589a95006b.png b/content/zh/post/enmo/images/20220331-a7515c05-6c23-4454-9f15-75589a95006b.png new file mode 100644 index 0000000000000000000000000000000000000000..a3c03459e841776abb72e57b313e58d2524022e0 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-a7515c05-6c23-4454-9f15-75589a95006b.png differ diff --git a/content/zh/post/enmo/images/20220331-af3ac54a-3daf-41db-b682-f65ac99c4596.png b/content/zh/post/enmo/images/20220331-af3ac54a-3daf-41db-b682-f65ac99c4596.png new file mode 100644 index 0000000000000000000000000000000000000000..78b0a5081ccb30ce5c49b700530de39e809463cb Binary files /dev/null and b/content/zh/post/enmo/images/20220331-af3ac54a-3daf-41db-b682-f65ac99c4596.png differ diff --git a/content/zh/post/enmo/images/20220331-cc3d3109-87fb-4563-bd9d-e09f700f6ce2.png b/content/zh/post/enmo/images/20220331-cc3d3109-87fb-4563-bd9d-e09f700f6ce2.png new file mode 100644 index 0000000000000000000000000000000000000000..970a93d285d5ee5878a7a0e8d331de3504d123e2 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-cc3d3109-87fb-4563-bd9d-e09f700f6ce2.png differ diff --git a/content/zh/post/enmo/images/20220331-d5b6a331-96f3-4230-8764-96362c0445c4.png b/content/zh/post/enmo/images/20220331-d5b6a331-96f3-4230-8764-96362c0445c4.png new file mode 100644 index 0000000000000000000000000000000000000000..8564b6ff7b3f4a41a1b057f128548e69c7e1846a Binary files /dev/null and 
b/content/zh/post/enmo/images/20220331-d5b6a331-96f3-4230-8764-96362c0445c4.png differ diff --git a/content/zh/post/enmo/images/20220331-e07e51f9-87b0-4372-aa5c-c7f63c2dc68d.png b/content/zh/post/enmo/images/20220331-e07e51f9-87b0-4372-aa5c-c7f63c2dc68d.png new file mode 100644 index 0000000000000000000000000000000000000000..f411b8ca78afb0f8ed33c297aef54198bfe852e8 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-e07e51f9-87b0-4372-aa5c-c7f63c2dc68d.png differ diff --git a/content/zh/post/enmo/images/20220331-e0ce0a32-6f6d-4e7c-bbe0-f5c7a82c096e.png b/content/zh/post/enmo/images/20220331-e0ce0a32-6f6d-4e7c-bbe0-f5c7a82c096e.png new file mode 100644 index 0000000000000000000000000000000000000000..f52ad12ad897345aa68d3f996b5e66aa617630b3 Binary files /dev/null and b/content/zh/post/enmo/images/20220331-e0ce0a32-6f6d-4e7c-bbe0-f5c7a82c096e.png differ diff --git a/content/zh/post/enmo/images/20220408-0e8866ab-114c-42ab-9bb6-234f8a40acd3.png b/content/zh/post/enmo/images/20220408-0e8866ab-114c-42ab-9bb6-234f8a40acd3.png new file mode 100644 index 0000000000000000000000000000000000000000..966b3dc6ed3fd92c4b329ff095c28802982ed595 Binary files /dev/null and b/content/zh/post/enmo/images/20220408-0e8866ab-114c-42ab-9bb6-234f8a40acd3.png differ diff --git a/content/zh/post/enmo/images/20220408-11f29897-a4b6-4f2e-a8c2-f679957fe730.png b/content/zh/post/enmo/images/20220408-11f29897-a4b6-4f2e-a8c2-f679957fe730.png new file mode 100644 index 0000000000000000000000000000000000000000..23964d41cf8cbffd94704f08b5bcc28bad0cc6d4 Binary files /dev/null and b/content/zh/post/enmo/images/20220408-11f29897-a4b6-4f2e-a8c2-f679957fe730.png differ diff --git a/content/zh/post/enmo/images/20220408-685e0f51-303d-416a-970c-d73aea1bb48f.png b/content/zh/post/enmo/images/20220408-685e0f51-303d-416a-970c-d73aea1bb48f.png new file mode 100644 index 0000000000000000000000000000000000000000..162f772ce7a6f3b0c820fd8718b0eb8cd5f222ce Binary files /dev/null and b/content/zh/post/enmo/images/20220408-685e0f51-303d-416a-970c-d73aea1bb48f.png differ diff --git a/content/zh/post/enmo/images/20220408-75524765-1188-47d0-b432-664e001638a5.png b/content/zh/post/enmo/images/20220408-75524765-1188-47d0-b432-664e001638a5.png new file mode 100644 index 0000000000000000000000000000000000000000..7b11d53f10c6c2f9c6d93629a0f08bdd6c95fb92 Binary files /dev/null and b/content/zh/post/enmo/images/20220408-75524765-1188-47d0-b432-664e001638a5.png differ diff --git a/content/zh/post/enmo/images/20220408-855d3678-23bd-4f69-bdaf-71f840096cd3.png b/content/zh/post/enmo/images/20220408-855d3678-23bd-4f69-bdaf-71f840096cd3.png new file mode 100644 index 0000000000000000000000000000000000000000..6aa8d360c0f407b786bd968f0760f0b20f6e6b2d Binary files /dev/null and b/content/zh/post/enmo/images/20220408-855d3678-23bd-4f69-bdaf-71f840096cd3.png differ diff --git a/content/zh/post/enmo/images/20220412-0b6a8c86-dae6-4fd6-bd75-f7cc342b9d53.png b/content/zh/post/enmo/images/20220412-0b6a8c86-dae6-4fd6-bd75-f7cc342b9d53.png new file mode 100644 index 0000000000000000000000000000000000000000..dbe2dc3f39b9283fdda48eca91f587d4186881f0 Binary files /dev/null and b/content/zh/post/enmo/images/20220412-0b6a8c86-dae6-4fd6-bd75-f7cc342b9d53.png differ diff --git a/content/zh/post/enmo/images/20220412-14115c27-04ae-40a5-9e46-30a2d96da03d.png b/content/zh/post/enmo/images/20220412-14115c27-04ae-40a5-9e46-30a2d96da03d.png new file mode 100644 index 0000000000000000000000000000000000000000..959644c78da9d0962b282e1a17bc2cfdab723aef Binary files /dev/null and 
b/content/zh/post/enmo/images/20220412-14115c27-04ae-40a5-9e46-30a2d96da03d.png differ diff --git a/content/zh/post/enmo/images/20220412-190d47ff-2390-479a-99da-8685f519fcd6.png b/content/zh/post/enmo/images/20220412-190d47ff-2390-479a-99da-8685f519fcd6.png new file mode 100644 index 0000000000000000000000000000000000000000..824d8976e4a6e9754757c1e461567b55cce0196b Binary files /dev/null and b/content/zh/post/enmo/images/20220412-190d47ff-2390-479a-99da-8685f519fcd6.png differ diff --git a/content/zh/post/enmo/images/20220412-2d7e1422-d8db-4aa8-819a-e21478581f41.png b/content/zh/post/enmo/images/20220412-2d7e1422-d8db-4aa8-819a-e21478581f41.png new file mode 100644 index 0000000000000000000000000000000000000000..6bb8209629a3d39016c8c82144370716150fee89 Binary files /dev/null and b/content/zh/post/enmo/images/20220412-2d7e1422-d8db-4aa8-819a-e21478581f41.png differ diff --git a/content/zh/post/enmo/images/20220412-59c437c1-227e-492b-8c05-9ffca010e9a5.png b/content/zh/post/enmo/images/20220412-59c437c1-227e-492b-8c05-9ffca010e9a5.png new file mode 100644 index 0000000000000000000000000000000000000000..736d37af0df412362462d703c4dae15e73af665d Binary files /dev/null and b/content/zh/post/enmo/images/20220412-59c437c1-227e-492b-8c05-9ffca010e9a5.png differ diff --git a/content/zh/post/enmo/images/20220412-863aac27-a9ad-4ba1-bdcd-645521ecab34.png b/content/zh/post/enmo/images/20220412-863aac27-a9ad-4ba1-bdcd-645521ecab34.png new file mode 100644 index 0000000000000000000000000000000000000000..308f783de2dca5cfcad90509c3e4aad1032c9d4a Binary files /dev/null and b/content/zh/post/enmo/images/20220412-863aac27-a9ad-4ba1-bdcd-645521ecab34.png differ diff --git a/content/zh/post/enmo/images/20220412-9af92e2d-9172-4c8d-b3ae-a7ce82107375.png b/content/zh/post/enmo/images/20220412-9af92e2d-9172-4c8d-b3ae-a7ce82107375.png new file mode 100644 index 0000000000000000000000000000000000000000..619a1e8fb71101bc01e55523ad883fc5033b001e Binary files /dev/null and b/content/zh/post/enmo/images/20220412-9af92e2d-9172-4c8d-b3ae-a7ce82107375.png differ diff --git a/content/zh/post/enmo/images/20220412-bf4a0c4a-7b3b-4c5b-9770-d97d85fbf128.png b/content/zh/post/enmo/images/20220412-bf4a0c4a-7b3b-4c5b-9770-d97d85fbf128.png new file mode 100644 index 0000000000000000000000000000000000000000..dcf287481b10fd32c185cd4c6e9d0f93493645c1 Binary files /dev/null and b/content/zh/post/enmo/images/20220412-bf4a0c4a-7b3b-4c5b-9770-d97d85fbf128.png differ diff --git a/content/zh/post/enmo/images/20220412-c55ff218-119a-466e-8fdb-2fc70ac46831.png b/content/zh/post/enmo/images/20220412-c55ff218-119a-466e-8fdb-2fc70ac46831.png new file mode 100644 index 0000000000000000000000000000000000000000..76ebe17d03c7675132a2a0ba7bf404591fb7a264 Binary files /dev/null and b/content/zh/post/enmo/images/20220412-c55ff218-119a-466e-8fdb-2fc70ac46831.png differ diff --git a/content/zh/post/enmo/images/20220412-f0cf7187-0379-4746-9aa2-8d8d84fab8e0.png b/content/zh/post/enmo/images/20220412-f0cf7187-0379-4746-9aa2-8d8d84fab8e0.png new file mode 100644 index 0000000000000000000000000000000000000000..0f676b5e45c6aca7a7e8d9fc64eec357cbc4a707 Binary files /dev/null and b/content/zh/post/enmo/images/20220412-f0cf7187-0379-4746-9aa2-8d8d84fab8e0.png differ diff --git a/content/zh/post/enmo/images/20220512-6b05767d-e69a-450b-89e2-ef3a98cc24dd.png b/content/zh/post/enmo/images/20220512-6b05767d-e69a-450b-89e2-ef3a98cc24dd.png new file mode 100644 index 0000000000000000000000000000000000000000..28e9ed038703efc6f3a8241d5e20cfd7ee7fdae9 Binary files /dev/null and 
b/content/zh/post/enmo/images/20220512-6b05767d-e69a-450b-89e2-ef3a98cc24dd.png differ diff --git a/content/zh/post/enmo/images/20220512-e61af498-f6ce-4dd2-a96b-3b57e1500d96.png b/content/zh/post/enmo/images/20220512-e61af498-f6ce-4dd2-a96b-3b57e1500d96.png new file mode 100644 index 0000000000000000000000000000000000000000..f257c5f4f7f348be1cc9d88560f8f616d9837020 Binary files /dev/null and b/content/zh/post/enmo/images/20220512-e61af498-f6ce-4dd2-a96b-3b57e1500d96.png differ diff --git a/content/zh/post/enmo/images/20220513-2acdcd6a-b6f8-49fb-b718-9196894b7011.png b/content/zh/post/enmo/images/20220513-2acdcd6a-b6f8-49fb-b718-9196894b7011.png new file mode 100644 index 0000000000000000000000000000000000000000..59baed938ae8f67708c1e4a08cb71fa266c79a91 Binary files /dev/null and b/content/zh/post/enmo/images/20220513-2acdcd6a-b6f8-49fb-b718-9196894b7011.png differ diff --git a/content/zh/post/enmo/images/20220513-706c424f-3e3b-4305-8798-706921389979.png b/content/zh/post/enmo/images/20220513-706c424f-3e3b-4305-8798-706921389979.png new file mode 100644 index 0000000000000000000000000000000000000000..62ec3d00f9d6cd120703e049b8b397170257abe7 Binary files /dev/null and b/content/zh/post/enmo/images/20220513-706c424f-3e3b-4305-8798-706921389979.png differ diff --git a/content/zh/post/enmo/images/20220513-9759cb7a-7d71-46af-9ff9-bed99d762a0b.png b/content/zh/post/enmo/images/20220513-9759cb7a-7d71-46af-9ff9-bed99d762a0b.png new file mode 100644 index 0000000000000000000000000000000000000000..42c45ff3c5fe8e4c039fb38a58326ef91ce919f9 Binary files /dev/null and b/content/zh/post/enmo/images/20220513-9759cb7a-7d71-46af-9ff9-bed99d762a0b.png differ diff --git "a/content/zh/post/enmo/openGauss 3.0.0 docker \345\256\211\350\243\205.md" "b/content/zh/post/enmo/openGauss 3.0.0 docker \345\256\211\350\243\205.md" new file mode 100644 index 0000000000000000000000000000000000000000..5d44ebda35c8b708e5a0a1bb47d8126672a71c96 --- /dev/null +++ "b/content/zh/post/enmo/openGauss 3.0.0 docker \345\256\211\350\243\205.md" @@ -0,0 +1,184 @@ ++++ + +title = "openGauss 3.0.0 docker 安装" + +date = "2022-05-05" + +tags = ["openGauss 3.0.0 docker 安装"] + +archives = "2022-05" + +author = "云和恩墨" + +summary = "openGauss 3.0.0 docker 安装" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss 3.0.0 docker 安装 + +本文出处:[https://www.modb.pro/db/393017](https://www.modb.pro/db/393017) + + + +1. 从Docker Hub查找镜像 + +![image.png](../images/20220412-14115c27-04ae-40a5-9e46-30a2d96da03d.png) + +1. 从镜像仓库中拉取或者更新指定镜像 + +``` +- docker pull enmotech/opengauss +``` + +1. 
使用脚本安装主备,输入版本为3.0.0 + +``` +#!/bin/bash -e +# Parameters +#!/bin/bash + +#set OG_SUBNET,GS_PASSWORD,MASTER_IP,SLAVE_1_IP,MASTER_HOST_PORT,MASTER_LOCAL_PORT,SLAVE_1_HOST_PORT,SLAVE_1_LOCAL_PORT,MASTER_NODENAME,SLAVE_NODENAME + +read -p "Please input OG_SUBNET (容器所在网段) [172.11.0.0/24]: " OG_SUBNET +OG_SUBNET=${OG_SUBNET:-172.11.0.0/24} +echo "OG_SUBNET set $OG_SUBNET" + +read -p "Please input GS_PASSWORD (定义数据库密码)[Enmo@123]: " GS_PASSWORD +GS_PASSWORD=${GS_PASSWORD:-Enmo@123} +echo "GS_PASSWORD set $GS_PASSWORD" + +read -p "Please input MASTER_IP (主库IP)[172.11.0.101]: " MASTER_IP +MASTER_IP=${MASTER_IP:-172.11.0.101} +echo "MASTER_IP set $MASTER_IP" + +read -p "Please input SLAVE_1_IP (备库IP)[172.11.0.102]: " SLAVE_1_IP +SLAVE_1_IP=${SLAVE_1_IP:-172.11.0.102} +echo "SLAVE_1_IP set $SLAVE_1_IP" + +read -p "Please input MASTER_HOST_PORT (主库数据库服务端口)[5432]: " MASTER_HOST_PORT +MASTER_HOST_PORT=${MASTER_HOST_PORT:-5432} +echo "MASTER_HOST_PORT set $MASTER_HOST_PORT" + +read -p "Please input MASTER_LOCAL_PORT (主库通信端口)[5434]: " MASTER_LOCAL_PORT +MASTER_LOCAL_PORT=${MASTER_LOCAL_PORT:-5434} +echo "MASTER_LOCAL_PORT set $MASTER_LOCAL_PORT" + +read -p "Please input SLAVE_1_HOST_PORT (备库数据库服务端口)[6432]: " SLAVE_1_HOST_PORT +SLAVE_1_HOST_PORT=${SLAVE_1_HOST_PORT:-6432} +echo "SLAVE_1_HOST_PORT set $SLAVE_1_HOST_PORT" + +read -p "Please input SLAVE_1_LOCAL_PORT (备库通信端口)[6434]: " SLAVE_1_LOCAL_PORT +SLAVE_1_LOCAL_PORT=${SLAVE_1_LOCAL_PORT:-6434} +echo "SLAVE_1_LOCAL_PORT set $SLAVE_1_LOCAL_PORT" + +read -p "Please input MASTER_NODENAME [opengauss_master]: " MASTER_NODENAME +MASTER_NODENAME=${MASTER_NODENAME:-opengauss_master} +echo "MASTER_NODENAME set $MASTER_NODENAME" + +read -p "Please input SLAVE_NODENAME [opengauss_slave1]: " SLAVE_NODENAME +SLAVE_NODENAME=${SLAVE_NODENAME:-opengauss_slave1} +echo "SLAVE_NODENAME set $SLAVE_NODENAME" + +read -p "Please input openGauss VERSION [1.1.0]: " VERSION +VERSION=${VERSION:-1.1.0} +echo "openGauss VERSION set $VERSION" + +echo "starting " + +docker network create --subnet=$OG_SUBNET opengaussnetwork \ +|| { + echo "" + echo "ERROR: OpenGauss Database Network was NOT successfully created." + echo "HINT: opengaussnetwork Maybe Already Exsist Please Execute 'docker network rm opengaussnetwork' " + exit 1 +} +echo "OpenGauss Database Network Created." + +docker run --network opengaussnetwork --ip $MASTER_IP --privileged=true \ +--name $MASTER_NODENAME -h $MASTER_NODENAME -p $MASTER_HOST_PORT:$MASTER_HOST_PORT -d \ +-e GS_PORT=$MASTER_HOST_PORT \ +-e OG_SUBNET=$OG_SUBNET \ +-e GS_PASSWORD=$GS_PASSWORD \ +-e NODE_NAME=$MASTER_NODENAME \ +-e REPL_CONN_INFO="replconninfo1 = 'localhost=$MASTER_IP localport=$MASTER_LOCAL_PORT localservice=$MASTER_HOST_PORT remotehost=$SLAVE_1_IP remoteport=$SLAVE_1_LOCAL_PORT remoteservice=$SLAVE_1_HOST_PORT'\n" \ +enmotech/opengauss:$VERSION -M primary \ +|| { + echo "" + echo "ERROR: OpenGauss Database Master Docker Container was NOT successfully created." + exit 1 +} +echo "OpenGauss Database Master Docker Container created." 
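+
+# 说明(示意注释):主库容器初始化需要一段时间,脚本因此先等待 30 秒,
+# 再以 -M standby 模式创建备库容器;两侧 REPL_CONN_INFO 中的 local/remote
+# 地址与端口互为镜像。若备库在主库就绪前启动,replconninfo1 指定的
+# 复制连接可能建立失败。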
+ +sleep 30s + +docker run --network opengaussnetwork --ip $SLAVE_1_IP --privileged=true \ +--name $SLAVE_NODENAME -h $SLAVE_NODENAME -p $SLAVE_1_HOST_PORT:$SLAVE_1_HOST_PORT -d \ +-e GS_PORT=$SLAVE_1_HOST_PORT \ +-e OG_SUBNET=$OG_SUBNET \ +-e GS_PASSWORD=$GS_PASSWORD \ +-e NODE_NAME=$SLAVE_NODENAME \ +-e REPL_CONN_INFO="replconninfo1 = 'localhost=$SLAVE_1_IP localport=$SLAVE_1_LOCAL_PORT localservice=$SLAVE_1_HOST_PORT remotehost=$MASTER_IP remoteport=$MASTER_LOCAL_PORT remoteservice=$MASTER_HOST_PORT'\n" \ +enmotech/opengauss:$VERSION -M standby \ +|| { + echo "" + echo "ERROR: OpenGauss Database Slave1 Docker Container was NOT successfully created." + exit 1 +} +echo "OpenGauss Database Slave1 Docker Container created." + +``` + +1. 安装成功 + +![image.png](../images/20220412-bf4a0c4a-7b3b-4c5b-9770-d97d85fbf128.png) + +1. 安装完成后分别进入主备 + +``` +- docker exec -it opengauss_master bash - docker exec -it opengauss_slave1 bash +``` + +1. 切换omm用户,查看集群状态 + +发现集群状态异常 + +``` +$ gs_ctl query +``` + +![image.png](../images/20220412-9af92e2d-9172-4c8d-b3ae-a7ce82107375.png) + +查看pg_stat_replication + +![image.png](../images/20220412-0b6a8c86-dae6-4fd6-bd75-f7cc342b9d53.png) + +查看pg_hba.conf文件 +![image.png](../images/20220412-59c437c1-227e-492b-8c05-9ffca010e9a5.png) + +查看数据库加密方式,为md5加密 + +![image.png](../images/20220412-190d47ff-2390-479a-99da-8685f519fcd6.png) + +查看用户密码,omm的加密方式默认为sha256 + +![image.png](../images/20220412-f0cf7187-0379-4746-9aa2-8d8d84fab8e0.png) + +可修改数据库加密方式为1,或者加密方式为2,再更新原有md5加密方式的用户密码 + +![image.png](../images/20220412-863aac27-a9ad-4ba1-bdcd-645521ecab34.png) + +修改pg_hba.conf文件 + +![image.png](../images/20220412-2d7e1422-d8db-4aa8-819a-e21478581f41.png) + +重新加载 + +``` +gs_ctl reload -D $PGDATA +查看集群状态正常 +``` + +【注】测试过程中发现,本次安装docker使用的镜像非企业版,有些企业版独有的功能无法使用,例如逻辑复制 diff --git "a/content/zh/post/enmo/openGauss MogDB PostgreSQL\346\225\260\346\215\256\345\272\223\346\230\223\347\212\257\347\232\204\345\215\201\345\244\247\351\224\231\350\257\257.md" "b/content/zh/post/enmo/openGauss MogDB PostgreSQL\346\225\260\346\215\256\345\272\223\346\230\223\347\212\257\347\232\204\345\215\201\345\244\247\351\224\231\350\257\257.md" new file mode 100644 index 0000000000000000000000000000000000000000..c083e467cf0a68e18d370225bf6aba034e11400b --- /dev/null +++ "b/content/zh/post/enmo/openGauss MogDB PostgreSQL\346\225\260\346\215\256\345\272\223\346\230\223\347\212\257\347\232\204\345\215\201\345\244\247\351\224\231\350\257\257.md" @@ -0,0 +1,194 @@ ++++ + +title = "openGauss/MogDB/PostgreSQL数据库易犯的十大错误" + +date = "2022-04-13" + +tags = ["openGauss/MogDB/PostgreSQL数据库易犯的十大错误"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss/MogDB/PostgreSQL数据库易犯的十大错误" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" ++++ + +# openGauss/MogDB/PostgreSQL数据库易犯的十大错误 + +本文出处:https://www.modb.pro/db/69117 + +总结十点openGauss/MogDB/PostgreSQL数据库中容易犯的错误。 + +### 1.同时设置日志行前缀和csvlog格式 + +比较常见大家同时配置下面这两个参数 + +``` +log_line_prefix = '%m %u %d %p' log_destination='csvlog' +``` + +- %m是带毫秒的时间戳 +- %u是用户名 +- %d是数据库名 +- %p是进程ID + +然后当我们配置为csvlog日志时,日志行的内容项是固定的,所以当我们需要配置日志前缀,精简日志行的内容项时,log_destination不能配置为csvlog。下面是正确的配置: + +``` +log_destination='stderr' log_line_prefix = '%m %u %d %p' +``` + +### 2.不符合预期的日志轮换策略 + +日志轮换策略可以通过log_rotation_size参数按日志文件大小控制或者通过log_rotation_age参数按时间控制,但下面这四个参数需要合理组合使用。 + +``` +log_filename log_truncate_on_rotation log_rotation_age log_rotation_size +``` + +方案一:每天生成一个新的日志文件 + +``` +log_filename='postgresql-%Y-%m-%d.log' log_truncate_on_rotation=off log_rotation_age=1d 
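+# 示意注释:上面按日期命名日志文件并按天(log_rotation_age=1d)轮换;
+# 下面的 log_rotation_size=0 表示关闭按大小切换,仅按时间生成新日志文件。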
log_rotation_size=0 +``` + +方案二:写满固定大小(如10MB),则进行切换 + +``` +log_filename='postgresql-%Y-%m-%d_%H%M%S.log' log_truncate_on_rotation=off log_rotation_age=0 log_rotation_size=10MB +``` + +这种方案我们一般是为了根据时间去查看日志,文件名根据日志量可以设置到时分秒,但这里设置log_rotation_size并不能严格控制固定大小。 + +方案三:保留固定天数的日志并循环覆盖,例如固定一周或者固定一个月 + +``` +log_filename='postgresql-%u.log' log_truncate_on_rotation=on log_rotation_age=1d log_rotation_size=0 +``` + +log_filename常见的通配符变量 + +- %u是星期的数字表示,范围是[1,7],1代表星期一 +- %w也是星期的数字表示,范围是[0,6],0代表星期天 +- %d是月份中的天数表示,范围是[01,31] + +生产环境第三种方案更合适一些。 + +### 3.同步复制表的序列 + +看看下面这个例子,我们创建test表使用serial自增序列类型,系统帮我们生成了test_id_seq序列。 + +```sql +postgres=# create table test(id serial primary key,name varchar unique); +CREATE TABLE +postgres=# \d test + Table "public.test" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+---------------------------------- + id | integer | | not null | nextval('test_id_seq'::regclass) + name | character varying | | | +Indexes: + "test_pkey" PRIMARY KEY, btree (id) + "test_name_key" UNIQUE CONSTRAINT, btree (name) +``` + +当我们复制t_test表时,test表的序列引用也同时复制过来了,可以使用虚拟生成列来解决这个问题。 + +```sql +postgres=# create table t_test(like test including all); +CREATE TABLE +postgres=# \d t_test + Table "public.t_test" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+---------------------------------- + id | integer | | not null | nextval('test_id_seq'::regclass) + name | character varying | | | +Indexes: + "t_test_pkey" PRIMARY KEY, btree (id) + "t_test_name_key" UNIQUE CONSTRAINT, btree (name) + +``` + +openGauss对PG的这个问题做了修复,下面是openGauss复制t_test时,序列按表名做了区分。 + +```sql +omm=# \d t_test + Table "public.t_test" + Column | Type | Modifiers +--------+-------------------+----------------------------------------------------- + id | integer | not null default nextval('t_test_id_seq'::regclass) + name | character varying | +Indexes: + "t_test_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default + "t_test_name_key" UNIQUE CONSTRAINT, btree (name) TABLESPACE pg_default + +``` + +### 4.跳变的序列值 + +创建序列seq1,设置cache为10,session A获取下一个值为1. 
+ +```sql +postgres=# create sequence seq1 cache 10; +CREATE SEQUENCE +postgres=# select nextval('seq1'); + nextval +--------- + 1 +(1 row) + +``` + +session B查询获取下一个值为11 + +``` +postgres=# select nextval('seq1'); nextval --------- 11 (1 row) +``` + +序列值插入为了保证连续性,要设置cache为1。 + +### 5.从任意库查询pg_stat_statements模块统计信息 + +pg_stat_statements模块用来跟踪SQL语句的执行统计信息,我们如果把该模块安装到postgres数据库,就只能连到postgres数据库进行查询,除非其它数据库也安装了该模块,否则会提示报错找不到。 + +无论任何操作,都需要连接到一个数据库,即使我们只想创建一个全局的数据库用户,所以选对数据库特别重要。 + +### 6.truncate操作理解为DML语句 + +log_statement参数控制日志记录级别,有4个选项:none、ddl、mod、all。开启ddl,它会记录 create、alter和drop相关的语句,但不记录truncate。 +truncate在Oracle中属于DDL语句,在PostgreSQL中属于DML语句。因此,当我们使用DDL日志记录语句时,无法记录到Truncate。 + +### 7.认为数据库的owner可以管理其下所有对象 + +数据库、模式、表的都有自己的owner,他们都属于实例中的对象,数据库owner只是具有数据库这个对象的CTc权限。数据库的默认权限为: + +- 允许public角色连接,即允许任何人连接。 +- 不允许除了超级用户和owner之外的任何人在数据库中创建schema。 +- 会自动创建名为public的schema,这个schema的所有权限已经赋予给public角色,即允许任何人在里面创建对象。 + +schema使用注意事项: +schema的owner默认是该schema下的所有对象的owner,但是允许用户在别人的schema下创建对象,所以一个对象的owner和schema的owner可能不同,都有drop对象的权限。 + +### 8.认为public模式下的对象可以互相访问 + +public模式只是允许任何人在里面创建对象并管理自己的对象,并不能查看别人创建的对象。 + +### 9.创建索引时起名为表名称 + +单个数据库里,索引和表的名称不能重复,因为他们都属于relation。 + +```sql +postgres=# create index a on a(id); +ERROR: relation "a" already exists +``` + +### 10.把walsender当作主库 + +通常我们从操作系统层查看主库有walsender,备库有walreceiver,并且walsender信息中可以看到备库的IP地址,可以初步判断主备状态正常。 +但请注意有walsender或者数据库中能查到pg_stat_replication视图并不能断定是主库,仅在一主一备环境可以这样简单判断,下面的图可以看出,虽然有walsender,但它也是个备库. + +![QQ截图20210603091354.png](../images/20210603-9b70ba89-658c-4902-818a-099c359808b4.png) diff --git "a/content/zh/post/enmo/openGauss MogDB WDR\346\212\245\345\221\212\350\257\246\350\247\243.md" "b/content/zh/post/enmo/openGauss MogDB WDR\346\212\245\345\221\212\350\257\246\350\247\243.md" new file mode 100644 index 0000000000000000000000000000000000000000..c5f221f2a693fd44e468939329429cf52ae917a0 --- /dev/null +++ "b/content/zh/post/enmo/openGauss MogDB WDR\346\212\245\345\221\212\350\257\246\350\247\243.md" @@ -0,0 +1,288 @@ ++++ + +title = "openGauss/MogDB WDR报告详解" + +date = "2022-05-12" + +tags = ["openGauss/MogDB WDR报告详解"] + +archives = "2022-05" + +author = "云和恩墨" + +summary = "openGauss/MogDB WDR报告详解" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" ++++ + +# openGauss/MogDB WDR报告详解 + +本文出处:[https://www.modb.pro/db/401290](https://www.modb.pro/db/401290) + +# 摘要 + +> WDR(Workload Diagnosis Report)**负载诊断报告**,是openGauss的工作负载诊断报告,常用于判断openGauss长期性能问题。WDR报告基于两次不同时间点系统的性能快照数据,生成这两个时间点之间的性能表现报表。 + +# 开启WDR快照 + +## 参数简介 + +### enable_wdr_snapshot + +**参数说明**: 是否开启数据库监控快照功能。 + +该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。 + +**取值范围**: 布尔型 + +- on: 打开数据库监控快照功能。 +- off: 关闭数据库监控快照功能。 + +**默认值**: off + +### wdr_snapshot_retention_days + +**参数说明**: 系统中数据库监控快照数据的保留天数,超过设置的值之后,系统每隔wdr_snapshot_interval时间间隔,清理snapshot_id最小的快照数据。 + +该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。 + +**取值范围:** 整型,1~8。 + +**默认值**: 8 + +### wdr_snapshot_query_timeout + +**参数说明**: 系统执行数据库监控快照操作时,设置快照操作相关的sql语句的执行超时时间。如果语句超过设置的时间没有执行完并返回结果,则本次快照操作失败。 + +该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。 + +**取值范围:** 整型,100~INT_MAX(秒)。 + +**默认值**: 100s + +### wdr_snapshot_interval + +**参数说明**: 后台线程Snapshot自动对数据库监控数据执行快照操作的时间间隔。 + +该参数属于SIGHUP类型参数,请参考表[GUC参数分类](https://docs.mogdb.io/zh/mogdb/v2.1/30-appendix)中对应设置方法进行设置。 + +**取值范围:** 整型,10~60(分钟)。 + +**默认值**: 1h + +## 查看当前wdr相关配置 + +``` +postgres@omm:local=#select name, setting from 
pg_settings where name like '%wdr%'; + name | setting +-----------------------------+--------- + enable_wdr_snapshot | off + wdr_snapshot_interval | 60 + wdr_snapshot_query_timeout | 100 + wdr_snapshot_retention_days | 8 +(4 rows) +``` + +## 开启wdr日志 + +``` +omm@107707f966f0:/var/lib/mogdb/data$ gs_guc reload -D $PGDATA -c "enable_wdr_snapshot=on" +expected instance path: [/var/lib/mogdb/data/postgresql.conf] +gs_guc reload: enable_wdr_snapshot=on: [/var/lib/mogdb/data/postgresql.conf] +server signaled + +Total instances: 1. Failed instances: 0. +Success to perform gs_guc! + +omm@107707f966f0:/var/lib/mogdb/data$ gsql -d postgres -r +gsql ((MogDB 2.1.1 build b5f25b20) compiled at 2022-03-21 14:42:30 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +postgres@omm:local=#select name, setting from pg_settings where name like '%wdr%'; + name | setting +-----------------------------+--------- + enable_wdr_snapshot | on + wdr_snapshot_interval | 60 + wdr_snapshot_query_timeout | 100 + wdr_snapshot_retention_days | 8 +(4 rows) + +``` + +## 查看快照统计表 + +``` +postgres@omm:local=#show search_path; + search_path +---------------- + "$user",public +(1 row) + +postgres@omm:local=#alter session set search_path=snapshot; +SET +postgres@omm:local=#show search_path; + search_path +------------- + snapshot +(1 row) + +postgres@omm:local=#\d + List of relations + Schema | Name | Type | Owner | Storage +----------+------------------------------------------+----------+-------+---------------------------------- + snapshot | snap_class_vital_info | table | omm | {orientation=row,compression=no} + snapshot | snap_global_bgwriter_stat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_ckpt_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_config_settings | table | omm | {orientation=row,compression=no} + snapshot | snap_global_double_write_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_file_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_file_redo_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_instance_time | table | omm | {orientation=row,compression=no} + snapshot | snap_global_memory_node_detail | table | omm | {orientation=row,compression=no} + snapshot | snap_global_os_runtime | table | omm | {orientation=row,compression=no} + snapshot | snap_global_os_threads | table | omm | {orientation=row,compression=no} + snapshot | snap_global_pagewriter_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_record_reset_time | table | omm | {orientation=row,compression=no} + snapshot | snap_global_recovery_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_redo_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_rel_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_replication_slots | table | omm | {orientation=row,compression=no} + snapshot | snap_global_replication_stat | table | omm | {orientation=row,compression=no} + snapshot | snap_global_rto_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_shared_memory_detail | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_all_tables | table | omm | {orientation=row,compression=no} + 
snapshot | snap_global_stat_bad_block | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_database | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_database_conflicts | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_db_cu | table | omm | {orientation=row,compression=no} + snapshot | snap_global_stat_user_functions | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statement_count | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statio_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statio_all_sequences | table | omm | {orientation=row,compression=no} + snapshot | snap_global_statio_all_tables | table | omm | {orientation=row,compression=no} + snapshot | snap_global_thread_wait_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_threadpool_status | table | omm | {orientation=row,compression=no} + snapshot | snap_global_transactions_prepared_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_global_transactions_running_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_global_wait_events | table | omm | {orientation=row,compression=no} + snapshot | snap_global_workload_transaction | table | omm | {orientation=row,compression=no} + snapshot | snap_seq | sequence | omm | + snapshot | snap_statement_responsetime_percentile | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_file_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_file_redo_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_rel_iostat | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_all_tables | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_bad_block | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_database | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_database_conflicts | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_stat_user_functions | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statement | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statement_count | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statio_all_indexes | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statio_all_sequences | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_statio_all_tables | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_transactions_prepared_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_transactions_running_xacts | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_user_login | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_workload_sql_count | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_workload_sql_elapse_time | table | omm | {orientation=row,compression=no} + snapshot | snap_summary_workload_transaction | table | omm | {orientation=row,compression=no} + snapshot | snapshot | table | omm | {orientation=row,compression=no} + snapshot | tables_snap_timestamp | table | omm | {orientation=row,compression=no} +(61 
rows)
+
+```
+
+# 手动生成快照
+
+### SNAPSHOT.SNAPSHOT
+
+SNAPSHOT表记录当前系统中存储的WDR快照数据的索引信息、开始、结束时间。只能在系统库中查询到结果,在用户库中无法查询。
+
+**表 1** SNAPSHOT表属性
+
+| 名称 | 类型 | 描述 | 示例 |
+| ----------- | --------- | ------------------- | ----------------------------- |
+| snapshot_id | bigint | WDR快照序号。 | 1 |
+| start_ts | timestamp | WDR快照的开始时间。 | 2019-12-28 17:11:27.423742+08 |
+| end_ts | timestamp | WDR快照的结束时间。 | 2019-12-28 17:11:43.67726+08 |
+
+```
+postgres@omm:local=#select * from snapshot.snapshot;
+ snapshot_id |           start_ts            |            end_ts
+-------------+-------------------------------+-------------------------------
+           1 | 2022-05-02 11:19:37.239977+00 | 2022-05-02 11:19:37.865708+00
+(1 row)
+
+postgres@omm:local=#select create_wdr_snapshot();
+           create_wdr_snapshot
+-----------------------------------------
+ WDR snapshot request has been submitted
+(1 row)
+
+postgres@omm:local=#select * from snapshot.snapshot;
+ snapshot_id |           start_ts            |            end_ts
+-------------+-------------------------------+-------------------------------
+           1 | 2022-05-02 11:19:37.239977+00 | 2022-05-02 11:19:37.865708+00
+           2 | 2022-05-02 11:42:28.047396+00 | 2022-05-02 11:42:28.617173+00
+(2 rows)
+
+```
+
+# 生成性能报告
+
+## a. 执行如下命令生成格式化性能报告文件。
+
+```
+\a \t \o 服务器文件路径
+```
+
+上述命令涉及参数说明如下:
+
+- \a: 切换非对齐模式。
+- \t: 切换输出的字段名的信息和行计数脚注。
+- \o: 把所有的查询结果发送至服务器文件里。
+- 服务器文件路径:生成性能报告文件存放路径。用户需要拥有此路径的读写权限。
+
+## b. 执行如下命令将查询到的信息写入性能报告中。
+
+```
+select generate_wdr_report(begin_snap_id bigint, end_snap_id bigint, report_type cstring, report_scope cstring, node_name cstring);
+```
+
+命令中涉及的参数说明如下。
+
+**表 3** generate_wdr_report函数参数说明
+
+| 参数 | 说明 | 取值范围 |
+| ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| begin_snap_id | 查询时间段开始的snapshot的id(表snapshot.snapshot中的snapshot_id)。 | - |
+| end_snap_id | 查询时间段结束snapshot的id。默认end_snap_id大于begin_snap_id(表snapshot.snapshot中的snapshot_id)。 | - |
+| report_type | 指定生成report的类型。例如,summary/detail/all。 | summary: 汇总数据。 detail: 明细数据。 all: 包含summary和detail。 |
+| report_scope | 指定生成report的范围,可以为cluster或者node。 | cluster: 数据库级别的信息。 node: 节点级别的信息。 |
+| node_name | 在report_scope指定为node时,需要把该参数指定为对应节点的名称。(节点名称可以执行select * from pg_node_env;查询)。在report_scope为cluster时,该值可以指定为省略、空或者为NULL。 | |
+
+执行操作
+
+```
+postgres@omm:local=#select * from pg_node_env;
+ node_name |   host    | process | port |   installpath    |      datapath       | log_directory
+-----------+-----------+---------+------+------------------+---------------------+---------------
+ mogdb     | localhost |       1 | 5432 | /usr/local/mogdb | /var/lib/mogdb/data | pg_log
+(1 row)
+postgres@omm:local=#
+postgres@omm:local=#\a \t \o wdr_20220502.html
+postgres@omm:local=#select generate_wdr_report(1,2,'all','node','mogdb');
+```
+
+## c.执行如下命令关闭输出选项及格式化输出命令。
+
+```Plain
+\o \a \t
+```
+
+查看报告
+ diff --git "a/content/zh/post/enmo/openGauss MogDB\345\260\217\347\211\210\346\234\254\345\215\207\347\272\247\357\274\232\344\273\2162.0.0 \345\210\2602.0.1.md" "b/content/zh/post/enmo/openGauss MogDB\345\260\217\347\211\210\346\234\254\345\215\207\347\272\247\357\274\232\344\273\2162.0.0 \345\210\2602.0.1.md"
new file mode 100644
index 0000000000000000000000000000000000000000..ab702243c31869bdb5635b1059f62dbc9678eb77
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss MogDB\345\260\217\347\211\210\346\234\254\345\215\207\347\272\247\357\274\232\344\273\2162.0.0 \345\210\2602.0.1.md"
@@ -0,0 +1,175 @@
++++
+
+title = "openGauss/MogDB小版本升级:从2.0.0 到2.0.1"
+
+date = "2022-04-11"
+
+tags = 
["openGauss/MogDB小版本升级:从2.0.0 到2.0.1"] + +archives = "2022-04" + +author = "恩墨交付团队" + +summary = "openGauss/MogDB小版本升级:从2.0.0 到2.0.1" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" + ++++ + +# openGauss/MogDB小版本升级:从2.0.0 到2.0.1 + +### 一、目的 + +openGauss/MogDB2.0.0版本升级到2.0.1 + +### 二、主要思想 + +替换对应的app包 + +### 三、环境 + +| 操作系统 | Centos X86 | +| -------------- | ------------------------ | +| 当前数据库版本 | 2.0.0 commit号:78689da9 | +| 升级后版本 | 2.0.1 commit号:d97c0e8a | + +### 四、具体步骤 + +##### 4.1 压缩包准备 + +1. 创建目录 + + ``` + mkdir /opengauss2.0.1 + ``` + +2. 将安装包上传至/opengauss2.0.1目录下 + +3. 赋权 + + ``` + chown -R omm:dbgrp /opengauss2.0.1 + ``` + +4. 解压压缩包 + + ``` + su - omm + cd /opengauss2.0.1 + tar -zxvf openGauss-2.0.1-CentOS-64bit-all.tar.gz + # 目录下的文件有 + openGauss-2.0.1-CentOS-64bit-om.tar.gz + openGauss-2.0.1-CentOS-64bit.tar.bz2 + openGauss-2.0.1-CentOS-64bit-om.sha256 + openGauss-2.0.1-CentOS-64bit.sha256 + upgrade_sql.tar.gz + upgrade_sql.sha256 + + # 解压 openGauss-2.0.1-CentOS-64bit.tar.bz2 + tar -xvf openGauss-2.0.1-CentOS-64bit.tar.bz2 + ``` + +##### 4.2 更换安装包(omm用户) + +1. 先在主库进行操作 + + 1. 进入数据库安装目录 + + ``` + cd /opt/gaussdb/ + ``` + + 2. 创建新的 app 目录 + + ``` + mkdir app\_2.0.1\_d97c0e8a + ``` + + 3. 复制解压的那些文件到新的安装目录下 + + ``` + cd app\_2.0.1\_d97c0e8a + + cp -r /opengauss2.0.1/bin/ /opengauss2.0.1/etc/ /opengauss2.0.1/include/ /opengauss2.0.1/jre/ /opengauss2.0.1/share/ /opengauss2.0.1/share/ /opengauss2.0.1/simpleInstall/ /opengauss2.0.1/version.cfg/ /opt/gaussdb/app\_2.0.1\_d97c0e8a/ + + cp /opt/gaussdb/app\_392c0438/bin/cluster\_static\_config ./bin/ + + cp /opt/gaussdb/app\_392c0438/bin/upgrade\_version ./bin/ + + cp /opt/gaussdb/app\_392c0438/share/sslcert/grpc/cacertnew.pem ./share/sslcert/grpc/ + + cp /opt/gaussdb/app\_392c0438/share/sslcert/grpc/servernew.crt ./share/sslcert/grpc/ + + cp /opt/gaussdb/app\_392c0438/share/sslcert/grpc/servernew.key ./share/sslcert/grpc/ + ``` + + 4. 压缩包 + + ``` + tar -cvjf openGauss-Package-bak\_d97c0e8a.tar.gz ./\* + ``` + + 5. 软链 + + ``` + cd /opt/gaussdb/ + # 删除之前的软链 + rm -rf app + # 创建新的软链 + ln -s app\_2.0.1\_d97c0e8a ./app + ``` + +2. 切换到备节点 omm 用户 + + 1. 进入数据库安装目录 + + ``` + cd /opt/gaussdb + ``` + + 2. 创建新的 app 目录 + + ``` + mkdir app_2.0.1_d97c0e8a + ``` + + 3. 复制压缩包 + + ``` + scp 主机IP:/opt/gaussdb/app\_2.0.1\_d97c0e8a/openGauss-Package-bak\_d97c0e8a.tar.gz app\_2.0.1\_d97c0e8a/ + ``` + + 4. 解压压缩包 + + ``` + cd app\_2.0.1\_d97c0e8a/ + tar -xvf openGauss-Package-bak\_d97c0e8a.tar.gz + ``` + + 5. 
软链 + + ``` + cd /opt/gaussdb/ + # 删除之前的软链 + rm -rf app + # 创建新的软链 + ln -s app\_2.0.1\_d97c0e8a ./app + ``` + +##### 4.3 重启数据库 + +``` +gs_om -t restart -D /gaussdb/data/dn1/ +``` + +##### 4.4 查看数据库 + +- ``` + gsql -p 端口 postgres + ``` + +- ``` + gaussdb --version + ``` diff --git "a/content/zh/post/enmo/openGauss MogDB\346\225\260\346\215\256\345\272\223\346\234\215\345\212\241\345\220\257\345\212\250\346\250\241\345\274\217\345\210\206\346\236\220.md" "b/content/zh/post/enmo/openGauss MogDB\346\225\260\346\215\256\345\272\223\346\234\215\345\212\241\345\220\257\345\212\250\346\250\241\345\274\217\345\210\206\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..bddb1bf0955ded2328addc0a6899edd7aa1bf16c --- /dev/null +++ "b/content/zh/post/enmo/openGauss MogDB\346\225\260\346\215\256\345\272\223\346\234\215\345\212\241\345\220\257\345\212\250\346\250\241\345\274\217\345\210\206\346\236\220.md" @@ -0,0 +1,70 @@ ++++ + +title = "openGauss/MogDB数据库服务启动模式分析" + +date = "2022-04-11" + +tags = ["openGauss/MogDB数据库服务启动模式分析"] + +archives = "2022-04" + +author = "恩墨交付团队" + +summary = "openGauss/MogDB数据库服务启动模式分析" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" + ++++ + +# openGauss/MogDB数据库服务启动模式分析 + +## SERVERMODE参数 + +我们查看gs_ctl命令的帮助,可以看到-M选项,也就是SERVERMODE服务启动模式 + +``` + -M the database start as the appointed mode +``` + +后面可以看到SERVERMODE参数的四种值 + +```sql +SERVERMODE are: + primary database system run as a primary server, send xlog to standby server + standby database system run as a standby server, receive xlog from primary server + cascade_standby database system run as a cascade standby server, receive xlog from standby server + pending database system run as a pending server, wait for promoting to primary or demoting to standby + +``` + +比较常见的是在搭建主备时使用primary及standby这两个值,本文只讨论这两种值,其它值的后续文章会探讨。 + +## 默认启动模式 + +如果我们使用gs_ctl启动服务时不指定SERVERMODE,默认会使用primary模式启动服务,这个在单机模式下是合适的。 + +## primary及standby模式 + +相比PostgreSQL主备搭建的方式,MogDB并不是在备库单独创建一个standby的触发文件,然后启动服务。 + +MogDB需要在主库和备库以不同的模式启动,主库是primary模式启动,备库是standby启动。 + +主库启动命令: + +``` +$ gs_ctl start -D data -M primary +``` + +启动完成之后查看进程可以看到启动模式为primary +![](../images/20211124-cc72a2d2-2d38-4bba-b79a-0e3b8ddbc7ff.png) + +备库启动命令: + +``` +$ gs_ctl start -D data -M standby +启动完成之后查看进程可以看到启动模式为standby +``` + +如果备库我们没有使用-M启动模式,或者启动模式不是standby,则不会建立主备关系,此时我们不能简单通过关闭,重新以standby模式来恢复主备关系,只能使用build操作来重建备库。所以在主备环境下启动备库一定要注意使用standby模式启动。 diff --git "a/content/zh/post/enmo/openGauss\344\271\213\344\270\273\345\244\207\345\210\207\346\215\242.md" "b/content/zh/post/enmo/openGauss\344\271\213\344\270\273\345\244\207\345\210\207\346\215\242.md" new file mode 100644 index 0000000000000000000000000000000000000000..fdd60d4de76e9fb382eb211a6c371ad17a46b0b7 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\344\271\213\344\270\273\345\244\207\345\210\207\346\215\242.md" @@ -0,0 +1,256 @@ ++++ + +title = "openGauss之主备切换" + +date = "2022-05-12" + +tags = ["openGauss之主备切换"] + +archives = "2022-05" + +author = "云和恩墨" + +summary = "openGauss之主备切换" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" ++++ + +# openGauss之主备切换 + +本文出处:[https://www.modb.pro/db/401852](https://www.modb.pro/db/401852) + +### 1.主备切换 + +主备节点都处于正常状态,主备切换只是交换双方在数据库集簇中的角色,通过gs_ctl工具实现主备实例切换,操作步骤如下: +1)查看主备情况,以omm用户登录任意节点主机: + +``` +gs_om -t status --detail +``` + +2)以omm用户登录备节点主机OG2: +准确来说,执行下述命令的意思是将当前节点升主,其他节点降备,如果在主库上执行,本来就是主库,执行之后当然没有影响了。所以为了达到试验效果,我们在备库上执行下述命令: + +``` +gs_ctl switchover -D /data/og2 +[omm@OG2 ~]$ gs_ctl switchover -D /data/og2 
+[2022-05-11 10:51:07.804][19219][][gs_ctl]: gs_ctl switchover ,datadir is /data/og2 +[2022-05-11 10:51:07.804][19219][][gs_ctl]: switchover term (1) +[2022-05-11 10:51:07.821][19219][][gs_ctl]: waiting for server to switchover........... +[2022-05-11 10:51:15.916][19219][][gs_ctl]: done +[2022-05-11 10:51:15.916][19219][][gs_ctl]: switchover completed (/data/og2) + +``` + +/data/og2为备节点的数据目录 + +3)记录主备机器信息 + +``` +gs_om -t refreshconf +[omm@OG2 ~]$ gs_om -t refreshconf +Generating dynamic configuration file for all nodes. +Successfully generated dynamic configuration file. + +``` + +4)查验 + +``` +[omm@OG2 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Normal +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + + nodenode_ip port instance state +------------------------------------------------------------------------ +1 OG1 192.168.1.100 15400 6001 /data/og1 P Standby Normal +2 OG2 192.168.1.101 15400 6002 /data/og2 S Primary Normal + +``` + +经查,备节点OG2确实切换成了主节点。 + +### 2.使用场景: + +需要进行主备切换时,例如数据库进行故障转移后需要恢复原来的主备关系,或发现数据库主机硬件异常,需要进行主备切换,保证数据库业务的持续性。 +说明: +(1)级联备机不能直接转换为主机,只能先通过switchover或者failover成为备机,然后再切换为主机 +(2)主备切换为维护操作,确保openGauss状态正常,所有业务结束后,再进行切换操作。 + +### 3.双主现象 + +#### 1)解决双主现象的基本步骤 + +如果由于网络故障、磁盘满等导致主备连接断开,出现双主现象,恢复步骤如下,否则会导致数据丢失: +(1)查看主备状态,如果都是两个数据库节点都为primary,说明出现故障 + +``` +gs_om -t status --detail +``` + +(2)以omm用户登录待降备的节点,然后关闭服务: + +``` +gs_ctl stop -D /data/og2 +``` + +(3)然后以standby模式启动服务: + +``` +gs_ctl start -D /data/og2 -M standby +``` + +(4)保存数据库主备机器信息 + +``` +gs_om -t refreshconf +``` + +(5)检查确认 + +``` +gs_om -t status --detail +``` + +#### 2)模拟双主现象 + +为了做上面的双主现象实验,在og1和og2成功进行主备切换之后,现在的状态为: +og1 备 +og2 主 +(1)为了达到双主,登录OG1主机,执行如下命令: + +``` +gs_ctl stop -D /data/og1 gs_ctl start -D /data/og1 -M primary //以primary模式启动OG1 +``` + +然后查看状态: + +``` +gs_om -t status --detail +[omm@OG1 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Unavailable +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + + nodenode_ip port instance state +------------------------------------------------------------------------ +1 OG1 192.168.1.100 15400 6001 /data/og1 P Primary Normal +2 OG2 192.168.1.101 15400 6002 /data/og2 S Primary Normal + +``` + +确实出现了双主现象。 +  当然我们可以通过failover模拟双主现象,例如:在主备正常的情况下,在备库下执行gs_ctl failover故障转移,将当前备库提升为主,此时查看数据库集簇的状态就会发现,两个节点都是主节点。当然failover故障转移是用在,当主库挂掉了,在备库上执行,将备库提升为主,继续向外提供服务,保持业务的持续稳定性。 + +#### 3)恢复 + +此时我想要将数据库集簇恢复到一主一备的状态,且og1为主,og2为备,根据恢复基本步骤,登录og2节点,进行如下操作: + +``` +gs_ctl stop -D /data/og2 gs_ctl start -D /data/og2 -M standby gs_om -t refreshconf +``` + +操作完毕之后,查看数据库集簇状态: + +``` +[omm@OG2 ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Degraded +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + + nodenode_ip port instance state +------------------------------------------------------------------------ +1 OG1 192.168.1.100 15400 6001 /data/og1 P Primary Normal +2 OG2 192.168.1.101 15400 6002 /data/og2 S Standby Need repair(WAL) + +``` + +发现一主一备的关系恢复了,但是OG2出现故障需要恢复,可能是主备不一致导致的问题。 + +#### 4)修复 Standby Need repair + +方法一:重新安装openGauss: +卸载: +(1)以omm用户登录任意主机执行: +gs_uninstall --delete-data +(2)安装openGauss +gs_install -X /opt/software/openGauss/cluster_config.xml --gsinit-parameter="– +locale=en_US.utf8" +其中要求输入数据库密码。 +这种方法你估计不喜欢。 + +方法二: +概述:备份og2节点数据目录下的配置文件,将og2节点的数据库数据目录清空,然后将og1节点的数据目录下的文件拷贝过来,使用备份的配置文件替换拷贝过来的配置文件。 +(0)关闭数据库集簇 + +``` +gs_om -t stop +``` + +(1)备份og2节点数据目录下的配置文件 + +``` +[root@OG2 ~]# mkdir /og2conf [root@OG2 ~]# chown omm.dbgrp /og2conf [root@OG2 ~]# su - 
omm
+Last login: Wed May 11 09:49:38 CST 2022 on pts/0
+[omm@OG2 ~]$ cp /data/og2/*.conf /og2conf
+```
+
+(2)清空og2节点的数据目录
+
+```
+[omm@OG2 ~]$ rm -rf /data/og2/*
+```
+
+(3)登录og1,将og1节点的数据目录下的所有文件拷贝到og2节点的数据目录下:
+
+```
+[omm@OG1 ~]$ scp -rp /data/og1/* og2:/data/og2
+```
+
+(4)登录og2,替换配置文件
+
+```
+[omm@OG2 ~]$ cp /og2conf/* /data/og2
+```
+
+(5)启动数据库集簇,并检查集簇状态
+
+```
+[omm@OG1 ~]$ gs_om -t start
+Starting cluster.
+=========================================
+[SUCCESS] OG1
+2022-05-11 11:23:30.967 627b2c32.1 [unknown] 140707753452480 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (2462 Mbytes) is larger.
+[SUCCESS] OG2
+2022-05-11 11:23:34.410 627b2c36.1 [unknown] 140529291215808 [unknown] 0 dn_6001_6002 01000 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (1024 Mbytes) or shared memory (2462 Mbytes) is larger.
+Waiting for check cluster state...
+=========================================
+Successfully started.
+[omm@OG1 ~]$ gs_om -t status --detail
+[ Cluster State ]
+
+cluster_state   : Normal
+redistributing  : No
+current_az      : AZ_ALL
+
+[ Datanode State ]
+
+    node  node_ip        port   instance       state
+------------------------------------------------------------------------
+1  OG1 192.168.1.100 15400  6001 /data/og1 P Primary Normal
+2  OG2 192.168.1.101 15400  6002 /data/og2 S Standby Normal
+```
+
+可以发现,Standby Need repair故障就解决了。
+ diff --git "a/content/zh/post/enmo/openGauss\344\271\213\346\225\260\346\215\256\345\272\223\345\257\271\350\261\241\347\256\200\345\215\225\347\256\241\347\220\206.md" "b/content/zh/post/enmo/openGauss\344\271\213\346\225\260\346\215\256\345\272\223\345\257\271\350\261\241\347\256\200\345\215\225\347\256\241\347\220\206.md"
new file mode 100644
index 0000000000000000000000000000000000000000..7fd4b8fda5dd0c5f54a711985bcc54a3ec863290
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss\344\271\213\346\225\260\346\215\256\345\272\223\345\257\271\350\261\241\347\256\200\345\215\225\347\256\241\347\220\206.md"
@@ -0,0 +1,392 @@
++++
+
+title = "openGauss之数据库对象简单管理"
+
+date = "2022-05-12"
+
+tags = ["openGauss之数据库对象简单管理"]
+
+archives = "2022-05"
+
+author = "云和恩墨"
+
+summary = "openGauss之数据库对象简单管理"
+
+img = "/zh/post/enmo/title/img6.png"
+
+times = "10:20"
++++
+
+# openGauss之数据库对象简单管理
+
+本文出处:[https://www.modb.pro/db/401060](https://www.modb.pro/db/401060)
+
+### 1.数据库管理
+
+#### (1)创建数据库
+
+基本语法:create database database_name 选项;
+
+```
+//简单创建一个库,默认会通过复制标准系统数据库template0来创建
+create database dbtest;
+//创建自定义数据库
+create database dbtest1 encoding 'utf-8' template template0 owner test;
+选项说明:
+encoding 指定数据库的编码格式
+template 指定从哪个标准数据库复制
+owner 指定数据库的拥有者
+```
+
+#### (2)查看数据库
+
+```
+//查看数据库列表(示例)
+\l
+//或通过系统表查询数据库信息(示例)
+select datname from pg_database;
+```
+
+#### (3)修改数据库
+
+```
+//修改数据库名字
+alter database dbtest rename to dbtest2;
+//修改数据库的所有者
+alter database dbtest2 owner to test1;
+//修改数据库的连接数限制
+alter database dbtest1 connection limit 10;
+```
+
+#### (4)删除数据库
+
+```
+drop database dbtest1;
+drop database dbtest2;
+```
+
+更多信息详见官网:[https://docs.mogdb.io/zh/mogdb/v2.1/overview](https://docs.mogdb.io/zh/mogdb/v2.1/overview)
+
+### 2.表管理
+
+#### (1)创建表
+
+基本语法:create table 表名(字段名 字段类型 字段约束,…)
+
+```
+//默认创建的是行存表,如果不指定模式,会在search_path中的第一个模式下创建
+create table student(id int, name varchar(20));
+
+//通过like子句快速创建表,并指定模式
+create table test.student1 (like student);
+
+//gsql客户端查看表结构
+\d+ student
+
+//查看表的定义
+select pg_get_tabledef('student2');
+
+//查看表信息
+select * from pg_tables where tablename='student2';
+```
+
+#### (2)修改表
+
+```
+//修改表名
+alter table student rename to student2;
+//增加字段
+alter table student2 add age int;
+//修改字段类型
+alter table student2 alter column name type varchar(10);
+alter table student2 modify (name varchar(15));
+//删除字段
+alter table student2 drop column age;
+```
+
+#### (3)向表中插入数据
+
+```
+//插入一条记录
+insert into student2 values (1,'张三');
+//向表中插入多条记录
+insert into student2 values (2,'李四'),(3,'王五');
+//通过select子句向表中插入数据
+insert into student1 select * from student2;
+```
+
+#### (4)删除表中的数据
+
+```
+//删除满足指定条件的记录
+delete from student1 where name = '张三';
+
+//删除表中所有数据
+delete from student1;
+或
+truncate table student2;(推荐使用,对于大表速度明显快)
+
+delete和truncate区别:
+delete:会进行表扫描,每删除一行,就会在事务日志中添加一条记录,删除内容,不删除定义,不释放空间,所以当表执行了大量的delete操作之后,记得执行vacuum进行垃圾回收(vacuum 表名)。
+truncate:不会进行表扫描,删除内容,释放空间,不删除定义。
+```
+
+#### (5)修改表中的数据
+
+```
+//将name为张三的记录的name字段改为张三1
+update student2 set name = '张三1' where name = '张三';
+```
+
+#### (6)删除表
+
+```
+drop table student1;
+```
+
+### 3.索引管理
+
+#### (1)创建索引
+
+```
+//创建普通索引
+create index stu_idx1 on student2(id);
+
+//创建唯一索引
+create unique index stu_unq_indx1 on student2(name);
+
+//查看索引信息
+select * from pg_indexes where indexname='stu_idx1';
+```
+
+#### (2)修改索引
+
+```
+//重命名索引
+alter index stu_unq_indx1 rename to stu_unq;
+//设置索引不可用
+alter index stu_unq unusable;
+//重建索引
+alter index stu_unq rebuild;
+```
+
+#### (3)删除索引
+
+```
+drop index stu_unq;
+```
+
+### 4.视图管理
+
+#### (1)创建视图
+
+```
+//创建student2表中id小于等于2的视图
+create view stu_view as select * from student2 where id <= 2;
+```
+
+#### (2)查看视图
+
+```
+select * from stu_view;
+//通过系统表pg_views查看视图
+select * from pg_views where viewname='stu_view';
+```
+
+#### (3)删除视图
+
+```
+drop view stu_view;
+```
+
+### 5.存储过程管理
+
+#### (1)创建存储过程
+
+```
+//创建一个指定id然后返回年龄的存储过程(在gsql中执行)
+alter table student2 add age int default 18;
+insert into student2 values(4,'小明',20);
+create or replace procedure get_age(i in out int)
+as
+begin
+select age into i from student2 where id = i;
+end;
+/
+
+//通过navicat查看刚刚创建的存储过程,定义如下:
+CREATE OR REPLACE FUNCTION "test"."get_age"(INOUT "i" int4)
+  RETURNS "pg_catalog"."int4" AS $BODY$ DECLARE
+begin
+select age into i from student2 where id = i;
+end $BODY$
+  LANGUAGE plpgsql VOLATILE
+  COST 100
+//可以发现存储过程和函数是一样的
+
+//查看存储过程信息
+select * from pg_proc where proname='get_age';
+```
+
+#### (2)调用存储过程
+
+```
+call get_age(4);
+select * from get_age(4);
+```
+
+#### (3)删除存储过程
+
+```
+drop procedure get_age;
+```
+
+### 6.用户管理
+
+#### (1)创建用户
+
+```
+//简单创建一个用户
+create user u1 password 'u1@12345';
+//创建具有管理员权限的用户
+create user u2 sysadmin identified by 'u2@12345';
+
+//创建用户u3,要求u3用户第一次登录就要改密码
+create user u3 password 'u3@12345' expired;
+
+//通过系统视图查看用户信息
+select * from pg_user where usename='test';
+```
+
+#### (2)修改用户
+
+```
+//修改u1用户的密码为‘u1@23456’
+alter user u1 identified by 'u1@23456' replace 'u1@12345';
+//为u1用户追加createrole权限
+alter user u1 createrole;
+//锁定u1用户
+alter user u1 account lock;
+//解锁u1用户
+alter user u1 account unlock;
+```
+
+#### (3)删除用户
+
+```
+drop user u1;
+drop user u2;
+drop user u3;
+```
+
+### 7.表空间管理
+
+#### (1)创建表空间
+
+```
+//创建一个表空间,通过relative关键字,会相对于数据库节点数据目录下的pg_location目录创建相应的目录
+create tablespace test_nsp relative location 'test';
+
+//创建表空间并指定绝对路径位置
+create tablespace test_nsp1 location '/mogdb/data/pg_test';
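+
+//示意注释:relative 方式在数据目录的 pg_location 子目录下按相对路径创建目录;
+//绝对路径方式(如上面的 /mogdb/data/pg_test)要求目录可由数据库安装用户(如 omm)读写。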
+ +//查看表空间信息 +select * from pg_tablespace a,pg_user b where a.spcowner = b.usesysid and a.spcname = 'test_nsp'; +``` + +#### (2)修改表空间 + +``` +//修改表空间的所有者 +alter tablespace test_nsp owner to test1; + +//修改表空间名字 +alter tablespace test_nsp rename to test_nsp_new; +``` + +#### (3)删除表空间 + +``` +drop tablespace test_nsp1; +drop tablespace test_nsp_new; +``` + +### 8.权限管理 + +#### (1)授权 + +``` +//创建一个测试用户 +create user jack password 'jack@123'; +//将对表的insert、select权限授予用户jack之前,需要将表student2所在模式的usage权限授予test +grant usage on schema test to jack; +grant insert,select on table test.student2 to jack; + +//将对表student2的age字段update权限授予jack用户 +grant update(age) on table test.student2 to jack; + +//将系统权限授予jack用户 +openGauss=> grant all privileges to jack; +ALTER ROLE +//发现授权之后的结果是alter role,说明对于系统权限的授予我们可以使用alter //user进行。 +//系统权限有:SYSADMIN、CREATEDB、CREATEROLE、AUDITADMIN、MONADMIN、OPRADMIN、POLADMIN和LOGIN。 + +//将createrole权限授予jack用户 +alter user jack createrole; +``` + +#### (2)查看权限 + +``` +//1.查看某用户的系统权限 +select * from pg_roles where rolname = 'jack'; +//2.查看某用户的表权限 +select * from information_schema.table_privileges where grantee = 'jack'; +//3.查看某用户的usage权限 +select * from information_schema.usage_privileges where grantee = 'jack'; +//4.查看某个用户在表的列上的权限 +select * from information_schema.column_privileges where grantee = 'jack'; +``` + +#### (3)撤销权限 + +``` +//撤销上述授予的权限 +revoke select,insert on table test.student2 from jack; +revoke update(age) on table test.student2 from jack; +revoke all privileges from jack; +revoke usage on schema test from jack; +``` + +### 9.模式管理 + +#### (1)创建模式 + +``` +//简单的创建一个模式 +create schema test_sch1; +//新建一个角色,然后创建一个与该角色同名的模式,并在该模式下创建一张表 +create role test2 identified by 'test@123'; +create schema authorization test2 create table student3(id int,name varchar(15)); + +//通过information_schema模式下的视图schemata查看模式 +select * from information_schema.schemata where schema_name='test_sch1'; +``` + +#### (2)修改模式 + +``` +//修改模式名 alter schema test_sch1 rename to test_sch2; //修改模式的所有者 alter schema test2 owner to test; +``` + +### (3)删除模式 + +``` +drop schema test_sch2; +//当模式下有对象时,需要使用cascade进行删除 +drop schema test2 cascade; +``` diff --git "a/content/zh/post/enmo/openGauss\346\225\260\346\215\256\345\272\223\347\232\204\344\272\213\345\212\241\351\224\201.md" "b/content/zh/post/enmo/openGauss\346\225\260\346\215\256\345\272\223\347\232\204\344\272\213\345\212\241\351\224\201.md" new file mode 100644 index 0000000000000000000000000000000000000000..1a251fd122bbc288106965800d37d7982da179be --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\225\260\346\215\256\345\272\223\347\232\204\344\272\213\345\212\241\351\224\201.md" @@ -0,0 +1,370 @@ ++++ + +title = "openGauss数据库的事务锁" + +date = "2022-04-07" + +tags = ["openGauss数据库的事务锁"] + +archives = "2022-04" + +author = "云和恩墨交付战队" + +summary = "openGauss数据库的事务锁" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" + ++++ + +# openGauss数据库的事务锁 + +openGauss数据库事务锁分为两类:表级锁和行级锁 + +## 表级锁 + +表级锁有8种模式: + +**ACCESS SHARE** + +只与ACCESS EXCLUSIVE冲突。 + +SELECT命令在被引用的表上请求一个这种锁。通常,任何只读取表而不修改它的命令都请求这种锁模式。 + +**ROW SHARE** + +与EXCLUSIVE和ACCESS EXCLUSIVE锁模式冲突。 + +SELECT FOR UPDATE和SELECT FOR SHARE命令会自动在目标表上请求ROW SHARE锁(且所有被引用但不是FOR SHARE/FOR UPDATE的其他表上,还会自动加上ACCESS SHARE锁)。 + +**ROW EXCLUSIVE** + +与ROW SHARE锁相同,ROW EXCLUSIVE允许并发读取表,但是禁止修改表中数据。UPDATE,DELETE,INSERT命令会自动在目标表上请求这个锁(且所有被引用的其他表上还会自动加上的ACCESS SHARE锁)。通常情况下,所有会修改表数据的命令都会请求表的ROW EXCLUSIVE锁。 + +**SHARE UPDATE EXCLUSIVE** + +这个模式保护一个表的模式不被并发修改,以及禁止在目标表上执行垃圾回收命令(VACUUM )。 + +VACUUM(不带FULL选项),ANALYZE,CREATE INDEX 
+**手动获取表级锁**
+
+可以在事务块内部执行lock table命令获取指定的表级锁。
+
+LOCK [ TABLE ] {[ ONLY ] name [, …]| {name [ * ]} [, …]}
+[ IN {ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE} MODE ]
+[ NOWAIT ];
+
+```sql
+postgres=# begin;
+BEGIN
+postgres=# lock table stu ;
+LOCK TABLE
+postgres=# select pg_backend_pid();
+ pg_backend_pid
+-----------------
+ 140645423245056
+(1 row)
+
+postgres=# select * from pg_locks where pid ='140645423245056';
+ locktype | database | relation | page | tuple | bucket | virtualxid | transactionid | classid | objid | objsubid | virtualtransaction | pid | sessionid | mode
+ | granted | fastpath | locktag
+---------------+----------+----------+------+-------+--------+------------+---------------+---------+-------+----------+--------------------+-----------------+-----------------+----------------
+-----+---------+----------+-------------------
+ virtualxid | | | | | | 3/6288 | | | | | 3/6288 | 140645423245056 | 140645423245056 | ExclusiveLock
+ | t | t | 3:1890:0:0:0:7
+ relation | 15103 | 16385 | | | | | | | | | 3/6288 | 140645423245056 | 140645423245056 | AccessExclusive
+Lock | t | f | 3aff:4001:0:0:0:0
+ transactionid | | | | | | | 73662 | | | | 3/6288 | 140645423245056 | 140645423245056 | ExclusiveLock
+ | t | f | 11fbe:0:0:0:0:6
+(3 rows)
+
+
+```
+
+## 行级锁
+
+行级锁有2种模式:
+
+**FOR UPDATE**
+
+FOR UPDATE会导致由SELECT语句检索到的行被锁定。这样避免它们在当前事务结束前被其他事务修改或者删除,即其他企图UPDATE、DELETE、SELECT FOR UPDATE这些行的事务将被阻塞,直到当前事务结束。
+
+**FOR SHARE**
+
+在每个检索出来的行上要求一个共享锁,而不是一个排他锁。一个共享锁阻塞其它事务对这些行执行UPDATE、DELETE、SELECT FOR UPDATE,但不阻塞SELECT FOR SHARE。
+
+**手动获取行级锁**
+
+可以使用select…for update|share命令获取行级锁。
+
+```sql
+postgres=# begin;
+BEGIN
+postgres=# select * from stu for update;
+ id | name
+----+------
+ 1 | a
+ 2 | a
+ 3 | a
+ 4 | a
+(4 rows)
+
+postgres=# select pg_backend_pid();
+ pg_backend_pid
+-----------------
+ 140645423245056
+(1 row)
+
+postgres=#
+postgres=# select * from pg_locks where pid ='140645423245056';
+ locktype | database | relation | page | tuple | bucket | virtualxid | transactionid | classid | objid | objsubid | virtualtransaction | pid | sessionid | mode |
+ granted | fastpath | locktag
+---------------+----------+----------+------+-------+--------+------------+---------------+---------+-------+----------+--------------------+-----------------+-----------------+---------------+
+---------+----------+-------------------
+ relation | 15103 | 16385 | | | | | | | | | 3/6292 | 140645423245056 | 140645423245056 | RowShareLock |
+ t | t | 3aff:4001:0:0:0:0
+ virtualxid | | | | | | 3/6292 | | | | | 3/6292 | 140645423245056 | 140645423245056 | ExclusiveLock |
+ t | t | 3:1894:0:0:0:7
+ transactionid | | | | | | | 73665 | | | | 3/6292 | 140645423245056 | 140645423245056 | ExclusiveLock |
+ t | f | 11fc1:0:0:0:0:6
+(3 rows)
+
+```
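+
+下面用一个示意例子补充说明两种行级锁的区别(沿用上文的示例表stu,仅作演示,非实际抓取的输出):FOR SHARE之间可以并发持有,但会阻塞其他事务对相应行的修改。
+
+```sql
+-- 会话1:对id=1的行加共享锁
+begin;
+select * from stu where id = 1 for share;
+
+-- 会话2:共享锁之间互不冲突,可同时持有同一行的共享锁
+begin;
+select * from stu where id = 1 for share;
+
+-- 会话2:修改该行需要行上的排他锁,会被会话1的共享锁阻塞
+update stu set name = 'c' where id = 1;
+
+-- 会话1:提交释放共享锁后,会话2的UPDATE才能继续执行
+commit;
+```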
+**查询数据库中锁阻塞信息**
+
+查询视图pg_locks可以获取数据库中事务锁阻塞信息
+
+select * from pg_locks;
+
+```sql
+postgres=# select * from pg_locks;
+ locktype | database | relation | page | tuple | bucket | virtualxid | transactionid | classid | objid | objsubid | virtualtransaction | pid | sessionid | mode
+ | granted | fastpath | locktag
+---------------+----------+----------+------+-------+--------+------------+---------------+---------+-------+----------+--------------------+-----------------+-----------------+----------------
+-----+---------+----------+-------------------
+virtualxid | | | | | | 9/48 | | | | | 9/48 | 140645423245056 | 140645423245056 | ExclusiveLock
+ | t | t | 9:30:0:0:0:7
+virtualxid | | | | | | 3/6309 | | | | | 3/6309 | 140645440026368 | 140645440026368 | ExclusiveLock
+ | t | t | 3:18a5:0:0:0:7
+relation | 15103 | 11835 | | | | | | | | | 8/19332 | 140645607831296 | 140645607831296 | AccessShareLock
+ | t | t | 3aff:2e3b:0:0:0:0
+virtualxid | | | | | | 8/19332 | | | | | 8/19332 | 140645607831296 | 140645607831296 | ExclusiveLock
+ | t | t | 8:4b84:0:0:0:7
+relation | 15103 | 16385 | | | | | | | | | 9/48 | 140645423245056 | 140645423245056 | AccessExclusive
+Lock | t | f | 3aff:4001:0:0:0:0
+relation | 15103 | 16385 | | | | | | | | | 3/6309 | 140645440026368 | 140645440026368 | AccessExclusive
+Lock | f | f | 3aff:4001:0:0:0:0
+transactionid | | | | | | | 73670 | | | | 9/48 | 140645423245056 | 140645423245056 | ExclusiveLock
+ | t | f | 11fc6:0:0:0:0:6
+transactionid | | | | | | | 73671 | | | | 3/6309 | 140645440026368 | 140645440026368 | ExclusiveLock
+ | t | f | 11fc7:0:0:0:0:6
+(8 rows)
+
+postgres=# select locktag_decode('11fc7:0:0:0:0:6');
+ locktag_decode
+---------------------------------------------
+locktype:transactionid, transactionid:73671
+(1 row)
+
+```
+
+其中各列的描述如下:
+
+locktype:被锁定对象的类型,共12种。
+
+| Lock 等待事件 | 描述 |
+| ---------------- | --------------------- |
+| relation | 对表加锁。 |
+| extend | 对表扩展空间时加锁。 |
+| partition | 对分区表加锁。 |
+| partition_seq | 对分区表的分区加锁。 |
+| page | 对表页面加锁。 |
+| tuple | 对页面上的tuple加锁。 |
+| transactionid | 对事务ID加锁。 |
+| virtualxid | 对虚拟事务ID加锁。 |
+| object | 加对象锁。 |
+| cstore_freespace | 对列存空闲空间加锁。 |
+| userlock | 加用户锁。 |
+| advisory | 加advisory锁。 |
+
+database:被锁定对象所在数据库的OID。
+
+relation:被锁定对象的OID。
+
+page:被锁定对象的页面编号。
+
+tuple:被锁定对象的页面里边的行编号。
+
+bucket:子表对应的bucket number。
+
+virtualxid:事务的虚拟ID。
+
+transactionid:事务的ID。
+
+classid:包含该被锁定对象的系统表的OID。
+
+objid:被锁定对象在其系统表内的OID。
+
+objsubid:对于表的一个字段,这是字段编号;对于其他对象类型,这个字段是零。
+
+virtualtransaction:持有此锁或者在等待此锁的事务的虚拟ID。
+
+pid:持有或者等待这个锁的服务器线程的逻辑ID。
+
+sessionid:持有或者等待这个锁的会话ID。
+
+mode:这个线程持有的或者是期望的锁模式。
+
+granted:如果锁是持有锁,则为TRUE。如果锁是等待锁,则为FALSE。
+
+fastpath:如果通过fast-path获得锁,则为TRUE;如果通过主要的锁表获得,则为FALSE。
+
+locktag:会话等待锁信息,可通过locktag_decode()函数解析。
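+
+结合上述字段含义,排查锁等待时通常只需关注granted为f的记录,并用上文出现的locktag_decode()函数解析其等待的对象。下面是一个示意查询:
+
+```sql
+-- 查看当前所有处于等待状态的锁,并解析其locktag(示意)
+select pid, mode, locktag, locktag_decode(locktag)
+from pg_locks
+where granted = 'f';
+```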
+**阻塞会话查询**
+
+```sql
+with lock as (select usename,granted,locktag,query_start,query,l.pid
+from pg_locks l,pg_stat_activity a
+where l.pid=a.pid and locktag in(select locktag from pg_locks where granted='f'))
+select locker.usename locker_user,locker.query_start locker_query_start,locker.granted locker_granted,locker.query locker_query,locker.pid locker_pid,locked.pid locked_pid,locked.query locked_query,locked.query_start locked_query_start,locked.granted locked_granted,locked.usename locked_user,extract(epoch from now() - locked.query_start) as locked_times
+from (select * from lock where granted='t') as locker,(select * from lock where granted='f') locked
+where locker.locktag=locked.locktag
+order by 1;
+```
+
+
+
+```sql
+postgres=# with lock as (select usename,granted,locktag,query_start,query,l.pid
+postgres(# from pg_locks l,pg_stat_activity a
+postgres(# where l.pid=a.pid and locktag in(select locktag from pg_locks where granted='f'))
+postgres-# select locker.usename locker_user,locker.query_start locker_query_start,locker.granted locker_granted,locker.query locker_query,locker.pid locker_pid,locked.pid locked_pid,locked.query locked_query,locked.query_start locked_query_start,locked.granted locked_granted,locked.usename locked_user,extract(epoch from now() - locked.query_start) as locked_times
+postgres-# from (select * from lock where granted='t') as locker,(select * from lock where granted='f') locked
+postgres-# where locker.locktag=locked.locktag
+postgres-# order by 1;
+ locker_user | locker_query_start | locker_granted | locker_query | locker_pid | locked_pid | locked_query | l
+ocked_query_start | locked_granted | locked_user | locked_times
+-------------+------------------------------+----------------+-------------------------------------------+-----------------+-----------------+-------------------------------------------+-------
+------------------------+----------------+-------------+--------------
+ omm | 2021-12-13 16:13:04.75289+08 | t | lock table stu in ACCESS EXCLUSIVE mode; | 140645423245056 | 140645440026368 | lock table stu in ACCESS EXCLUSIVE mode; | 2021-1
+2-13 16:13:09.585616+08 | f | omm | 143.055227
+(1 row)
+
+```
+
+**手动kill掉锁阻塞会话**
+
+```sql
+postgres=# select pg_terminate_backend(140645423245056);
+ pg_terminate_backend
+----------------------
+ t
+(1 row)
+```
+
+**锁等待超时**
+
+当申请锁的操作等待时间超过数据库lockwait_timeout参数的设定值时,系统会报错。lockwait_timeout参数默认20分钟。
+
+当更新操作等待时间超过数据库update_lockwait_timeout参数的设定值时,系统会报错。update_lockwait_timeout参数默认2分钟。
+
+**死锁检测**
+
+openGauss数据库能够自动侦测到死锁,然后退出其中一个事务,从而允许其他事务执行。
+
+当申请锁的等待时间超过数据库参数deadlock_timeout的设定值时,系统会检查是否产生了死锁。deadlock_timeout参数默认值为1s。
+
+**测试**
+
+测试前将数据库参数log_lock_waits设置为on,使会话为获得一个锁等待超过deadlock_timeout时产生一条日志消息。该参数默认为off,不记录。
+
+测试1:会话2执行锁表操作时,发生锁等待,等待时间达到1s时,系统进行死锁检测。锁等待时间达到lockwait_timeout设置的20min时,会话2执行的锁表操作报错退出,提示锁等待超时。
+
+```sql
+#会话1:
+postgres=# begin;
+BEGIN
+postgres=# lock table stu in ACCESS EXCLUSIVE mode;
+LOCK TABLE
+postgres=# select now();
+ now
+-------------------------------
+ 2021-12-13 15:25:23.535138+08
+(1 row)
+#会话2:
+postgres=# begin;
+BEGIN
+postgres=# lock table stu in ACCESS EXCLUSIVE mode;
+ERROR: Lock wait timeout: thread 140645440026368 on node dn_6001 waiting for AccessExclusiveLock on relation 16385 of database 15103 after 1199000.033 ms
+DETAIL: blocked by hold lock thread 140645423245056, statement , hold lockmode AccessExclusiveLock.
+2021-12-13 15:46:40.395 61b6f5ab.5064 postgres 140645440026368 gsql 73671 dn_6001 YY003 2533274790414298 [BACKEND] STATEMENT: lock table stu in ACCESS EXCLUSIVE mode;
+
+```
+
+测试2:更新操作,锁等待时间达到update_lockwait_timeout设置的2min时,会话2执行的update操作报错退出,提示锁等待超时。
+
+```sql
+#会话1:
+postgres=# begin;
+BEGIN
+postgres=# select * from stu for update;
+ id | name
+----+------
+ 1 | a
+ 2 | a
+ 3 | a
+ 4 | a
+(4 rows)
+
+#会话2:
+postgres=# begin;
+BEGIN
+postgres=# update stu set name = 'b' where id=2;
+ERROR: Lock wait timeout: thread 140645607831296 on node dn_6001 waiting for ShareLock on transaction 73665 after 119000.041 ms
+DETAIL: blocked by hold lock thread 140645423245056, statement , hold lockmode ExclusiveLock.
+2021-12-13 14:56:24.802 61b6ee19.5067 postgres 140645607831296 gsql 73666 dn_6001 YY003 2533274790414263 [BACKEND] STATEMENT: update stu set name = 'b' where id=2;
+```
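+
+作为补充,下面给出一个构造死锁的示意例子(沿用上文的示例表stu,其中存在id=1和id=2两行,仅作演示,非实际抓取的输出):两个会话以相反的顺序更新两行,互相等待对方持有的行锁,达到deadlock_timeout后死锁检测会让其中一个事务报错回滚,另一个事务得以继续。
+
+```sql
+-- 会话1:先锁住id=1的行
+begin;
+update stu set name = 'x' where id = 1;
+
+-- 会话2:先锁住id=2的行
+begin;
+update stu set name = 'y' where id = 2;
+
+-- 会话1:再更新id=2,等待会话2持有的行锁
+update stu set name = 'x' where id = 2;
+
+-- 会话2:再更新id=1,与会话1形成相互等待,
+-- 死锁检测后其中一个事务将报错回滚
+update stu set name = 'y' where id = 1;
+```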
diff --git "a/content/zh/post/enmo/openGauss\346\225\260\346\215\256\345\272\223\347\273\264\346\212\244\347\233\270\345\205\263\345\221\275\344\273\244.md" "b/content/zh/post/enmo/openGauss\346\225\260\346\215\256\345\272\223\347\273\264\346\212\244\347\233\270\345\205\263\345\221\275\344\273\244.md"
new file mode 100644
index 0000000000000000000000000000000000000000..3a708b9fc557cffc61b08b08bbe514b214dd5b7b
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss\346\225\260\346\215\256\345\272\223\347\273\264\346\212\244\347\233\270\345\205\263\345\221\275\344\273\244.md"
@@ -0,0 +1,122 @@
++++
+
+title = "openGauss数据库维护相关命令"
+
+date = "2022-04-07"
+
+tags = ["openGauss数据库维护相关命令"]
+
+archives = "2022-04"
+
+author = "云和恩墨交付战队"
+
+summary = "openGauss数据库维护相关命令"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
+
++++
+
+# openGauss数据库维护常用命令
+
+### 查看数据库版本:
+
+select version();
+
+### 查看数据库启动时间:
+
+select pg_postmaster_start_time();
+
+### 查看最后加载配置文件的时间:
+
+select pg_conf_load_time();
+
+### 查看数据库时区:
+
+show timezone;
+
+### 查看数据库当前时间:
+
+select now();
+
+### 查看当前会话pid:
+
+select pg_backend_pid();
+
+### 查看当前数据库会话信息:
+
+select pid,datname,usename,application_name,client_addr,client_port,state,now()-query_start query_duration,query,waiting from pg_stat_activity;
+
+### 终止一个后台服务进程:
+
+select pg_terminate_backend(pid);
+
+### 查看当前的WAL日志相关信息:
+
+select pg_current_xlog_location(),pg_xlogfile_name(pg_current_xlog_location()),pg_xlogfile_name_offset(pg_current_xlog_location());
+
+其中:
+pg_current_xlog_location():获得当前wal日志写入位置。
+pg_xlogfile_name():转换wal日志位置为文件名。
+pg_xlogfile_name_offset():返回转换后的wal日志文件名和偏移量。
+
+### 查看WAL日志缓存有多少字节未写入磁盘:
+
+select pg_xlog_location_diff(pg_current_xlog_insert_location(),pg_current_xlog_location());
+
+其中:
+pg_current_xlog_location():获得当前预写式日志写入位置。
+pg_current_xlog_insert_location():获得当前预写式日志插入位置。
+pg_xlog_location_diff(location pg_lsn, location pg_lsn):计算两个预写式日志位置间的差别。
+
+### 切换WAL日志文件:
+
+select pg_switch_xlog();
+
+### 手动产生一次checkpoint:
+
+checkpoint;
+
+### 切换数据库日志文件:
+
+select pg_rotate_logfile();
+
+### 停止数据库备份:
+
+select pg_stop_backup();
+
+### 查看数据库是否为备库:
+
+select pg_is_in_recovery();
+
+### 查看数据库大小:
+
+select pg_size_pretty(pg_database_size('database_name'));
+
+其中:
+pg_size_pretty()函数,可以根据情况将字节转换为KB、MB、GB 或者 TB。
+
+### 查看表数据大小:
+
+select pg_size_pretty(pg_relation_size('table_name'));
+
+### 查看表数据和表上索引的总大小:
+
+select pg_size_pretty(pg_total_relation_size('table_name'));
+
+### 查看表上所有索引的大小:
+
+select pg_size_pretty(pg_indexes_size('table_name'));
+
+### 查看表空间大小:
+
+select pg_size_pretty(pg_tablespace_size('tablespace_name'));
+
+### 查看表所在数据文件:
+
+select pg_relation_filepath('table_name');
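+
+上面的函数可以组合使用。例如,下面这个示意查询通过pg_class结合pg_total_relation_size(),按表和索引的总大小降序列出当前库中最大的10张表:
+
+select relname,pg_size_pretty(pg_total_relation_size(oid)) from pg_class where relkind = 'r' order by pg_total_relation_size(oid) desc limit 10;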
"openGauss每日一练之定义数据类型" + +date = "2022-04-21" + +tags = ["openGauss每日一练之定义数据类型"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练之定义数据类型" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练之定义数据类型 + +本文出处:[https://www.modb.pro/db/222625](https://www.modb.pro/db/222625) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss定义数据类型** + +## 课后作业 + +**1.创建一个复合类型,重命名复合类型,为复合类型增加属性、删除属性** + +```sql +--SQL文本 +create type comtype as (id integer,name char(10)); +alter type comtype rename to comtype_new; +alter type comtype_new add attribute age integer; +alter type comtype_new drop attribute name; + +omm=# create type comtype as (id integer,name char(10)); +CREATE TYPE +omm=# alter type comtype rename to comtype_new; +ALTER TYPE +omm=# alter type comtype_new add attribute age integer; +ALTER TYPE +omm=# alter type comtype_new drop attribute name; +ALTER TYPE +omm=# + +``` + +**2.创建一个枚举类型,新增标签值,重命名标签值** + +```sql +--SQL文本 create type enumtype as enum ('Monday','Tuesday','Friday'); alter type enumtype add value if not exists 'Wednesday' before 'Friday'; alter type enumtype rename value 'Friday' to 'Thursday'; select * from pg_enum; +omm=# create type comtype as (id integer,name char(10)); +CREATE TYPE +omm=# alter type comtype rename to comtype_new; +ALTER TYPE +omm=# alter type comtype_new add attribute age integer; +ALTER TYPE +omm=# alter type comtype_new drop attribute name; +ALTER TYPE +omm=# create type enumtype as enum ('Monday','Tuesday','Friday'); +CREATE TYPE +omm=# alter type enumtype add value if not exists 'Wednesday' before 'Friday'; +ALTER TYPE +omm=# alter type enumtype rename value 'Friday' to 'Thursday'; +ALTER TYPE +omm=# select * from pg_enum; + enumtypid | enumsortorder | enumlabel +-----------+---------------+----------- + 33462 | 1 | Monday + 33462 | 2 | Tuesday + 33462 | 2.5 | Wednesday + 33462 | 3 | Thursday +(4 rows) + +omm=# +``` + +**3.使用新创建的类型创建表** + +```sql +--SQL文本 +create table t_enumtype (a integer,b comtype_new,c enumtype); + +omm=# create table t_enumtype (a integer,b comtype_new,c enumtype); CREATE TABLE omm=# +``` + +**4.删除类型** + +```sql +--SQL文本 drop table t_enumtype; drop type comtype_new; drop type enumtype; +omm=# drop table t_enumtype; +DROP TABLE +omm=# drop type comtype_new; +DROP TYPE +omm=# drop type enumtype; +DROP TYPE +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\344\271\213\345\257\274\345\205\245\346\225\260\346\215\256.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\344\271\213\345\257\274\345\205\245\346\225\260\346\215\256.md" new file mode 100644 index 0000000000000000000000000000000000000000..0aaaa15471641fafeb4e0c4ac8ac77da5a61567d --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\344\271\213\345\257\274\345\205\245\346\225\260\346\215\256.md" @@ -0,0 +1,134 @@ ++++ + +title = "openGauss每日一练之导入数据" + +date = "2022-04-22" + +tags = ["openGauss每日一练之导入数据"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练之导入数据" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练之导入数据 + +本文出处:[https://www.modb.pro/db/222633](https://www.modb.pro/db/222633) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss导入数据** + +## 课后作业 + +### **1.创建表1并在表中插入数据,分别指定字段和整行为缺省值** + +``` +omm=# create table emp1 ( +omm(# id 
integer, +omm(# name char(10), +omm(# age integer +omm(# ); +CREATE TABLE +omm=# insert into emp1 values (1,'zhao',25); +INSERT 0 1 +omm=# insert into emp1 values (2,'qian',default); +INSERT 0 1 +omm=# insert into emp1 default values ; +INSERT 0 1 +omm=# + +``` + +### **2.创建表2并将表1的数据全部导入表2中** + +``` +omm=# create table emp2 ( +omm(# id integer, +omm(# name char(10), +omm(# age integer +omm(# ); +CREATE TABLE +omm=# insert into emp2 select * from emp1; +INSERT 0 3 +omm=# + +``` + +### **3.创建表3和表4,并合并两个表的数据到表3** + +``` +omm=# create table emp3 ( +omm(# id integer, +omm(# name char(10), +omm(# age integer +omm(# ); +CREATE TABLE +omm=# create table emp4 ( +omm(# id integer, +omm(# name char(10), +omm(# age integer +omm(# ); +CREATE TABLE +omm=# insert into emp3 values +omm-# (1,'zhao',25), +omm-# (2,'qian',27), +omm-# (3,'shun',29); +INSERT 0 3 +omm=# insert into emp4 values +omm-# (1,'zhao',25), +omm-# (2,'li',26), +omm-# (4,'zhou',28); +INSERT 0 3 +omm=# merge into emp3 +omm-# using emp4 +omm-# on (emp3.id=emp4.id) +omm-# when matched then +omm-# update set emp3.name=emp4.name,emp3.age=emp4.age +omm-# when not matched then +omm-# insert values (emp4.id,emp4.name,emp4.age); +MERGE 3 +omm=# select * from emp3; + id | name | age +----+------------+----- + 3 | shun | 29 + 1 | zhao | 25 + 2 | li | 26 + 4 | zhou | 28 +(4 rows) + +omm=# + +``` + +### **4.将表3的数据输出到文件,再将文件中的数据导入到表5** + +``` +omm=# copy emp3 to '/home/omm/emp3.dat'; +COPY 4 +omm=# create table emp5 (like emp3); +CREATE TABLE +omm=# +omm=# copy emp5 from '/home/omm/emp3.dat'; +COPY 4 +omm=# select * from emp5; + id | name | age +----+------------+----- + 3 | shun | 29 + 1 | zhao | 25 + 2 | li | 26 + 4 | zhou | 28 +(4 rows) + +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\344\271\213\345\257\274\345\207\272\346\225\260\346\215\256.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\344\271\213\345\257\274\345\207\272\346\225\260\346\215\256.md" new file mode 100644 index 0000000000000000000000000000000000000000..2653f147e49c414f6d3e3728789fed6b161d9d70 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\344\271\213\345\257\274\345\207\272\346\225\260\346\215\256.md" @@ -0,0 +1,108 @@ ++++ + +title = "openGauss每日一练之导出数据" + +date = "2022-04-22" + +tags = ["openGauss每日一练之导出数据"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练之导出数据" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练之导出数据 + +本文出处:[https://www.modb.pro/db/222633](https://www.modb.pro/db/222633) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss导出数据** + +## 课后作业 + +### **1.创建数据库tpcc,在数据库tpcc中创建模式schema1,在模式schema1中建表products** + +```sql +omm=# create database tpcc; +CREATE DATABASE +omm=# \c tpcc +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "tpcc" as user "omm". 
+tpcc=# create schema schema1; +CREATE SCHEMA +tpcc=# create table products +tpcc-# (id integer, +tpcc(# name char(10), +tpcc(# age integer +tpcc(# ); +CREATE TABLE +tpcc=# insert into products values +tpcc-# (1,'zhang',25), +tpcc-# (2,'qian',27), +tpcc-# (3,'shun',29), +tpcc-# (4,'li',30); +INSERT 0 4 +tpcc=# + +``` + +### **2.使用gs_dump工具以文本格式导出数据库tpcc的全量数据** + +```sql +[omm@mogdb ~]$ gs_dump -f '/home/omm/tpcc_all.sql' -F p tpcc +gs_dump[port='26000'][tpcc][2021-12-24 20:31:39]: The total objects number is 389. +gs_dump[port='26000'][tpcc][2021-12-24 20:31:39]: [100.00%] 389 objects have been dumped. +gs_dump[port='26000'][tpcc][2021-12-24 20:31:39]: dump database tpcc successfully +gs_dump[port='26000'][tpcc][2021-12-24 20:31:39]: total time: 170 ms +[omm@mogdb ~]$ + +``` + +### **3.使用gs_dump工具以文本格式导出模式schema1的定义** + +```sql +[omm@mogdb ~]$ gs_dump -f '/home/omm/schema1_define.sql' -F p -n schema1 -s +gs_dump[port='26000'][omm][2021-12-24 20:34:30]: The total objects number is 378. +gs_dump[port='26000'][omm][2021-12-24 20:34:30]: [100.00%] 378 objects have been dumped. +gs_dump[port='26000'][omm][2021-12-24 20:34:30]: dump database omm successfully +gs_dump[port='26000'][omm][2021-12-24 20:34:30]: total time: 137 ms +[omm@mogdb ~]$ + +``` + +### **4.使用gs_dump工具以文本格式导出数据库tpcc的数据,不包含定义** + +```sql +[omm@mogdb ~]$ gs_dump -f '/home/omm/tpcc_data.sql' -F p tpcc -a +gs_dump[port='26000'][tpcc][2021-12-24 20:35:33]: dump database tpcc successfully +gs_dump[port='26000'][tpcc][2021-12-24 20:35:33]: total time: 115 ms +[omm@mogdb ~]$ + +``` + +### **5.删除表、模式和数据库** + +```sql +tpcc=# drop table products; +DROP TABLE +tpcc=# drop schema schema1; +DROP SCHEMA +tpcc=# \c omm +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "omm" as user "omm". +omm=# drop database tpcc; +DROP DATABASE +omm=# + +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\200\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\200\345\244\251.md" new file mode 100644 index 0000000000000000000000000000000000000000..6ad782eecbd43c7b8ea37f134887180d0a9724c6 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\200\345\244\251.md" @@ -0,0 +1,157 @@ ++++ + +title = "openGauss每日一练第一天" + +date = "2022-04-19" + +tags = ["openGauss每日一练第一天"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练第一天" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练第一天 + +本文出处:[https://www.modb.pro/db/192962](https://www.modb.pro/db/192962) + +
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+ 学习openGauss数据库创建表、插入记录、查询记录和删除表基本使用。
+
+## 课后作业
+
+### 1.创建一个表products
+
+| 字段名 | 数据类型 | 含义 |
+| ------------ | -------- | -------- |
+| product_id | INTEGER | 产品编号 |
+| product_name | Char(30) | 产品名 |
+| category | Char(20) | 种类 |
+
+```
+SQL文本:
+create table products
+(product_id integer,
+ product_name char(30),
+ category char(20)
+);
+```
+
+### 2.向表中插入数据,采用一次插入一条和多条记录的方式
+
+| product_id | product_name | category |
+| ---------- | -------------- | --------- |
+| 1502 | olympus camera | electrncs |
+| 1601 | lamaze | toys |
+| 1700 | wait interface | Books |
+| 1666 | harry potter | toys |
+
+```
+SQL文本:
+insert into products(product_id,product_name,category) values (1502,'olympus camera','electrncs');
+insert into products values (1601,'lamaze','toys');
+insert into products(product_id,product_name,category) values
+(1700,'wait interface','Books'),
+(1666,'harry potter','toys');
+
+omm=# insert into products(product_id,product_name,category) values (1502,'olympus camera','electrncs');
+INSERT 0 1
+omm=# insert into products values (1601,'lamaze','toys');
+INSERT 0 1
+omm=# insert into products(product_id,product_name,category) values
+omm-# (1700,'wait interface','Books'),
+omm-# (1666,'harry potter','toys');
+INSERT 0 2
+omm=#
+```
+
+### 3.查询表中所有记录及记录数
+
+```
+SQL文本:
+查询表中所有记录:
+select * from products;
+
+查询表中记录数:
+select count(*) from products;
+
+查询表中所有记录:
+omm=# select * from products;
+ product_id | product_name | category
+------------+--------------------------------+----------------------
+ 1502 | olympus camera | electrncs
+ 1601 | lamaze | toys
+ 1700 | wait interface | Books
+ 1666 | harry potter | toys
+(4 rows)
+omm=#
+
+查询表中记录数:
+omm=# select count(*) from products;
+ count
+-------
+ 4
+(1 row)
+
+omm=#
+```
+
+### 4.查询表中所有category记录,并将查询结果按升序排序
+
+```
+SQL文本:
+select category from products order by category; --利用order by进行升序排序
+
+omm=# select category from products order by category;
+ category
+----------------------
+ Books
+ electrncs
+ toys
+ toys
+(4 rows)
+
+omm=#
+```
+
+### 5.查询表中category为toys的记录
+
+```
+SQL文本:
+select * from products where category = 'toys';
+
+omm=# select * from products where category = 'toys';
+ product_id | product_name | category
+------------+--------------------------------+----------------------
+ 1601 | lamaze | toys
+ 1666 | harry potter | toys
+(2 rows)
+
+omm=#
+```
+
+### 6.删除表products
+
+```
+SQL文本:
+drop table products;
+
+omm=# drop table products;
+DROP TABLE
+omm=#
+```
diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\203\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\203\345\244\251.md"
new file mode 100644
index 0000000000000000000000000000000000000000..baf9d996ff2b22377b5e3cd552fa65d804e28fc3
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\203\345\244\251.md"
@@ -0,0 +1,138 @@
++++
+
+title = "openGauss每日一练第7天"
+
+date = "2022-04-20"
+
+tags = ["openGauss每日一练第7天"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "openGauss每日一练第7天"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# openGauss每日一练第7天
+
+本文出处:[https://www.modb.pro/db/193181](https://www.modb.pro/db/193181)
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+学习openGauss表空间
+
+表空间用于管理数据对象,与磁盘上的一个目录对应
+
+## 课后作业
+
+### 
**1.创建表空间,表空间tspc1使用相对路径指定所在目录,表空间tspc2指定owner为Lucy** + +```sql +SQL文本: +create tablespace tspc1 relative location 'tbs/tspc1'; +create user lucy password 'lucy_123'; +alter tablespace tspc1 owner to lucy; +\db+ + +omm=# create tablespace tspc1 relative location 'tbs/tspc1'; +CREATE TABLESPACE +omm=# create user lucy password 'lucy_123'; +CREATE ROLE +omm=# alter tablespace tspc1 owner to lucy; +ALTER TABLESPACE +omm=# \db+ + List of tablespaces + Name | Owner | Location | Access privileges | Description +------------+-------+-----------+-------------------+------------- + pg_default | omm | | | + pg_global | omm | | | + tspc1 | lucy | tbs/tspc1 | | +(3 rows) + +omm=# + +``` + +### **2.在表空间tspc1中建表,并使用视图pg_tables查看信息** + +```sql +SQL文本: +create table customer +( c_customer_sk integer, + c_customer_id char(5), + c_first_name char(6), + c_last_name char(8) +) +tablespace tspc1; +select * from pg_tables where tablename = 'customer'; + +omm=# create table customer +omm-# ( c_customer_sk integer, +omm(# c_customer_id char(5), +omm(# c_first_name char(6), +omm(# c_last_name char(8) +omm(# ) +omm-# tablespace tspc1; +CREATE TABLE +omm=# select * from pg_tables where tablename = 'customer'; + schemaname | tablename | tableowner | tablespace | hasindexes | hasrules | hastriggers | tablecreator | created | last_ddl_time +------------+-----------+------------+------------+------------+----------+-------------+--------------+-------------------------------+------------------------------- + public | customer | omm | tspc1 | f | f | f | omm | 2021-12-08 15:07:35.751084+08 | 2021-12-08 15:07:35.751084+08 +(1 row) + +omm=# + +``` + +### **3.重命名tspc1,修改tspc2的用户为Lily,使用\db查看表空间信息** + +```sql +SQL文本: +alter tablespace tspc1 rename to tspc2; +create user lily password 'lily_123'; +alter tablespace tspc2 owner to lily; +\db+ + +omm=# alter tablespace tspc1 rename to tspc2; +ALTER TABLESPACE +omm=# create user lily password 'lily_123'; +CREATE ROLE +omm=# alter tablespace tspc2 owner to lily; +ALTER TABLESPACE +omm=# \db+ + List of tablespaces + Name | Owner | Location | Access privileges | Description +------------+-------+-----------+-------------------+------------- + pg_default | omm | | | + pg_global | omm | | | + tspc2 | lily | tbs/tspc1 | | +(3 rows) + +omm=# +``` + +### **4.删除表空间** + +``` +SQL文本: +drop tablespace if exists tspc2; +drop table customer; +drop tablespace if exists tspc2; + +omm=# drop tablespace if exists tspc2; +ERROR: tablespace "tspc2" is not empty +omm=# drop table customer; +DROP TABLE +omm=# drop tablespace if exists tspc2; +DROP TABLESPACE +omm=# + +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\211\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\211\345\244\251.md" new file mode 100644 index 0000000000000000000000000000000000000000..74fc5679fd030ad4538ce039925d58e74ac7e429 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\270\211\345\244\251.md" @@ -0,0 +1,169 @@ ++++ + +title = "openGauss每日一练第三天" + +date = "2022-04-19" + +tags = ["openGauss每日一练第三天"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练第三天" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练第三天 + +本文出处:[https://www.modb.pro/db/193083](https://www.modb.pro/db/193083) + +
+ +## 学习目标 + +学习openGauss创建数据库、修改数据库属性和删除数据库 + +## 课后作业 + +### **1.分别创建名为tpcc1和tpcc2的数据库** + +``` +SQL文本: +create database tpcc1; +create database tpcc2; +\l + +omm=# create database tpcc1; +CREATE DATABASE +omm=# create database tpcc2; +CREATE DATABASE +omm=# omm=# \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+-------------+-------------+------------------- + omm | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + postgres | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + template0 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm + tpcc1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + tpcc2 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | +(6 rows) + +omm=# +``` + +### **2.将tpcc1数据库重命名为tpcc10** + +``` +SQL文本: +alter database tpcc1 rename to tpcc10; + +omm=# alter database tpcc1 rename to tpcc10; +ALTER DATABASE +omm=# +``` + +### **3.分别使用\l和\l+两个元命令查看数据库信息** + +``` +SQL文本: +\l +\l+ + +omm=# \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+-------------+-------------+------------------- + omm | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + postgres | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + template0 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm + tpcc10 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + tpcc2 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | +(6 rows) + +omm=# \l+ + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges | Size | Tablespace | Description +-----------+-------+----------+-------------+-------------+-------------------+-------+------------+-------------------------------------------- + omm | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | | 11 MB | pg_default | + postgres | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | | 41 MB | pg_default | default administrative connection database + template0 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm +| 10 MB | pg_default | default template for new databases + | | | | | omm=CTc/omm | | | + template1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm +| 10 MB | pg_default | unmodifiable empty database + | | | | | omm=CTc/omm | | | + tpcc10 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | | 10 MB | pg_default | + tpcc2 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | | 10 MB | pg_default | +(6 rows) + +omm=# +``` + +### **4.在数据库tpcc2中创建customer表,字段自定义** + +``` +SQL文本: +\c tpcc2 +create table customer_t +(c_customer_sk integer, + c_customer_id char(5), + c_first_name char(6), + c_last_name char(8) + ) ; + \d + +tpcc2=# \c tpcc2 +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "tpcc2" as user "omm". 
+tpcc2=# create table customer_t +tpcc2-# (c_customer_sk integer, +tpcc2(# c_customer_id char(5), +tpcc2(# c_first_name char(6), +tpcc2(# c_last_name char(8) +tpcc2(# ) ; +CREATE TABLE +tpcc2=# \d + List of relations + Schema | Name | Type | Owner | Storage +---------+------------+-------+-------+---------------------------------- + schema2 | customer_t | table | omm | {orientation=row,compression=no} +(1 row) + +tpcc2=# +``` + +### **5.删除新创建的数据库** + +``` +SQL文本: +\c omm +drop database tpcc2; +drop database tpcc10; + +tpcc2=# \c omm +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "omm" as user "omm". +omm=# drop database tpcc2; +DROP DATABASE +omm=# drop database tpcc10; +DROP DATABASE +omm=# +``` + +### **6.退出gsql程序** + +``` +SQL文本: +\q + +omm=# \q +[omm@modb ~]$ +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\272\214\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\272\214\345\244\251.md" new file mode 100644 index 0000000000000000000000000000000000000000..2b44ecf346095c7e9a46750805e3354f00c6662e --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\272\214\345\244\251.md" @@ -0,0 +1,209 @@ ++++ + +title = "openGauss每日一练第二天" + +date = "2022-04-19" + +tags = ["openGauss每日一练第二天"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练第二天" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练第二天 + +本文出处:[https://www.modb.pro/db/193083](https://www.modb.pro/db/193083) + +
+ +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +学习openGauss数据库查询、更新和删除基本使用。 + +## 课后作业 + +#### 1.创建一个表products + +| 字段名 | 数据类型 | 含义 | +| ------------ | -------- | -------- | +| product_id | INTEGER | 产品编号 | +| product_name | Char(20) | 产品名 | +| category | Char(30) | 种类 | + +``` +SQL文本: +create table products +(product_id integer, + product_name char(30), + category char(20) +); + +omm=# create table products +omm-# (product_id integer, +omm(# product_name char(30), +omm(# category char(20) +omm(# ); +CREATE TABLE +omm=# +omm=# \dt+ products + List of relations + Schema | Name | Type | Owner | Size | Storage | Description +---------+----------+-------+-------+---------+----------------------------------+------------- + schema2 | products | table | omm | 0 bytes | {orientation=row,compression=no} | +(1 row) + +omm=# +omm=# \d+ products + Table "schema2.products" + Column | Type | Modifiers | Storage | Stats target | Description +--------------+---------------+-----------+----------+--------------+------------- + product_id | integer | | plain | | + product_name | character(30) | | extended | | + category | character(20) | | extended | | +Has OIDs: no +Options: orientation=row, compression=no + +omm=# + +``` + +#### 2.向表中插入数据,采用一次插入一条和多条记录的方式 + +| product_id | product_name | category | +| ---------- | -------------- | --------- | +| 1502 | olympus camera | electrncs | +| 1601 | lamaze | toys | +| 1700 | wait interface | Books | +| 1666 | harry potter | toys | + +``` +SQL文本: +insert into products(product_id,product_name,category) values (1502,'olympus camera','electrncs'); +insert into products values (1601,'lamaze','toys'); +insert into products(product_id,product_name,category) values +(1700,'wait interface','Books'), +(1666,'harry potter','toys'); + +omm=# insert into products(product_id,product_name,category) values (1502,'olympus camera','electrncs'); +INSERT 0 1 +omm=# insert into products values (1601,'lamaze','toys'); +INSERT 0 1 +omm=# insert into products(product_id,product_name,category) values +omm-# (1700,'wait interface','Books'), +omm-# (1666,'harry potter','toys'); +INSERT 0 2 +omm=# + +``` + +#### 3.获取表中一条记录、三条记录和所有记录 + +``` +SQL文本: +select * from products limit 1; +select * from products limit 3; +select * from products; + +omm=# select * from products limit 1; + product_id | product_name | category +------------+--------------------------------+---------------------- + 1502 | olympus camera | electrncs +(1 row) + +omm=# select * from products limit 3; + product_id | product_name | category +------------+--------------------------------+---------------------- + 1502 | olympus camera | electrncs + 1601 | lamaze | toys + 1700 | wait interface | Books +(3 rows) + +omm=# select * from products; + product_id | product_name | category +------------+--------------------------------+---------------------- + 1502 | olympus camera | electrncs + 1601 | lamaze | toys + 1700 | wait interface | Books + 1666 | harry potter | toys +(4 rows) + +omm=# + +``` + +#### 4.将满足product_id > 1600的记录的product_id更新为product_id – 1000,并查看products中所有记录是否更新成功 + +``` +SQL文本: +update products set product_id = product_id - 1000 where product_id > 1600; + +omm=# update products set product_id = product_id - 1000 where product_id > 1600; +UPDATE 3 +omm=# select * from products; + product_id | product_name | category +------------+--------------------------------+---------------------- + 1502 | olympus camera | electrncs + 601 | lamaze | toys + 700 | wait interface | 
Books + 666 | harry potter | toys +(4 rows) + +omm=# + +``` + +#### 5.删除category为toys的所有记录,并查看products中数据是否删除成功 + +``` +SQL文本: +delete from products where category = 'toys'; + +omm=# delete from products where category = 'toys'; +DELETE 2 +omm=# select * from products; + product_id | product_name | category +------------+--------------------------------+---------------------- + 1502 | olympus camera | electrncs + 700 | wait interface | Books +(2 rows) + +omm=# +``` + +#### 6.删除products中所有数据,并查看数据是否删除成功 + +``` +SQL文本: +delete from products; + +omm=# delete from products; +DELETE 2 +omm=# select * from products; + product_id | product_name | category +------------+--------------+---------- +(0 rows) + +omm=# +``` + +#### 7.删除表products + +``` +SQL文本: +drop table products; + +omm=# drop table products; +DROP TABLE +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\272\224\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\272\224\345\244\251.md" new file mode 100644 index 0000000000000000000000000000000000000000..67e32d05a6eda2e539e2f59b9478ef5778452fbe --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\344\272\224\345\244\251.md" @@ -0,0 +1,171 @@ ++++ + +title = "openGauss每日一练第5天" + +date = "2022-04-20" + +tags = ["openGauss每日一练第5天"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练第5天" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练第5天 + +本文出处:https://www.modb.pro/db/193101 + +## 学习地址 + +https://www.modb.pro/course/133 + +## 学习目标 + +学习openGauss创建用户、修改用户属性、更改用户权限和删除用户 + +用户是用来登录数据库的,通过对用户赋予不同的权限,可以方便地管理用户对数据库的访问及操作 + +## 课程作业 + +过程中使用\du或\du+查看用户信息 + +### **1.创建用户user1、user2和user3,user1具有CREATEROLE权限,user2具有CREATEDB权限,要求使用两种不同的方法设置密码** + +```sql +SQL文本: +create user user1 createrole password 'user1_123'; +create user user2 createdb identified by 'user2_123'; +create user user3 password 'user3_123'; +\du+ + +omm=# create user user1 createrole password 'user1_123'; +CREATE ROLE +omm=# create user user2 createdb identified by 'user2_123'; +CREATE ROLE +omm=# create user user3 password 'user3_123'; +CREATE ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + user1 | Create role | {} | + user2 | Create DB | {} | + user3 | | {} | + +omm=# + +``` + +### **2.修改用户user1的密码** + +```sql +SQL文本: +alter user user1 identified by 'user1_456' replace 'user1_123'; +或者 +alter user user1 password 'user1_789' ; + +omm=# alter user user1 identified by 'user1_456' replace 'user1_123'; +ALTER ROLE +omm=# alter user user1 password 'user1_789' ; +ALTER ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create 
role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + user1 | Create role | {} | + user2 | Create DB | {} | + user3 | | {} | + +omm=# + +``` + +### **3.重命名用户user2** + +```sql +SQL文本: +alter user user2 rename to user222; +\du+ + +omm=# alter user user2 rename to user222; +ALTER ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + user1 | Create role | {} | + user222 | Create DB | {} | + user3 | | {} | + +omm=# + +``` + +### **4.将用户user1的权限授权给用户user3,再回收用户user3的权限** + +``` +SQL文本: +grant user1 to user3; +\du+ +revoke all privilege from user3; +\du+ + +omm=# grant user1 to user3; +GRANT ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + user1 | Create role | {} | + user222 | Create DB | {} | + user3 | | {user1} | + +omm=# revoke all privilege from user3; +ALTER ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + user1 | Create role | {} | + user222 | Create DB | {} | + user3 | | {user1} | + +omm=# + +``` + +### **5.删除所有创建用户** + +```sql +SQL文本: drop user user1; drop user user222; drop user user3; \du+ +omm=# drop user user1; +DROP ROLE +omm=# drop user user222; +DROP ROLE +omm=# drop user user3; +DROP ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\345\205\255\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\345\205\255\345\244\251.md" new file mode 100644 index 0000000000000000000000000000000000000000..5ff692f52874cf2b062b1189beaafba2012085b9 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\345\205\255\345\244\251.md" @@ -0,0 +1,209 @@ ++++ + +title = 
"openGauss每日一练第6天" + +date = "2022-04-20" + +tags = ["openGauss每日一练第6天"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练第6天" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练第6天 + +本文出处:[https://www.modb.pro/db/193150](https://www.modb.pro/db/193150) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +学习openGauss创建模式、修改模式属性和删除模式 + +模式是一组数据库对象的集合,主要用于控制对数据库对象的访问 + +## 课后作业 + +### **1.创建一个名为tpcds的模式** + +```sql +SQL文本: +create schema tpcds; +\dn tpcds + +omm=# create schema tpcds; +CREATE SCHEMA +omm=# \dn tpcds +List of schemas + Name | Owner +-------+------- + tpcds | omm +(1 row) + +omm-# +``` + +### **2.创建一个用户tim, 并将tpcds的owner修改为tim,且修改owner前后分别使用\dn+查看模式信息** + +``` +SQL文本: +create user tim password 'tim_1234'; +\dn+ +alter schema tpcds owner to tim; +\dn+ + +omm=# create user tim password 'tim_1234'; +omm=# CREATE ROLE +omm=# \dn+ + List of schemas + Name | Owner | Access privileges | Description +-------------+-------+-------------------+---------------------------------- + cstore | omm | | reserved schema for DELTA tables + dbe_perf | omm | | dbe_perf schema + pkg_service | omm | | pkg_service schema + public | omm | omm=UC/omm +| standard public schema + | | =U/omm | + schema2 | omm | | + snapshot | omm | | snapshot schema + tim | tim | | + tpcds | omm | | +(8 rows) + +omm=# alter schema tpcds owner to tim; +ALTER SCHEMA +omm=# \dn+ + List of schemas + Name | Owner | Access privileges | Description +-------------+-------+-------------------+---------------------------------- + cstore | omm | | reserved schema for DELTA tables + dbe_perf | omm | | dbe_perf schema + pkg_service | omm | | pkg_service schema + public | omm | omm=UC/omm +| standard public schema + | | =U/omm | + schema2 | omm | | + snapshot | omm | | snapshot schema + tim | tim | | + tpcds | tim | | +(8 rows) + +omm=# + +``` + +### **3.重命名tpcds为tpcds1** + +```sql +SQL文本: +alter schema tpcds rename to tpcds1; +\dn+ + +omm=# alter schema tpcds rename to tpcds1; +ALTER SCHEMA +omm=# \dn+ + List of schemas + Name | Owner | Access privileges | Description +-------------+-------+-------------------+---------------------------------- + cstore | omm | | reserved schema for DELTA tables + dbe_perf | omm | | dbe_perf schema + pkg_service | omm | | pkg_service schema + public | omm | omm=UC/omm +| standard public schema + | | =U/omm | + schema2 | omm | | + snapshot | omm | | snapshot schema + tim | tim | | + tpcds1 | tim | | +(8 rows) + +omm=# +``` + +### **4.在模式tpcds1中建表customer、插入记录和查询记录** + +```sql +SQL文本: +建表 +create table tpcds1.customer +( c_customer_sk integer, + c_customer_id char(5), + c_first_name char(6), + c_last_name char(8) +); + +插入记录 +INSERT INTO tpcds1.customer (c_customer_sk, c_customer_id, c_first_name,c_last_name) VALUES +(6885, 1, 'Joes', 'Hunter'), +(4321, 2, 'Lily','Carter'), +(9527, 3, 'James', 'Cook'), +(9500, 4, 'Lucy', 'Baker'); + +查询记录 +select * from tpcds1.customer; + +omm=# create table tpcds1.customer +omm-# ( c_customer_sk integer, +omm(# c_customer_id char(5), +omm(# c_first_name char(6), +omm(# c_last_name char(8) +omm(# ); +CREATE TABLE +omm=# INSERT INTO tpcds1.customer (c_customer_sk, c_customer_id, c_first_name,c_last_name) VALUES +omm-# (6885, 1, 'Joes', 'Hunter'), +omm-# (4321, 2, 'Lily','Carter'), +omm-# (9527, 3, 'James', 'Cook'), +omm-# (9500, 4, 'Lucy', 'Baker'); +INSERT 0 4 +omm=# select * from tpcds1.customer; + c_customer_sk | c_customer_id | c_first_name | c_last_name 
+---------------+---------------+--------------+------------- + 6885 | 1 | Joes | Hunter + 4321 | 2 | Lily | Carter + 9527 | 3 | James | Cook + 9500 | 4 | Lucy | Baker +(4 rows) + +omm=# \dt customer + List of relations + Schema | Name | Type | Owner | Storage +--------+----------+-------+-------+---------------------------------- + tpcds1 | customer | table | omm | {orientation=row,compression=no} +(1 row) + +omm=# +``` + +### **5.删除模式tpcds1** + +```sql +SQL文本: drop schema tpcds1; drop schema tpcds1 cascade; \dn+ +omm=# drop schema tpcds1; +ERROR: cannot drop schema tpcds1 because other objects depend on it +DETAIL: table customer depends on schema tpcds1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +omm=# drop schema tpcds1 cascade; +NOTICE: drop cascades to table customer +DROP SCHEMA +omm=# \dn+ + List of schemas + Name | Owner | Access privileges | Description +-------------+-------+-------------------+---------------------------------- + cstore | omm | | reserved schema for DELTA tables + dbe_perf | omm | | dbe_perf schema + pkg_service | omm | | pkg_service schema + public | omm | omm=UC/omm +| standard public schema + | | =U/omm | + schema2 | omm | | + snapshot | omm | | snapshot schema + tim | tim | | +(7 rows) + +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\345\233\233\345\244\251.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\345\233\233\345\244\251.md" new file mode 100644 index 0000000000000000000000000000000000000000..9ea4bb791f223068a703b6f464abb437416a707a --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\347\254\254\345\233\233\345\244\251.md" @@ -0,0 +1,184 @@ ++++ + +title = "openGauss每日一练第四天" + +date = "2022-04-19" + +tags = ["openGauss每日一练第四天"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练第四天" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练第四天 + +本文出处:[https://www.modb.pro/db/193083](https://www.modb.pro/db/193083) + +
+ +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +学习openGauss创建角色、修改角色属性、更改角色权限和删除角色 +角色是用来管理权限的,从数据库安全的角度考虑,可以把所有的管理和操作权限划分到不同的角色上 + +## 课后作业 + +过程中使用\du或\du+查看角色信息 + +### **1.创建角色role1为系统管理员, role2指定生效日期, role3具有LOGIN属性** + +``` +SQL文本: +create role role1 sysadmin identified by 'role1_123'; +create role role2 identified by 'role2_123' vaild begein '2021-11-11'; +create role role3 login identified by 'role3_123'; +\du+ + +omm=# create role role1 sysadmin identified by 'role1_123'; +CREATE ROLE +omm=# create role role2 identified by 'role2_123' valid begin '2021-11-11'; +CREATE ROLE +omm=# create role role3 login identified by 'role3_123'; +CREATE ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + role1 | Cannot login, Sysadmin | {} | + role2 | Cannot login +| {} | + | Role valid begin 2021-11-11 00:00:00+08 | | + role3 | | {} | + +omm=# + +``` + +### **2.重命名role1** + +``` +SQL文本: +alter role role1 rename to role111; +\du+ + +omm=# alter role role1 rename to role111; +ALTER ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + role111 | Cannot login, Sysadmin | {} | + role2 | Cannot login +| {} | + | Role valid begin 2021-11-11 00:00:00+08 | | + role3 | | {} | + +omm=# + +``` + +### **3.修改role2密码** + +``` +SQL文本: +alter role role2 identified by 'role2_456' replace 'role2_123' +或者 +alter role role2 identified by 'role2_789'; + +omm=# alter role role2 identified by 'role2_456' replace 'role2_456'; +ALTER ROLE +omm=# alter role role2 identified by 'role2_789'; +ALTER ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + role111 | Cannot login, Sysadmin | {} | + role2 | Cannot login +| {} | + | Role valid begin 2021-11-11 00:00:00+08 | | + role3 | | {} | + +omm=# +``` + +### **4.将omm权限授权给role3,再回收role3的权限** + +``` +SQL文本: +grant omm to role3; +\du+ +revoke all privilege from role3; +\du+ + + +omm=# grant omm to role3; +GRANT ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, 
Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + role111 | Cannot login, Sysadmin | {} | + role2 | Cannot login +| {} | + | Role valid begin 2021-11-11 00:00:00+08 | | + role3 | | {omm} | + utest | | {} | + +omm=# revoke all privilege from role3; +ALTER ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + role111 | Cannot login, Sysadmin | {} | + role2 | Cannot login +| {} | + | Role valid begin 2021-11-11 00:00:00+08 | | + role3 | | {omm} | + +omm=# + +``` + +### 5.删除所有创建角色 + +``` +SQL文本: +drop role role1; +drop role role2; +drop role role3; +\du+ + +omm=# drop role role111; +DROP ROLE +omm=# drop role role2; +drop role role3; +DROP ROLE +omm=# drop role role3; +DROP ROLE +omm=# \du+ + List of roles + Role name | Attributes | Member of | Description +-----------+------------------------------------------------------------------------------------------------------------------+-----------+------------- + lizi | Create role, Create DB, Replication, Administer audit, Sysadmin, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + omm | Sysadmin, Create role, Create DB, Replication, Administer audit, Monitoradmin, Operatoradmin, Policyadmin, UseFT | {} | + +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\205\250\346\226\207\346\243\200\347\264\242\357\274\211.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\205\250\346\226\207\346\243\200\347\264\242\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..1f3835ae3b190558dd5b472ac5da828cae639ade --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\205\250\346\226\207\346\243\200\347\264\242\357\274\211.md" @@ -0,0 +1,91 @@ ++++ + +title = "openGauss每日一练(全文检索)" + +date = "2022-04-25" + +tags = ["openGauss每日一练(全文检索)"] + +archives = "2022-04" + +author = "云和恩墨" + +summary ="openGauss每日一练(全文检索)" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练(全文检索) + +本文出处:[https://www.modb.pro/db/224179](https://www.modb.pro/db/224179) + +## 学习目标 + +**学习openGauss全文检索** + +openGauss提供了两种数据类型用于支持全文检索。tsvector类型表示为文本搜索优化的文件格式,tsquery类型表示文本查询 + +## 课后作业 + +### **1.用tsvector @@ tsquery和tsquery @@ tsvector完成两个基本文本匹配** + +``` +omm=# SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector @@ 'cat & rat'::tsquery AS RESULT; + result +-------- + t +(1 row) + +omm=# SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::tsvector AS RESULT; + result +-------- + f +(1 row) + +omm=# +``` + +### **2.创建表且至少有两个字段的类型为 text类型,在创建索引前进行全文检索** + +``` +omm=# CREATE SCHEMA tsearch; +CREATE TABLE tsearch.pgweb(id int, body text, title text, last_mod_date date); +CREATE SCHEMA +omm=# CREATE TABLE tsearch.pgweb(id int, body text, title text, last_mod_date date); +INSERT INTO tsearch.pgweb VALUES(1, 'China, officially the People''s Republic of 
+omm=# INSERT INTO tsearch.pgweb VALUES(1, 'China, officially the People''s Republic of China(PRC), located in Asia, is the world''s most populous state.', 'China', '2010-1-1');
+INSERT 0 1
+omm=# INSERT INTO tsearch.pgweb VALUES(2, 'America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley.', 'America', '2010-1-1');
+INSERT 0 1
+omm=# INSERT INTO tsearch.pgweb VALUES(3, 'England is a country that is part of the United Kingdom. It shares land borders with Scotland to the north and Wales to the west.', 'England','2010-1-1');
+INSERT 0 1
+omm=#
+omm=# SELECT id, body, title FROM tsearch.pgweb WHERE to_tsvector(body) @@ to_tsquery('america');
+
+ id | body | title
+----+-------------------------------------------------------------------------------------------------------------------------+---------
+ 2 | America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley. | America
+(1 row)
+
+omm=#
+```
+
+### **3.创建GIN索引**
+
+```
+omm=# CREATE INDEX pgweb_idx_1 ON tsearch.pgweb USING gin(to_tsvector('english', body));
+CREATE INDEX
+omm=#
+```
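+
+补充一个示意查询(基于上文的tsearch.pgweb表):要让查询能用上该GIN索引,WHERE条件中的表达式需要与建索引时的表达式一致(这里都显式指定'english'配置);是否实际走索引可用explain确认,数据量很小时优化器也可能仍选择全表扫描:
+
+```
+-- 示意:查询表达式与索引表达式to_tsvector('english', body)保持一致
+select id, title from tsearch.pgweb
+ where to_tsvector('english', body) @@ to_tsquery('english', 'england');
+
+-- 可用explain观察是否使用了pgweb_idx_1
+explain select id, title from tsearch.pgweb
+ where to_tsvector('english', body) @@ to_tsquery('english', 'england');
+```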
+
+### **4.清理数据**
+
+```
+omm=# drop schema tsearch cascade;
+DROP SCHEMA
+omm=#
+```
diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\210\206\345\214\272\350\241\250\347\264\242\345\274\225\357\274\211.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\210\206\345\214\272\350\241\250\347\264\242\345\274\225\357\274\211.md"
new file mode 100644
index 0000000000000000000000000000000000000000..59066e1befb82a3b185fb133896ff22aab2c0fc9
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\210\206\345\214\272\350\241\250\347\264\242\345\274\225\357\274\211.md"
@@ -0,0 +1,251 @@
++++
+
+title = "openGauss每日一练(分区表索引)"
+
+date = "2022-04-21"
+
+tags = ["openGauss每日一练(分区表索引)"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "openGauss每日一练(分区表索引)"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# openGauss每日一练(分区表索引)
+
+本文出处:[https://www.modb.pro/db/222617](https://www.modb.pro/db/222617)
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+**学习openGauss分区表索引**
+
+## 课后作业
+
+### **1.创建范围分区表products, 为表创建分区表索引1,不指定索引分区的名称,创建分区表索引2,并指定索引分区的名称,创建GLOBAL分区索引3**
+
+```
+--SQL文本:
+create table products
+( id integer,
+ name char(8)
+)
+partition by range (id)
+(partition products_p1 values less than (10),
+ partition products_p2 values less than (20),
+ partition products_p3 values less than (30),
+ partition products_p4 values less than (40),
+ partition products_p5 values less than (50)
+);
+create index products_index1 on products(id) local;
+create index products_index2 on products(id) local
+(
+partition id_index1,
+partition id_index2,
+partition id_index3,
+partition id_index4,
+partition id_index5
+);
+create index products_index3 on products(name) global;
+
+omm=# create table products
+omm-# ( id integer,
+omm(# name char(8)
+omm(# )
+omm-# partition by range (id)
+omm-# (partition products_p1 values less than (10),
+omm(# partition products_p2 values less than (20),
+omm(# partition products_p3 values less than (30),
+omm(# partition products_p4 values less than (40),
+omm(# partition products_p5 values less than (50)
+omm(# );
+CREATE TABLE
+omm=# create index products_index1 on products(id) local;
+CREATE INDEX
+omm=# create index products_index2 on products(id) local
+omm-# (
+omm(# partition id_index1,
+omm(# partition id_index2,
+omm(# partition id_index3,
+omm(# partition id_index4,
+omm(# partition id_index5
+omm(# );
+CREATE INDEX
+omm=# create index products_index3 on products(name) global;
+CREATE INDEX
+omm=#
+
+```
+
+### **2.在分区表索引1上,修改分区表索引的表空间,重命名分区表索引**
+
+```
+--SQL文本:
+-- 注:以下假设表空间pgtbs1已存在;若不存在,可先执行 create tablespace pgtbs1 relative location 'tbs/pgtbs1';
+alter index products_index1 move partition products_p1_id_idx tablespace pgtbs1;
+alter index products_index1 move partition products_p2_id_idx tablespace pgtbs1;
+alter index products_index1 move partition products_p3_id_idx tablespace pgtbs1;
+alter index products_index1 move partition products_p4_id_idx tablespace pgtbs1;
+alter index products_index1 move partition products_p5_id_idx tablespace pgtbs1;
+alter index products_index1 rename partition products_p1_id_idx to products_p1_id_idx_new;
+alter index products_index1 rename partition products_p2_id_idx to products_p2_id_idx_new;
+alter index products_index1 rename partition products_p3_id_idx to products_p3_id_idx_new;
+alter index products_index1 rename partition products_p4_id_idx to products_p4_id_idx_new;
+alter index products_index1 rename partition products_p5_id_idx to products_p5_id_idx_new;
+
+omm=# alter index products_index1 move partition products_p1_id_idx tablespace pgtbs1;
+ALTER INDEX
+omm=# alter index products_index1 move partition products_p2_id_idx tablespace pgtbs1;
+ALTER INDEX
+omm=# alter index products_index1 move partition products_p3_id_idx tablespace pgtbs1;
+ALTER INDEX
+omm=# alter index products_index1 move partition products_p4_id_idx tablespace pgtbs1;
+ALTER INDEX
+omm=# alter index products_index1 move partition products_p5_id_idx tablespace pgtbs1;
+ALTER INDEX
+omm=# alter index products_index1 rename partition products_p1_id_idx to products_p1_id_idx_new;
+ALTER INDEX
+omm=# alter index products_index1 rename partition products_p2_id_idx to products_p2_id_idx_new;
+ALTER INDEX
+omm=# alter index products_index1 rename partition products_p3_id_idx to products_p3_id_idx_new;
+ALTER INDEX
+omm=# alter index products_index1 rename partition products_p4_id_idx to products_p4_id_idx_new;
+ALTER INDEX
+omm=# alter index products_index1 rename partition products_p5_id_idx to products_p5_id_idx_new;
+ALTER INDEX
+omm=#
+
+```
+
+### **3.在分区表索引2上,重建单个索引分区和分区上的所有索引**
+
+```
+--SQL文本:
+reindex index products_index2 partition id_index1;
+reindex index products_index2 partition id_index2;
+reindex index products_index2 partition id_index3;
+reindex index products_index2 partition id_index4;
+reindex index products_index2 partition id_index5;
+
+omm=# reindex index products_index2 partition id_index5;
+REINDEX
+omm=# reindex index products_index2 partition id_index1;
+REINDEX
+omm=# reindex index products_index2 partition id_index2;
+REINDEX
+omm=# reindex index products_index2 partition id_index3;
+REINDEX
+omm=# reindex index products_index2 partition id_index4;
+REINDEX
+omm=# reindex index products_index2 partition id_index5;
+REINDEX
+omm=#
+
+```
+
+### **4.使用\d+、系统视图pg_indexes和pg_partition查看索引信息**
+
+```
+--SQL文本:
+\d+ products
+select * from pg_indexes where tablename='products';
+select * from pg_partition;
+
+omm=# \d+ 
products + Table "public.products" + Column | Type | Modifiers | Storage | Stats target | Description +--------+--------------+-----------+----------+--------------+------------- + id | integer | | plain | | + name | character(8) | | extended | | +Indexes: + "products_index1" btree (id) LOCAL(PARTITION products_p1_id_idx_new TABLESPACE pgtbs1, PARTITION products_p2_id_idx_new TABLESPACE pgtbs1, PARTITION products_p3_id_idx_new TABLESPACE pgtbs1, PARTITION products_p4_id_idx_new TABLESPACE pgtbs1, PARTITION products_p5_id_idx_new TABLESPACE pgtbs1) TABLESPACE pg_default + "products_index2" btree (id) LOCAL(PARTITION id_index1, PARTITION id_index2, PARTITION id_index3, PARTITION id_index4, PARTITION id_index5) TABLESPACE pg_default + "products_index3" btree (name) TABLESPACE pg_default +Range partition by(id) +Number of partition: 5 (View pg_partition to check each partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +omm=# select * from pg_indexes where tablename='products'; + schemaname | tablename | indexname | tablespace | indexde +f +------------+-----------+-----------------+------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + public | products | products_index1 | | CREATE INDEX products_index1 ON products USING btree (id) LOCAL(PARTITION products_p1_id_idx_new TABLESPACE pgtbs1, PARTITION products_p2_id_idx_new TABLESPACE pgtbs1, PARTITIO +N products_p3_id_idx_new TABLESPACE pgtbs1, PARTITION products_p4_id_idx_new TABLESPACE pgtbs1, PARTITION products_p5_id_idx_new TABLESPACE pgtbs1) TABLESPACE pg_default + public | products | products_index2 | | CREATE INDEX products_index2 ON products USING btree (id) LOCAL(PARTITION id_index1, PARTITION id_index2, PARTITION id_index3, PARTITION id_index4, PARTITION id_index5) TABLES +PACE pg_default + public | products | products_index3 | | CREATE INDEX products_index3 ON products USING btree (name) TABLESPACE pg_default +(3 rows) + +omm=# select * from pg_partition; + relname | parttype | parentid | rangenum | intervalnum | partstrategy | relfilenode | reltablespace | relpages | reltuples | relallvisible | reltoastrelid | reltoastidxid | indextblid | indisusable | reldeltarelid | r +eldeltaidx | relcudescrelid | relcudescidx | relfrozenxid | intspnum | partkey | intervaltablespace | interval | boundaries | transit | reloptions | relfrozenxid64 +------------------------+----------+----------+----------+-------------+--------------+-------------+---------------+----------+-----------+---------------+---------------+---------------+------------+-------------+---------------+-- +-----------+----------------+--------------+--------------+----------+---------+--------------------+----------+------------+---------+---------------------------------------------------+---------------- + products | r | 33404 | 0 | 0 | r | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | + 0 | 0 | 0 | 0 | | 1 | | | | | {orientation=row,compression=no,wait_clean_gpi=n} | 0 + products_p1 | p | 33404 | 0 | 0 | r | 33408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | + 0 | 0 | 0 | 213746 | | | | | {10} | | {orientation=row,compression=no} | 213746 + products_p2 | p | 33404 | 0 | 0 | r | 33409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | + 0 | 0 | 0 | 213746 | | | | | {20} | 
| {orientation=row,compression=no} | 213746
+ products_p3 | p | 33404 | 0 | 0 | r | 33410 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 |
+ 0 | 0 | 0 | 213746 | | | | | {30} | | {orientation=row,compression=no} | 213746
+ products_p4 | p | 33404 | 0 | 0 | r | 33411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 |
+ 0 | 0 | 0 | 213746 | | | | | {40} | | {orientation=row,compression=no} | 213746
+ products_p5 | p | 33404 | 0 | 0 | r | 33412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 |
+ 0 | 0 | 0 | 213746 | | | | | {50} | | {orientation=row,compression=no} | 213746
+ id_index5 | x | 33419 | 0 | 0 | n | 33441 | 0 | 1 | 0 | 0 | 0 | 0 | 33412 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ products_p1_id_idx_new | x | 33413 | 0 | 0 | n | 33427 | 33426 | 1 | 0 | 0 | 0 | 0 | 33408 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ products_p2_id_idx_new | x | 33413 | 0 | 0 | n | 33428 | 33426 | 1 | 0 | 0 | 0 | 0 | 33409 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ products_p3_id_idx_new | x | 33413 | 0 | 0 | n | 33429 | 33426 | 1 | 0 | 0 | 0 | 0 | 33410 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ products_p4_id_idx_new | x | 33413 | 0 | 0 | n | 33430 | 33426 | 1 | 0 | 0 | 0 | 0 | 33411 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ products_p5_id_idx_new | x | 33413 | 0 | 0 | n | 33431 | 33426 | 1 | 0 | 0 | 0 | 0 | 33412 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ id_index1 | x | 33419 | 0 | 0 | n | 33437 | 0 | 1 | 0 | 0 | 0 | 0 | 33408 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ id_index2 | x | 33419 | 0 | 0 | n | 33438 | 0 | 1 | 0 | 0 | 0 | 0 | 33409 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ id_index3 | x | 33419 | 0 | 0 | n | 33439 | 0 | 1 | 0 | 0 | 0 | 0 | 33410 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+ id_index4 | x | 33419 | 0 | 0 | n | 33440 | 0 | 1 | 0 | 0 | 0 | 0 | 33411 | t | 0 |
+ 0 | 0 | 0 | 0 | | | | | | | | 0
+(16 rows)
+
+omm=#
+
+```
+
+### **5.删除索引、表和表空间**
+
+```
+--SQL文本:
+drop index products_index1;
+drop index products_index2;
+drop index products_index3;
+drop table products;
+drop tablespace pgtbs1;
+
+omm=# drop index products_index1;
+DROP INDEX
+omm=# drop index products_index2;
+DROP INDEX
+omm=# drop index products_index3;
+DROP INDEX
+omm=# drop table products;
+DROP TABLE
+omm=# drop tablespace pgtbs1;
+DROP TABLESPACE
+omm=#
+
+```
diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\210\206\345\214\272\350\241\250\357\274\211.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\210\206\345\214\272\350\241\250\357\274\211.md"
new file mode 100644
index 0000000000000000000000000000000000000000..53b6198364dcf1c8a806cf0da3db64edf05fc339
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\210\206\345\214\272\350\241\250\357\274\211.md"
@@ -0,0 +1,200 @@
++++
+
+title = "openGauss每日一练(分区表)"
+
+date = "2022-04-21"
+
+tags = ["openGauss每日一练(分区表)"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "openGauss每日一练(分区表)"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# openGauss每日一练(分区表)
+
+本文出处:[https://www.modb.pro/db/218257](https://www.modb.pro/db/218257)
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+**学习openGauss分区表**
+
+分区表是把逻辑上的一张表根据某种方案分成几张物理块进行存储,这张逻辑上的表称之为分区表,物理块称之为分区。
+
+分区表是一张逻辑表,不存储数据,数据实际是存储在分区上的。
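+
+在开始练习之前,先补充一个"分区裁剪"的极简示意:当查询条件落在分区键上时,优化器只扫描命中的分区,执行计划中通常体现为Partition Iterator算子下的Selected Partitions信息(以下t_prune为这里假设的表名):
+
+```
+-- 示意:带分区键过滤条件的查询通常只扫描命中的分区
+create table t_prune(id integer)
+partition by range (id)
+(partition p1 values less than (10),
+ partition p2 values less than (20));
+insert into t_prune values (5),(15);
+-- 预期计划中出现类似"Selected Partitions: 1"的信息
+explain select * from t_prune where id = 5;
+drop table t_prune;
+```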
+
+## 课后作业
+
+### **1.创建一个含有5个分区的范围分区表store,在每个分区中插入记录**
+
+```
+SQL文本:
+建表语句
+create table store
+( id integer,
+ name char(8)
+)
+partition by range (id)
+(partition store_p1 values less than (10),
+ partition store_p2 values less than (20),
+ partition store_p3 values less than (30),
+ partition store_p4 values less than (40),
+ partition store_p5 values less than (50)
+);
+插入语句
+insert into store select n,'test'||n from generate_series(1,45) n;
+
+omm=# create table store
+omm-# ( id integer,
+omm(# name char(8)
+omm(# )
+omm-# partition by range (id)
+omm-# (partition store_p1 values less than (10),
+omm(# partition store_p2 values less than (20),
+omm(# partition store_p3 values less than (30),
+omm(# partition store_p4 values less than (40),
+omm(# partition store_p5 values less than (50)
+omm(# );
+CREATE TABLE
+omm=# insert into store select n,'test'||n from generate_series(1,45) n;
+INSERT 0 45
+omm=#
+
+```
+
+### **2.查看分区1上的数据**
+
+```
+SQL文本:
+select count(*) from store partition(store_p1);
+
+omm=# select count(*) from store partition(store_p1);
+ count
+-------
+ 9
+(1 row)
+
+omm=#
+
+```
+
+### **3.重命名分区2**
+
+```
+SQL文本:
+alter table store rename partition store_p2 to store_p2_new;
+select relname,parttype,boundaries from pg_partition;
+
+omm=# alter table store rename partition store_p2 to store_p2_new;
+ALTER TABLE
+omm=# select relname,parttype,boundaries from pg_partition;
+ relname | parttype | boundaries
+--------------+----------+------------
+ store | r |
+ store_p1 | p | {10}
+ store_p3 | p | {30}
+ store_p4 | p | {40}
+ store_p5 | p | {50}
+ store_p2_new | p | {20}
+(6 rows)
+
+omm=#
+```
+
+### **4.删除分区5**
+
+```
+SQL文本:
+alter table store drop partition store_p5;
+select relname,parttype,boundaries from pg_partition;
+
+omm=# alter table store drop partition store_p5;
+ALTER TABLE
+omm=# select relname,parttype,boundaries from pg_partition;
+ relname | parttype | boundaries
+--------------+----------+------------
+ store | r |
+ store_p1 | p | {10}
+ store_p3 | p | {30}
+ store_p4 | p | {40}
+ store_p2_new | p | {20}
+(5 rows)
+
+omm=#
+
+```
+
+### **5.增加分区6**
+
+```
+SQL文本:
+alter table store add partition store_p6 values less than (60);
+select relname,parttype,boundaries from pg_partition;
+
+omm=# alter table store add partition store_p6 values less than (60);
+ALTER TABLE
+omm=# select relname,parttype,boundaries from pg_partition;
+ relname | parttype | boundaries
+--------------+----------+------------
+ store | r |
+ store_p1 | p | {10}
+ store_p3 | p | {30}
+ store_p4 | p | {40}
+ store_p2_new | p | {20}
+ store_p6 | p | {60}
+(6 rows)
+
+omm=#
+
+```
+
+### **6.在系统表pg_partition中查看分区信息**
+
+```
+SQL文本:
+select * from pg_partition;
+
+omm=# select * from pg_partition;
+ relname | parttype | parentid | rangenum | intervalnum | partstrategy | relfilenode | reltablespace | relpages | reltuples | relallvisible | reltoastrelid | reltoastidxid | indextblid | indisusable | reldeltarelid | reldeltaidx
+ | relcudescrelid | relcudescidx | relfrozenxid | intspnum | partkey | intervaltablespace | interval | boundaries | transit | reloptions | relfrozenxid64
+--------------+----------+----------+----------+-------------+--------------+-------------+---------------+----------+-----------+---------------+---------------+---------------+------------+-------------+---------------+------------
+-+----------------+--------------+--------------+----------+---------+--------------------+----------+------------+---------+---------------------------------------------------+----------------
+ store | r | 33243 | 0 | 0 | r | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | 0
+ | 0 | 0 | 0 | | 1 | | | | | 
{orientation=row,compression=no,wait_clean_gpi=n} | 0 + store_p1 | p | 33243 | 0 | 0 | r | 33247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | 0 + | 0 | 0 | 193289 | | | | | {10} | | {orientation=row,compression=no} | 193289 + store_p3 | p | 33243 | 0 | 0 | r | 33249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | 0 + | 0 | 0 | 193289 | | | | | {30} | | {orientation=row,compression=no} | 193289 + store_p4 | p | 33243 | 0 | 0 | r | 33250 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | 0 + | 0 | 0 | 193289 | | | | | {40} | | {orientation=row,compression=no} | 193289 + store_p2_new | p | 33243 | 0 | 0 | r | 33248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | 0 + | 0 | 0 | 193289 | | | | | {20} | | {orientation=row,compression=no} | 193289 + store_p6 | p | 33243 | 0 | 0 | r | 33252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | t | 0 | 0 + | 0 | 0 | 193294 | | | | | {60} | | {orientation=row,compression=no} | 193294 +(6 rows) + +omm=# + +``` + +### **7.删除分区表** + +``` +SQL文本: +drop table store; + +omm=# drop table store; +DROP TABLE +omm=# + +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\256\232\344\271\211\346\270\270\346\240\207\357\274\211.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\256\232\344\271\211\346\270\270\346\240\207\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..c20586d9cecb86512efea76a520fb1170c9a85d0 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\345\256\232\344\271\211\346\270\270\346\240\207\357\274\211.md" @@ -0,0 +1,138 @@ ++++ + +title = "openGauss每日一练(定义游标)" + +date = "2022-04-25" + +tags = ["openGauss每日一练(定义游标)"] + +archives = "2022-04" + +author = "云和恩墨" + +summary ="openGauss每日一练(定义游标)" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练(定义游标) + +本文出处:[https://www.modb.pro/db/224157](https://www.modb.pro/db/224157) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss定义游标** + +为了处理SQL语句,存储过程进程分配一段内存区域来保存上下文联系,游标是指向上下文区域的句柄或指针。借助游标,存储过程可以控制上下文区域的变化。 + +## 课后作业 + +### **1.创建游标,且使用select子句指定游标返回的行,分别使用FETCH抓取数据,MOVE重定位游标** + +``` +omm=# create table t1(id integer,name varchar(20)); +CREATE TABLE +omm=# insert into t1 values +omm-# (1,'zhao'),(2,'qian'),(3,'sun'),(4,'li'); +INSERT 0 4 +omm=# begin; +BEGIN +omm=# cursor cursor1 for select * from t1 order by id; +DECLARE CURSOR +omm=# fetch forward 1 from cursor1; + id | name +----+------ + 1 | zhao +(1 row) + +omm=# move forward 2 from cursor1; +MOVE 2 +omm=# fetch forward 1 from cursor1; + id | name +----+------ + 4 | li +(1 row) +omm=# +``` + +### **2.在系统视图pg_cursors中查看游标** + +``` +omm=# select * from pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +---------+---------------------------------------------------+-------------+-----------+---------------+------------------------------- + cursor1 | cursor cursor1 for select * from t1 order by id; | f | f | t | 2021-12-27 15:39:12.614502+08 +(1 row) + +omm=# close cursor1; +CLOSE CURSOR +omm=# end; +COMMIT +omm=# select * from pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +omm=# +``` + +### **3.创建一个使用游标的存储过程** + +``` +omm=# create table company(name varchar(100), loc varchar(100), no integer); +CREATE TABLE +omm=# insert into company values 
('macrosoft', 'usa', 001); +INSERT 0 1 +omm=# insert into company values ('oracle', 'usa', 002); +INSERT 0 1 +omm=# insert into company values ('backberry', 'canada', 003); +INSERT 0 1 +omm=# create or replace procedure test_cursor_1 +omm-# as +omm$# company_name varchar(100); +omm$# company_loc varchar(100); +omm$# company_no integer; +omm$# +omm$# cursor c1_all is --cursor without args +omm$# select name, loc, no from company order by 1, 2, 3; +omm$# begin +omm$# if not c1_all%isopen then +omm$# open c1_all; +omm$# end if; +omm$# loop +omm$# fetch c1_all into company_name, company_loc, company_no; +omm$# RAISE INFO 'company_name: %' ,company_name; +omm$# exit when c1_all%notfound; +omm$# end loop; +omm$# if c1_all%isopen then +omm$# close c1_all; +omm$# end if; +omm$# end; +omm$# / +CREATE PROCEDURE +omm=# call test_cursor_1(); +INFO: company_name: backberry +INFO: company_name: macrosoft +INFO: company_name: oracle +INFO: company_name: oracle + test_cursor_1 +--------------- + +(1 row) + +omm=# +``` + +### **4.清理数据** + +``` +omm=# drop table company; +DROP TABLE +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\346\224\266\351\233\206\347\273\237\350\256\241\344\277\241\346\201\257\343\200\201\346\211\223\345\215\260\346\211\247\350\241\214\350\256\241\345\210\222\343\200\201\345\236\203\345\234\276\346\224\266\351\233\206\345\222\214checkpoint\357\274\211.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\346\224\266\351\233\206\347\273\237\350\256\241\344\277\241\346\201\257\343\200\201\346\211\223\345\215\260\346\211\247\350\241\214\350\256\241\345\210\222\343\200\201\345\236\203\345\234\276\346\224\266\351\233\206\345\222\214checkpoint\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..95fdb180a99547be1ed0f158494c0004b6d6b0a2 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\346\224\266\351\233\206\347\273\237\350\256\241\344\277\241\346\201\257\343\200\201\346\211\223\345\215\260\346\211\247\350\241\214\350\256\241\345\210\222\343\200\201\345\236\203\345\234\276\346\224\266\351\233\206\345\222\214checkpoint\357\274\211.md" @@ -0,0 +1,188 @@ ++++ + +title = "openGauss每日一练(收集统计信息、打印执行计划、垃圾收集和checkpoint)" + +date = "2022-04-25" + +tags = ["openGauss每日一练(收集统计信息、打印执行计划、垃圾收集和checkpoint)"] + +archives = "2022-04" + +author = "云和恩墨" + +summary ="openGauss每日一练(收集统计信息、打印执行计划、垃圾收集和checkpoint)" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练(收集统计信息、打印执行计划、垃圾收集和checkpoint) + +本文出处:[https://www.modb.pro/db/224177](https://www.modb.pro/db/224177) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss收集统计信息、打印执行计划、垃圾收集和checkpoint** + +## 课后作业 + +### **1.创建分区表,并用generate_series(1,N)函数对表插入数据** + +``` +omm=# create table store +omm-# ( id integer, +omm(# name char(8) +omm(# ) +omm-# partition by range (id) +omm-# (partition store_p1 values less than (10), +omm(# partition store_p2 values less than (50), +omm(# partition store_p3 values less than (100), +omm(# partition store_p4 values less than (150), +omm(# partition store_p5 values less than (200) +The connection to the server was lost. 
Attempting reset: +CREATE TABLE +omm=# insert into store select n,'test'||n from generate_series(1,180) n; +INSERT 0 180 +omm=# +``` + +### **2.收集表统计信息** + +``` +omm=# select relname,relpages,reltuples from pg_class where relname='store'; + relname | relpages | reltuples +---------+----------+----------- + store | 0 | 0 +(1 row) + +omm=# analyze verbose store; +INFO: analyzing "public.store"(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 9 live rows and 0 dead rows; 9 rows in sample, 9 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 40 live rows and 0 dead rows; 40 rows in sample, 40 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 50 live rows and 0 dead rows; 50 rows in sample, 50 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 50 live rows and 0 dead rows; 50 rows in sample, 50 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 31 live rows and 0 dead rows; 31 rows in sample, 31 estimated total rows(dn_6001_6002 pid=71262) +ANALYZE +omm=# select relname,relpages,reltuples from pg_class where relname='store'; + relname | relpages | reltuples +---------+----------+----------- + store | 5 | 180 +(1 row) + +omm=# +``` + +### **3.显示简单查询的执行计划;建立索引并显示有索引条件的执行计划** + +``` +omm=# explain select * from store; + QUERY PLAN +-------------------------------------------------------------------------- + Partition Iterator (cost=0.00..6.80 rows=180 width=13) + Iterations: 5 + -> Partitioned Seq Scan on store (cost=0.00..6.80 rows=180 width=13) + Selected Partitions: 1..5 +(4 rows) + +omm=#omm=# create index store_index1 on store(id) local +omm-# ( +omm(# partition id_index1, +omm(# partition id_index2, +omm(# partition id_index3, +omm(# partition id_index4, +omm(# partition id_index5 +omm(# ); +CREATE INDEX +omm=# explain select * from store where id=100; + QUERY PLAN +------------------------------------------------------------------------ + Partition Iterator (cost=0.00..3.25 rows=1 width=13) + Iterations: 1 + -> Partitioned Seq Scan on store (cost=0.00..3.25 rows=1 width=13) + Filter: (id = 100) + Selected Partitions: 4 +(5 rows) + +omm=# +``` + +### **4.更新表数据,并做垃圾收集** + +``` +omm=# update store set id=id+1; +UPDATE 180 +omm=# vacuum (verbose,analyze) store; +INFO: vacuuming "public.store"(dn_6001_6002 pid=71262) +INFO: index "store_index1" now contains 17 row versions in 2 pages(dn_6001_6002 pid=71262) +DETAIL: 0 index row versions were removed. +0 index pages have been deleted, 0 are currently reusable. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: "store": found 0 removable, 17 nonremovable row versions in 1 out of 1 pages(dn_6001_6002 pid=71262) +DETAIL: 9 dead row versions cannot be removed yet. +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: vacuuming "public.store"(dn_6001_6002 pid=71262) +INFO: index "store_index1" now contains 80 row versions in 2 pages(dn_6001_6002 pid=71262) +DETAIL: 0 index row versions were removed. +0 index pages have been deleted, 0 are currently reusable. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: "store": found 0 removable, 80 nonremovable row versions in 1 out of 1 pages(dn_6001_6002 pid=71262) +DETAIL: 40 dead row versions cannot be removed yet. +There were 0 unused item pointers. +0 pages are entirely empty. 
+CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: vacuuming "public.store"(dn_6001_6002 pid=71262) +INFO: index "store_index1" now contains 100 row versions in 2 pages(dn_6001_6002 pid=71262) +DETAIL: 0 index row versions were removed. +0 index pages have been deleted, 0 are currently reusable. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: "store": found 0 removable, 100 nonremovable row versions in 1 out of 1 pages(dn_6001_6002 pid=71262) +DETAIL: 50 dead row versions cannot be removed yet. +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: vacuuming "public.store"(dn_6001_6002 pid=71262) +INFO: index "store_index1" now contains 100 row versions in 2 pages(dn_6001_6002 pid=71262) +DETAIL: 0 index row versions were removed. +0 index pages have been deleted, 0 are currently reusable. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: "store": found 0 removable, 100 nonremovable row versions in 1 out of 1 pages(dn_6001_6002 pid=71262) +DETAIL: 50 dead row versions cannot be removed yet. +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: vacuuming "public.store"(dn_6001_6002 pid=71262) +INFO: index "store_index1" now contains 63 row versions in 2 pages(dn_6001_6002 pid=71262) +DETAIL: 0 index row versions were removed. +0 index pages have been deleted, 0 are currently reusable. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: "store": found 0 removable, 63 nonremovable row versions in 1 out of 1 pages(dn_6001_6002 pid=71262) +DETAIL: 31 dead row versions cannot be removed yet. +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +INFO: analyzing "public.store"(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 8 live rows and 9 dead rows; 8 rows in sample, 8 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 40 live rows and 40 dead rows; 40 rows in sample, 40 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 50 live rows and 50 dead rows; 50 rows in sample, 50 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 50 live rows and 50 dead rows; 50 rows in sample, 50 estimated total rows(dn_6001_6002 pid=71262) +INFO: ANALYZE INFO : "store": scanned 1 of 1 pages, containing 32 live rows and 31 dead rows; 32 rows in sample, 32 estimated total rows(dn_6001_6002 pid=71262) +VACUUM +omm=# +``` + +### **5.清理数据** + +``` +omm=# drop index store_index1; +DROP INDEX +omm=# drop table store; +DROP TABLE +omm=# +``` diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\346\231\256\351\200\232\350\241\250\347\264\242\345\274\225\357\274\211.md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\346\231\256\351\200\232\350\241\250\347\264\242\345\274\225\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..e194eec67ffe6f1c58f4386aaa2429daa6a01b85 --- /dev/null +++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\346\231\256\351\200\232\350\241\250\347\264\242\345\274\225\357\274\211.md" @@ -0,0 +1,152 @@ ++++ + +title = "openGauss每日一练(普通表索引)" + +date = "2022-04-21" + +tags = ["openGauss每日一练(普通表索引)"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练(普通表索引)" 
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# openGauss每日一练(普通表索引)
+
+本文出处:[https://www.modb.pro/db/218262](https://www.modb.pro/db/218262)
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+**学习openGauss普通表索引**
+
+索引是对数据库表中一列或多列的值进行排序的一种结构,使用索引可以快速访问数据库表中的特定信息。
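+
+先看一个"查询是否用上索引"的极简示意(t_idx、idx_t_idx_id为这里假设的名字;表很小时优化器可能仍选择全表扫描,可在会话内set enable_seqscan = off后再观察):
+
+```
+-- 示意:用explain观察查询是否走索引
+create table t_idx(id integer, name varchar(20));
+create index idx_t_idx_id on t_idx(id);
+insert into t_idx select n, 'n'||n from generate_series(1,10000) n;
+analyze t_idx;
+-- 预期计划中出现 Index Scan using idx_t_idx_id
+explain select * from t_idx where id = 100;
+drop table t_idx;
+```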
+
+## 课后作业
+
+### **1.创建表products, 分别为表创建一个unique索引1,指定b-tree索引2和部分索引3**
+
+```
+--SQL文本:
+create table products
+(product_id integer,
+ product_name char(30),
+ category char(20)
+);
+create unique index idx_products_id on products(product_id);
+create index idx_products_name on products using btree(product_name);
+create index idx_products_category on products(category) where category='toys';
+
+omm=# create table products
+omm-# (product_id integer,
+omm(# product_name char(30),
+omm(# category char(20)
+omm(# );
+CREATE TABLE
+omm=# create unique index idx_products_id on products(product_id);
+CREATE INDEX
+omm=# create index idx_products_name on products using btree(product_name);
+CREATE INDEX
+omm=# create index idx_products_category on products(category) where category='toys';
+CREATE INDEX
+omm=#
+
+```
+
+### **2.设置索引1不可用,修改索引2的表空间,重命名索引3**
+
+```
+--SQL文本:
+alter index idx_products_id unusable;
+create tablespace tspc1 relative location 'tbs/tspc1';
+alter index idx_products_name set tablespace tspc1;
+alter index idx_products_category rename to idx_products_category_new;
+
+omm=# alter index idx_products_id unusable;
+ALTER INDEX
+omm=# create tablespace tspc1 relative location 'tbs/tspc1';
+CREATE TABLESPACE
+omm=# alter index idx_products_name set tablespace tspc1;
+ALTER INDEX
+omm=# alter index idx_products_category rename to idx_products_category_new;
+ALTER INDEX
+omm=#
+
+```
+
+### **3.重建索引2和products的所有索引**
+
+```
+--SQL文本:
+alter index idx_products_name rebuild;
+reindex index idx_products_name;
+reindex table products;
+
+omm=# alter index idx_products_name rebuild;
+REINDEX
+omm=# reindex index idx_products_name;
+REINDEX
+omm=# reindex table products;
+REINDEX
+omm=#
+
+```
+
+### **4.使用\d+和系统视图pg_indexes查看索引信息**
+
+```
+--SQL文本:
+\d+
+select * from pg_indexes where tablename='products';
+
+omm=# \d+
+ List of relations
+ Schema | Name | Type | Owner | Size | Storage | Description
+--------+----------+-------+-------+---------+----------------------------------+-------------
+ public | products | table | omm | 0 bytes | {orientation=row,compression=no} |
+(1 rows)
+
+omm=# select * from pg_indexes where tablename='products';
+ schemaname | tablename | indexname | tablespace | indexdef
+------------+-----------+---------------------------+------------+-----------------------------------------------------------------------------------------------------------------------------------
+ public | products | idx_products_id | | CREATE UNIQUE INDEX idx_products_id ON products USING btree (product_id) TABLESPACE pg_default
+ public | products | idx_products_name | tspc1 | CREATE INDEX idx_products_name ON products USING btree (product_name) TABLESPACE tspc1
+ public | products | idx_products_category_new | | CREATE INDEX idx_products_category_new ON products USING btree (category) TABLESPACE pg_default WHERE (category = 'toys'::bpchar)
+(3 rows)
+
+omm=#
+
+```
+
+### **5.删除索引、表和表空间**
+
+```
+--SQL文本:
+drop index idx_products_id;
+drop index idx_products_name;
+drop index idx_products_category_new;
+drop table products;
+drop tablespace tspc1;
+
+omm=# drop index idx_products_id;
+DROP INDEX
+omm=# drop index idx_products_name;
+DROP INDEX
+omm=# drop index idx_products_category_new;
+DROP INDEX
+omm=# drop table products;
+DROP TABLE
+omm=# drop tablespace tspc1;
+DROP TABLESPACE
+omm=#
+
+```
diff --git "a/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\350\241\214\345\255\230\345\222\214\345\210\227\345\255\230).md" "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\350\241\214\345\255\230\345\222\214\345\210\227\345\255\230).md"
new file mode 100644
index 0000000000000000000000000000000000000000..9c4e2353f9e150501cd831784a3a5c70eb134b4b
--- /dev/null
+++ "b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\350\241\214\345\255\230\345\222\214\345\210\227\345\255\230).md"
@@ -0,0 +1,97 @@
++++
+
+title = "openGauss每日一练(行存和列存)"
+
+date = "2022-04-25"
+
+tags = ["openGauss每日一练(行存和列存)"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "openGauss每日一练(行存和列存)"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# openGauss每日一练(行存和列存)
+
+本文出处:[https://www.modb.pro/db/224180](https://www.modb.pro/db/224180)
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+**学习openGauss存储模型-行存和列存**
+
+行存储是指将表按行存储到硬盘分区上,列存储是指将表按列存储到硬盘分区上。默认情况下,创建的表为行存储。
+
+行、列存储模型各有优劣。用于TP场景的数据库通常默认使用行存储;仅在执行复杂查询且数据量大的AP场景下,才使用列存储。
+
+## 课后作业
+
+### **1.创建行存表和列存表,并批量插入10万条数据(行存表和列存表数据相同)**
+
+```
+omm=# create table test1 (id integer,name varchar2(20));
+CREATE TABLE
+omm=# insert into test1 select n,'test'||n from generate_series(1,100000) n;
+INSERT 0 100000
+omm=# create table test2 (id integer,name varchar2(20)) with (orientation = column);
+CREATE TABLE
+omm=# insert into test2 select * from test1;
+INSERT 0 100000
+omm=#
+```
+
+### **2.对比行存表和列存表空间大小**
+
+```
+omm=# \d+
+ List of relations
+ Schema | Name | Type | Owner | Size | Storage | Description
+--------+----------------------+-------+-------+------------+--------------------------------------+-------------
+ public | test1 | table | omm | 4352 kB | {orientation=row,compression=no} |
+ public | test2 | table | omm | 648 kB | {orientation=column,compression=low} |
+(2 rows)
+
+omm=#
+```
+
+### **3.对比查询一列和插入一行的速度**
+
+```
+omm=# explain analyze insert into test1 values(1,'zhang');
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ [Bypass]
+ Insert on test1 (cost=0.00..0.01 rows=1 width=0) (actual time=0.067..0.068 rows=1 loops=1)
+ -> Result (cost=0.00..0.01 rows=1 width=0) (actual time=0.000..0.001 rows=1 loops=1)
+ Total runtime: 0.119 ms
+(4 rows)
+
+omm=# explain analyze insert into test2 values(1,'zhang');
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Insert on test2 (cost=0.00..0.01 rows=1 width=0) (actual time=0.216..0.217 rows=1 loops=1)
+ -> Result (cost=0.00..0.01 rows=1 width=0) (actual time=0.001..0.001 rows=1 loops=1)
+ Total runtime: 0.262 ms
+(3 rows)
+
+omm=#
+```
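+
+上面只演示了"插入一行"的对比;补充一个"查询一列"的示意(test1、test2即上文创建的两张表):列存表按列组织数据,对单列做聚合时只需读取该列,通常更占优势,具体耗时以实际环境为准:
+
+```
+-- 示意:对单列做聚合,对比行存表和列存表的执行情况
+explain analyze select avg(id) from test1;  -- 行存表
+explain analyze select avg(id) from test2;  -- 列存表
+```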
"b/content/zh/post/enmo/openGauss\346\257\217\346\227\245\344\270\200\347\273\203\357\274\210\350\247\206\345\233\276\357\274\211.md" @@ -0,0 +1,120 @@ ++++ + +title = "openGauss每日一练(视图)" + +date = "2022-04-21" + +tags = ["openGauss每日一练(视图)"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss每日一练(视图)" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss每日一练(视图) + +本文出处:[https://www.modb.pro/db/222619](https://www.modb.pro/db/222619) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss视图** + +视图与基本表不同,是一个虚拟的表。数据库中仅存放视图的定义,而不存放视图对应的数据,这些数据仍存放在原来的基本表中。 + +## 课后作业 + +### **1.为系统表PG_DATABASE创建视图,重命名视图并修改owner为jim,** + +``` +--SQL文本 +create view v_pg_database as select * from pg_database; +alter view v_pg_database rename to v_pg_database_new +create user jim identified by 'jim@1234'; +alter view v_pg_database_new owner to jim; + +omm=# create view v_pg_database as select * from pg_database; +CREATE VIEW +omm=# alter view v_pg_database rename to v_pg_database_new; +ALTER VIEW +omm=# create user jim identified by 'jim@1234'; +CREATE ROLE +omm=# alter view v_pg_database_new owner to jim; +ALTER VIEW +omm=# + +``` + +### **2.创建一个用户表student,并在用户表上创建视图,修改视图schema;** + +``` +--SQL文本 +create table student +(id integer, +name char(10) +); +create view v_student as select * from student; +create schema schema1; +alter view v_student set schema schema1; + +omm=# create table student +omm-# (id integer, +omm(# name char(10) +omm(# ); +CREATE TABLE ^ +omm=# create view v_student as select * from student; +CREATE VIEW +omm=# create schema schema1; +CREATE SCHEMA +omm=# alter view v_student set schema schema1; +ALTER VIEW +omm=# + +``` + +### **3.使用pg_views查看视图信息** + +``` +--SQL文本 +select schemaname,viewname,viewowner from pg_views where viewname in ('v_pg_database_new','v_student'); + +omm=# select schemaname,viewname,viewowner from pg_views where viewname in ('v_pg_database_new','v_student'); + schemaname | viewname | viewowner +------------+-------------------+----------- + public | v_pg_database_new | jim + schema1 | v_student | omm +(2 rows) + +omm=# + +``` + +### **4.删除视图、表、用户** + +``` +--SQL文本 +drop view v_pg_database_new; +drop view schema1.v_student; +drop table student; +drop user jim; + +omm=# drop view v_pg_database_new; +DROP VIEW +omm=# drop view schema1.v_student; +DROP VIEW +omm=# drop table student; +DROP TABLE +omm=# drop user jim; +DROP ROLE +omm=# + +``` diff --git "a/content/zh/post/enmo/openGauss\350\247\246\345\217\221\345\231\250.md" "b/content/zh/post/enmo/openGauss\350\247\246\345\217\221\345\231\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..07a4943d0ae8012745a9ea4b57cbe4b8e9cf5bcc --- /dev/null +++ "b/content/zh/post/enmo/openGauss\350\247\246\345\217\221\345\231\250.md" @@ -0,0 +1,183 @@ ++++ + +title = "openGauss触发器" + +date = "2022-04-22" + +tags = ["openGauss触发器"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "openGauss触发器" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# openGauss触发器 + +本文出处:[https://www.modb.pro/db/222642](https://www.modb.pro/db/222642) + +## 学习地址 + +[https://www.modb.pro/course/133](https://www.modb.pro/course/133) + +## 学习目标 + +**学习openGauss触发器** + +触发器是对应用动作的响应机制,当应用对一个对象发起DML操作时,就会产生一个触发事件(Event),如果该对象上拥有该事件对应的触发器,那么就会检查触发器的触发条件(Condition)是否满足,如果满足触发条件,那么就会执行触发动作(Action) + +## 课后作业 + +### **1.创建源表和触发表,在源表上创建insert触发器,创建操作触发表的触发器函数** + +``` +omm=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 
+
+## 课后作业
+
+### **1.创建源表和触发表,在源表上创建insert触发器,创建操作触发表的触发器函数**
+
+```
+omm=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT);
+CREATE TABLE
+omm=# CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT);
+CREATE TABLE
+omm=# CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS
+omm-# $$
+omm$# DECLARE
+omm$# BEGIN
+omm$# INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3);
+omm$# RETURN NEW;
+omm$# END
+omm$# $$ LANGUAGE PLPGSQL;
+CREATE FUNCTION
+omm=# CREATE TRIGGER insert_trigger
+omm-# BEFORE INSERT ON test_trigger_src_tbl
+omm-# FOR EACH ROW
+omm-# EXECUTE PROCEDURE tri_insert_func();
+CREATE TRIGGER
+omm=#
+omm=# CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS
+omm-# $$
+omm$# DECLARE
+omm$# BEGIN
+omm$# UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id1=OLD.id1;
+omm$# RETURN OLD;
+omm$# END
+omm$# $$ LANGUAGE PLPGSQL;
+CREATE FUNCTION
+omm=# CREATE TRIGGER update_trigger
+omm-# AFTER UPDATE ON test_trigger_des_tbl
+omm-# FOR EACH ROW
+omm-# EXECUTE PROCEDURE tri_update_func();
+CREATE TRIGGER
+omm=# CREATE OR REPLACE FUNCTION TRI_DELETE_FUNC() RETURNS TRIGGER AS
+omm-# $$
+omm$# DECLARE
+omm$# BEGIN
+omm$# DELETE FROM test_trigger_des_tbl WHERE id1=OLD.id1;
+omm$# RETURN OLD;
+omm$# END
+omm$# $$ LANGUAGE PLPGSQL;
+CREATE FUNCTION
+omm=# CREATE TRIGGER delete_trigger
+omm-# BEFORE DELETE ON test_trigger_des_tbl
+omm-# FOR EACH ROW
+omm-# EXECUTE PROCEDURE tri_delete_func();
+CREATE TRIGGER
+omm=#
+```
+
+### **2.在源表上执行insert操作,查看触发操作是否生效;禁用触发器后,再次查看触发操作是否生效**
+
+```
+omm=# INSERT INTO test_trigger_src_tbl VALUES(100,200,300);
+INSERT 0 1
+omm=# SELECT * FROM test_trigger_src_tbl;
+ id1 | id2 | id3
+-----+-----+-----
+ 100 | 200 | 300
+(1 row)
+
+omm=# SELECT * FROM test_trigger_des_tbl;
+ id1 | id2 | id3
+-----+-----+-----
+ 100 | 200 | 300
+(1 row)
+
+omm=#
+omm=# ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER insert_trigger;
+ALTER TABLE
+omm=# INSERT INTO test_trigger_src_tbl VALUES(100,200,300);
+INSERT 0 1
+omm=# SELECT * FROM test_trigger_src_tbl;
+ id1 | id2 | id3
+-----+-----+-----
+ 100 | 200 | 300
+ 100 | 200 | 300
+(2 rows)
+
+omm=# SELECT * FROM test_trigger_des_tbl;
+ id1 | id2 | id3
+-----+-----+-----
+ 100 | 200 | 300
+(1 row)
+
+omm=#
+```
+
+### **3.使用系统表PG_TRIGGER和\dS+查看触发器**
+
+```
+SQL文本:
+select tgname, tgenabled from pg_trigger;
+\dS+ test_trigger_src_tbl
+\dS+ test_trigger_des_tbl
+```
+
+### **4.重命名触发器**
+
+```
+omm=# ALTER TRIGGER delete_trigger ON test_trigger_des_tbl RENAME TO delete_trigger_renamed;
+ALTER TRIGGER
+omm=#
+```
+
+### **5.删除触发器**
+
+```
+omm=# DROP TRIGGER insert_trigger ON test_trigger_src_tbl;
+DROP TRIGGER
+omm=# DROP TRIGGER update_trigger ON test_trigger_des_tbl;
+DROP TRIGGER
+omm=# DROP TRIGGER delete_trigger_renamed ON test_trigger_des_tbl;
+DROP TRIGGER
+omm=#
+```
diff --git "a/content/zh/post/enmo/opengauss2.1\345\215\207\347\272\247\345\210\260opengauss3.0\347\256\200\345\215\225\346\214\207\345\215\227.md" 
"b/content/zh/post/enmo/opengauss2.1\345\215\207\347\272\247\345\210\260opengauss3.0\347\256\200\345\215\225\346\214\207\345\215\227.md" new file mode 100644 index 0000000000000000000000000000000000000000..49cf6545844760c3b4c546abbeb107643c6f62d5 --- /dev/null +++ "b/content/zh/post/enmo/opengauss2.1\345\215\207\347\272\247\345\210\260opengauss3.0\347\256\200\345\215\225\346\214\207\345\215\227.md" @@ -0,0 +1,239 @@ ++++ + +title = "opengauss2.1升级到opengauss3.0简单指南" + +date = "2022-04-15" + +tags = ["opengauss2.1升级到opengauss3.0简单指南"] + +archives = "2022-04" + +author = "云和恩墨" + +summary = "opengauss2.1升级到opengauss3.0简单指南" + +img = "/zh/post/enmo/title/img6.png" + +times = "10:20" ++++ + +# opengauss2.1升级到opengauss3.0简单指南 + +本文出处:[https://www.modb.pro/db/391240](https://www.modb.pro/db/391240) + +
+本文记录升级中的主要步骤;生产环境升级需要做很多准备工作,完整步骤请参考官方升级指南:https://opengauss.org/zh/blogs/blogs.html?post/shine/opengauss%E5%8D%87%E7%BA%A7%E6%8C%87%E5%AF%BC%E4%B9%A6/
+
+## 1.root用户登录节点,创建升级目录
+
+```sql
+[zf@mogdb-kernel-0003 dblink]$ gsql -d postgres -p 15400 -r
+gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:27 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+[root@mogdb-kernel-0003 dblink]$ mkdir -p /opt/software/gaussdb_upgrade
+[root@mogdb-kernel-0003 dblink]$ cd /opt/software/gaussdb_upgrade
+[root@mogdb-kernel-0003 gaussdb_upgrade]$ ls
+[root@mogdb-kernel-0003 gaussdb_upgrade]$
+
+```
+
+## 2.下载升级包,并解压
+
+下载地址:https://opengauss.org/zh/download.html
+
+```sql
+[root@mogdb-kernel-0003 gaussdb_upgrade]# wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/3.0.0/arm/openGauss-3.0.0-openEuler-64bit-all.tar.gz
+openGauss-3.0.0-openEuler-64bit-all.tar.gz 100%[======================================================================================>] 107.10M 2.63MB/s in 27s
+[root@mogdb-kernel-0003 gaussdb_upgrade]# ls
+openGauss-3.0.0-openEuler-64bit-all.tar.gz
+[root@mogdb-kernel-0003 gaussdb_upgrade]# tar -xf openGauss-3.0.0-openEuler-64bit-all.tar.gz
+[root@mogdb-kernel-0003 gaussdb_upgrade]# ls
+openGauss-3.0.0-openEuler-64bit-all.tar.gz openGauss-3.0.0-openEuler-64bit-om.sha256 openGauss-3.0.0-openEuler-64bit.tar.bz2
+openGauss-3.0.0-openEuler-64bit-cm.sha256 openGauss-3.0.0-openEuler-64bit-om.tar.gz upgrade_sql.sha256
+openGauss-3.0.0-openEuler-64bit-cm.tar.gz openGauss-3.0.0-openEuler-64bit.sha256 upgrade_sql.tar.gz
+[root@mogdb-kernel-0003 gaussdb_upgrade]# tar -xf openGauss-3.0.0-openEuler-64bit-om.tar.gz
+
+```
+
+## 3.进入升级目录,执行升级安装
+
+```sql
+cd /opt/software/gaussdb_upgrade/script
+[root@mogdb-kernel-0003 script]# ./gs_preinstall -U zf -G zf -X /data1/softwarezf/mogdb/clusterconfig.xml
+Parsing the configuration file.
+Successfully parsed the configuration file.
+Installing the tools on the local node.
+Successfully installed the tools on the local node.
+Setting host ip env
+Successfully set host ip env.
+Are you sure you want to create the user[zf] (yes/no)? yes
+Preparing SSH service.
+Successfully prepared SSH service.
+Checking OS software.
+Successfully check os software.
+Checking OS version.
+Successfully checked OS version.
+Creating cluster's path.
+Successfully created cluster's path.
+Set and check OS parameter.
+Setting OS parameters.
+Successfully set OS parameters.
+Warning: Installation environment contains some warning messages.
+Please get more details by "/opt/software/gaussdb_upgrade/script/gs_checkos -i A -h mogdb-kernel-0003 --detail".
+Set and check OS parameter completed.
+Preparing CRON service.
+Successfully prepared CRON service.
+Setting user environmental variables.
+Successfully set user environmental variables.
+Setting the dynamic link library.
+Successfully set the dynamic link library.
+Setting Core file
+Successfully set core path.
+Setting pssh path
+Successfully set pssh path.
+Setting Cgroup.
+Successfully set Cgroup.
+Set ARM Optimization.
+Successfully set ARM Optimization.
+Fixing server package owner.
+Setting finish flag.
+Successfully set finish flag.
+Preinstallation succeeded.
+``` + +## 4.执行就地升级或者灰度升级 + +就地升级:升级期间需停止业务进行,一次性升级所有节点。 + +``` +gs_upgradectl -t auto-upgrade -X /data1/softwarezf/mogdb/clusterconfig.xml +``` + +灰度升级:灰度升级支持全业务操作,也是一次性升级所有节点。 + +``` +gs_upgradectl -t auto-upgrade -X /opt/software/GaussDB_Kernel/clusterconfig.xml --grey +``` + +切换至数据库用户,我这里是zf,执行升级操作,这里采用就地升级 + +```sql +[zf@mogdb-kernel-0003 ~]$ gs_upgradectl -t auto-upgrade -X /data1/softwarezf/mogdb/clusterconfig.xml +Static configuration matched with old static configuration files. +Performing inplace rollback. +Rollback succeeded. +Checking upgrade environment. +Successfully checked upgrade environment. +Successfully started cluster. +Start to do health check. +Successfully checked cluster status. +Backing up current application and configurations. +Successfully backed up current application and configurations. +Stop cluster with gs_om successfully. +Backing up cluster configuration. +Successfully backup hotpatch config file. +Successfully backed up cluster configuration. +Installing new binary. +Restoring cluster configuration. +Successfully restored cluster configuration. +Successfully started cluster. +Stop cluster with gs_om successfully. +Modifying the socket path. +Successfully modified socket path. +NOTICE: Failed to set upgrade_mode to 1, please set it manually. +Successfully started cluster. +copy certs from /data1/mogdbzf/app_compiled to /data1/mogdbzf/app_02c14696. +Successfully copy certs from /data1/mogdbzf/app_compiled to /data1/mogdbzf/app_02c14696. +Stop cluster with gs_om successfully. +Switch symbolic link to new binary directory. +Successfully switch symbolic link to new binary directory. +Successfully started cluster. +Stop cluster with gs_om successfully. +Successfully started cluster. +Waiting for the cluster status to become normal. +. +The cluster status is normal. +Start to do health check. +Successfully checked cluster status. +Upgrade main process has been finished, user can do some check now. +Once the check done, please execute following command to commit upgrade: + + gs_upgradectl -t commit-upgrade -X /data1/softwarezf/mogdb/clusterconfig.xml +``` + +## 5.检查升级是否成功 + +```sql +[zf@mogdb-kernel-0003 ~]$ gsql -d postgres -p 15400 -r +gsql ((openGauss 3.0.0 build 02c14696) compiled at 2022-04-01 18:12:00 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. +openGauss=# + +``` + +## 6.进行提交升级或者回滚 + +一旦提交操作完成,则不能再执行回滚操作。 + +```sql +gs_upgradectl -t commit-upgrade -X /data1/softwarezf/mogdb/clusterconfig.xml +[zf@mogdb-kernel-0003 ~]$ gs_upgradectl -t commit-upgrade -X /data1/softwarezf/mogdb/clusterconfig.xml +NOTICE: Start to commit binary upgrade. +Start to check whether can be committed. +Can be committed. +Start to set commit flag. +Set commit flag succeeded. +Start to do operations that cannot be rollback. +Cancel the upgrade status succeeded. +Start to clean temp files for upgrade. +Clean up backup catalog files. +Successfully cleaned old install path. +Stop cluster with gs_om successfully. +Successfully started cluster. +Clean temp files for upgrade succeeded. +NOTICE: Commit binary upgrade succeeded. +``` + +回滚 + +``` +[zf@mogdb-kernel-0003 ~]$ gs_upgradectl -t auto-rollback -X /data1/softwarezf/mogdb/clusterconfig.xml +Static configuration matched with old static configuration files. +Performing inplace rollback. +Checking static configuration files. +Successfully checked static configuration files. +Successfully started cluster. +Restoring cluster configuration. 
+Successfully rollback hotpatch config file. +Successfully restored cluster configuration. +Start roll back CM instance. +Switch symbolic link to old binary directory. +Successfully switch symbolic link to old binary directory. +Successfully started cluster. +Stop cluster with gs_om successfully. +Restoring application and configurations. +Successfully restored application and configuration. +Restoring cluster configuration. +Successfully rollback hotpatch config file. +Successfully restored cluster configuration. +Clean up backup catalog files. +Successfully started cluster. +Successfully cleaned new install path. +Rollback succeeded. +[zf@mogdb-kernel-0003 ~]$ gsql -d postgres -p 15400 -r +gsql ((openGauss 2.1.0 build 590b0f8e) compiled at 2021-09-30 14:29:27 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +openGauss=# + +``` + +强制回滚,如果数据库异常,可强制回滚 + +``` + gs_upgradectl -t auto-rollback -X /opt/software/GaussDB_Kernel/clusterconfig.xml +``` diff --git a/content/zh/post/enmo/title/img.png b/content/zh/post/enmo/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/enmo/title/img.png differ diff --git a/content/zh/post/enmo/title/img6.png b/content/zh/post/enmo/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79 Binary files /dev/null and b/content/zh/post/enmo/title/img6.png differ diff --git "a/content/zh/post/enmo/\345\205\274\345\256\271\346\250\241\345\274\217\344\270\213\345\257\274\350\207\264\346\225\260\345\200\274\347\261\273\345\236\213\345\217\221\347\224\237\351\232\220\345\274\217\350\275\254\346\215\242\357\274\214SQL\345\234\250\347\224\237\344\272\247\344\270\212\346\227\240\346\263\225\346\255\243\345\270\270\344\275\277\347\224\250\346\241\210\344\276\213.md" "b/content/zh/post/enmo/\345\205\274\345\256\271\346\250\241\345\274\217\344\270\213\345\257\274\350\207\264\346\225\260\345\200\274\347\261\273\345\236\213\345\217\221\347\224\237\351\232\220\345\274\217\350\275\254\346\215\242\357\274\214SQL\345\234\250\347\224\237\344\272\247\344\270\212\346\227\240\346\263\225\346\255\243\345\270\270\344\275\277\347\224\250\346\241\210\344\276\213.md" new file mode 100644 index 0000000000000000000000000000000000000000..db9448f4f09c81f5eb4ed6ffbc6c0f8a93372a68 --- /dev/null +++ "b/content/zh/post/enmo/\345\205\274\345\256\271\346\250\241\345\274\217\344\270\213\345\257\274\350\207\264\346\225\260\345\200\274\347\261\273\345\236\213\345\217\221\347\224\237\351\232\220\345\274\217\350\275\254\346\215\242\357\274\214SQL\345\234\250\347\224\237\344\272\247\344\270\212\346\227\240\346\263\225\346\255\243\345\270\270\344\275\277\347\224\250\346\241\210\344\276\213.md" @@ -0,0 +1,88 @@ ++++ + +title = "兼容模式下导致数值类型发生隐式转换,SQL在生产上无法正常使用案例" + +date = "2022-05-18" + +tags = ["兼容模式下导致数值类型发生隐式转换,SQL在生产上无法正常使用案例"] + +archives = "2022-05" + +author = "云和恩墨" + +summary = "兼容模式下导致数值类型发生隐式转换,SQL在生产上无法正常使用案例" + +img = "/zh/post/enmo/title/img.png" + +times = "10:20" ++++ + +# 兼容模式下导致数值类型发生隐式转换,SQL在生产上无法正常使用案例 + +本文出处:[https://www.modb.pro/db/403148](https://www.modb.pro/db/403148) + +基于MogDB版本V2.0.1 + +## 问题现象 + +厂商研发描述Insert SQL在生产上无法执行,而测试环境中同版本的数据库该SQL可以正常运行。 + +检查SQL后,发现是很简单的insert into values语句,故障点是将 ‘’ 值插入到了numeric数据类型的字段中,提示“invalid input syntax for type numeric”,中断SQL执行。一切都很正常。 + +但是之后跟研发沟通后,在研发的测试环境中, ‘’ 
还就真的插入到了numeric中,而且语句也是简单insert into values,没有做任何数据类型转换。
+
+## 场景复原
+
+模拟现场如下:
+
+```
+create table t (
+    id int,
+    trade_time timestamp(0) without time zone,
+    position numeric(24,2));
+
+insert into t(id,trade_time,position)
+    values(1,to_date('2022-05-13 11:05:00','YYYY-MM-DD HH24:MI:SS'),'');
+
+ERROR: invalid input syntax for type numeric: ""
+LINE 2: ...,to_date('2022-05-13 11:05:00','YYYY-MM-DD HH24:MI:SS'),'');
+                                                                   ^
+CONTEXT: referenced column: position
+```
+
+![图片.png](../images/20220513-706c424f-3e3b-4305-8798-706921389979.png)
+
+## 处理思路
+
+第一反应是怀疑发生了自动或者隐式转换,但是翻遍了整个手册,还是没有发现线索。由于应用还在测试,问题不是很急,当天下午就先处理别的问题了,下班时重新复盘故障现象(不得不说上下班路上真的是头脑风暴的好时间啊),总觉得"把这种字符当做数值型处理"似曾相识,回家闷头翻书,果然发现:在MySQL数据库中,对于numeric类型的字段,当传入 '' 这样的字符串时,会被自动转换为0。
+
+对应到MogDB数据库:MogDB本身支持多数据库兼容模式,包括Oracle、MySQL、PostgreSQL。由于测试环境的MogDB数据库都是PG兼容模式,形成了思维惯性,一直以为故障是由发生隐式转换导致的。
+
+在测试环境下,重新建立了一个MySQL兼容库验证了一下,确认 '' 可以被插入到numeric字段中,并显示为0。
+
+## 场景复现
+
+```
+create database db_mysql DBCOMPATIBILITY='B';
+
+select datname,datcompatibility from pg_database;
+
+\c db_mysql
+create table t (
+    id int,
+    trade_time timestamp(0) without time zone,
+    position numeric(24,2));
+insert into t(id,trade_time,position)
+    values(1,to_date('2022-05-13 11:05:00','YYYY-MM-DD HH24:MI:SS'),'');
+
+select * from t;
+
+```
+
+![图片.png](../images/20220513-9759cb7a-7d71-46af-9ff9-bed99d762a0b.png)
+
+## 总结
+
+正好昨天杨明翰杨老师的恩墨直播培训提到了这里,杨老师的分析更加清晰,截图奉上:
+
+![图片.png](../images/20220513-2acdcd6a-b6f8-49fb-b718-9196894b7011.png)
diff --git "a/content/zh/post/enmo/\345\256\232\344\271\211\345\255\230\345\202\250\350\277\207\347\250\213\345\222\214\345\207\275\346\225\260.md" "b/content/zh/post/enmo/\345\256\232\344\271\211\345\255\230\345\202\250\350\277\207\347\250\213\345\222\214\345\207\275\346\225\260.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f9c1c5fdd0759ba3579affb1d5b6b14bd5be710b
--- /dev/null
+++ "b/content/zh/post/enmo/\345\256\232\344\271\211\345\255\230\345\202\250\350\277\207\347\250\213\345\222\214\345\207\275\346\225\260.md"
@@ -0,0 +1,215 @@
++++
+
+title = "定义存储过程和函数"
+
+date = "2022-04-22"
+
+tags = ["定义存储过程和函数"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "定义存储过程和函数"
+
+img = "/zh/post/enmo/title/img.png"
+
+times = "10:20"
++++
+
+# **定义存储过程和函数**
+
+本文出处:[https://www.modb.pro/db/222642](https://www.modb.pro/db/222642)
+
+## 学习地址
+
+[https://www.modb.pro/course/133](https://www.modb.pro/course/133)
+
+## 学习目标
+
+**学习openGauss定义存储过程和函数**
+
+## 课后作业
+
+### **1.创建带有入参和出参的函数1,调用函数时使用按参数值传递和命名标记法传参**
+
+```
+omm=# create function func_multiply (a integer,b integer) return integer
+omm-# as
+omm$# begin
+omm$# return a * b;
+omm$# end ;
+omm$# /
+CREATE FUNCTION
+omm=# call func_multiply(2,3);
+ func_multiply
+---------------
+             6
+(1 row)
+omm=# call func_multiply(a => 4,b => 5);
+ func_multiply
+---------------
+            20
+(1 row)
+omm=# call func_multiply(a := 3,b := 6);
+ func_multiply
+---------------
+            18
+(1 row)
+omm=#
+
+```
+
+### **2.创建返回类型为record的函数2,重命名函数2**
+
+```
+omm=# create function func_add_multiply (a integer,out result1 integer,out result2 integer,out result3 integer,out result4 integer)
+omm-# returns setof record
+omm-# as $$
+omm$# begin
+omm$# result1 = a + 5;
+omm$# result2 = a - 5;
+omm$# result3 = a * 5;
+omm$# result4 = a / 5;
+omm$# return next;
+omm$# end;
+omm$# $$language plpgsql;
+CREATE FUNCTION
+omm=# call func_add_multiply(5,1,1,1,1);
+ result1 | result2 | result3 | result4
+---------+---------+---------+---------
+      10 |       0 |      25 |       1
+(1 row)
+omm=# call
func_add_multiply(100,1,1,1,1); + result1 | result2 | result3 | result4 +---------+---------+---------+--------- + 105 | 95 | 500 | 20 +(1 row) +omm=# + +``` + +### **3.使用\sf和系统函数查看函数定义** + +``` +omm=# \sf func_multiply +CREATE OR REPLACE FUNCTION public.func_multiply(a integer, b integer) + RETURNS integer + LANGUAGE plpgsql + NOT FENCED NOT SHIPPABLE +AS $function$ DECLARE +begin +return a * b; +end $function$; +omm=# \sf func_add_multiply +CREATE OR REPLACE FUNCTION public.func_add_multiply(a integer, OUT result1 integer, OUT result2 integer, OUT result3 integer, OUT result4 integer) + RETURNS SETOF record + LANGUAGE plpgsql + NOT FENCED NOT SHIPPABLE +AS $function$ +begin +result1 = a + 5; +result2 = a - 5; +result3 = a * 5; +result4 = a / 5; +return next; +end; +$function$; +omm=# +omm=# \x +Expanded display is on. +omm=# select * from pg_proc where proname='func_multiply'; +-[ RECORD 1 ]----+-------------- +proname | func_multiply +pronamespace | 2200 +proowner | 10 +prolang | 11750 +procost | 100 +prorows | 0 +provariadic | 0 +protransform | - +proisagg | f +proiswindow | f +prosecdef | f +proleakproof | f +proisstrict | f +proretset | f +provolatile | v +pronargs | 2 +pronargdefaults | 0 +prorettype | 23 +proargtypes | 23 23 +proallargtypes | +proargmodes | +proargnames | {a,b} +proargdefaults | +prosrc | DECLARE + | begin + | return a * b; + | end +probin | +proconfig | +proacl | +prodefaultargpos | +fencedmode | f +proshippable | f +propackage | f +prokind | f + +omm=# select * from pg_proc where proname='func_add_multiply'; +-[ RECORD 1 ]----+------------------------------------ +proname | func_add_multiply +pronamespace | 2200 +proowner | 10 +prolang | 11750 +procost | 100 +prorows | 1000 +provariadic | 0 +protransform | - +proisagg | f +proiswindow | f +prosecdef | f +proleakproof | f +proisstrict | f +proretset | t +provolatile | v +pronargs | 1 +pronargdefaults | 0 +prorettype | 2249 +proargtypes | 23 +proallargtypes | {23,23,23,23,23} +proargmodes | {i,o,o,o,o} +proargnames | {a,result1,result2,result3,result4} +proargdefaults | +prosrc | + | begin + | result1 = a + 5; + | result2 = a - 5; + | result3 = a * 5; + | result4 = a / 5; + | return next; + | end; + | +probin | +proconfig | +proacl | +prodefaultargpos | +fencedmode | f +proshippable | f +propackage | f +prokind | f + +omm=# + +``` + +### **4.删除函数** + +``` +omm=# drop function func_multiply; +DROP FUNCTION +omm=# drop function func_add_multiply; +DROP FUNCTION +omm=# + +``` diff --git "a/content/zh/post/eygle/MogDB \345\256\211\350\243\205\350\247\243\345\216\213\351\224\231\350\257\257 cannot run bzip2 No such file or directory.md" "b/content/zh/post/eygle/MogDB \345\256\211\350\243\205\350\247\243\345\216\213\351\224\231\350\257\257 cannot run bzip2 No such file or directory.md" new file mode 100644 index 0000000000000000000000000000000000000000..bc7aefb91b64b6cf2a53ef2090bf13041dbb479d --- /dev/null +++ "b/content/zh/post/eygle/MogDB \345\256\211\350\243\205\350\247\243\345\216\213\351\224\231\350\257\257 cannot run bzip2 No such file or directory.md" @@ -0,0 +1,134 @@ ++++ + +title = "MogDB 安装解压错误:cannot run bzip2: No such file or directory" + +date = "2022-05-18" + +tags = ["MogDB 安装解压错误:cannot run bzip2: No such file or directory"] + +archives = "2022-05" + +author = "eygle" + +summary = "MogDB 安装解压错误:cannot run bzip2: No such file or directory" + +img = "/zh/post/eygle/title/img6.png" + +times = "10:20" ++++ + +# MogDB 安装解压错误:cannot run bzip2: No such file or directory + 
+本文出处:[https://www.modb.pro/db/403662](https://www.modb.pro/db/403662) + +## 问题症状 + +MogDB 安装时,涉及两个步骤解压,第一步解压缩 tar包: + +``` +[root@enmotech ~]# tar -xvf MogDB-2.1.1-CentOS-x86_64.tar +upgrade_sql.tar.gz +MogDB-2.1.1-CentOS-64bit.sha256 +MogDB-2.1.1-CentOS-64bit.tar.bz2 +MogDB-2.1.1-CentOS-64bit-Libpq.tar.gz +MogDB-2.1.1-CentOS-64bit-om.sha256 +MogDB-2.1.1-CentOS-64bit-om.tar.gz +MogDB-2.1.1-CentOS-64bit-tools.tar.gz +upgrade_sql.sha256 +``` + +第二步,解压缩 bz2 文件包,在这一步骤遇到错误,提示 bzip2 不能执行,这是因为操作系统上没有安装 bz2 解压缩工具: + +``` +[root@enmotech MogDB]# tar -jxf MogDB-2.1.1-CentOS-64bit.tar.bz2 +tar (child): bzip2: Cannot exec: No such file or directory +tar (child): Error is not recoverable: exiting now +tar: Child returned status 2 +tar: Error is not recoverable: exiting now +``` + +## 问题分析 + +手工安装,如果配置了 yum 源,通过命令 + +> yum -y install bzip2 + +来安装 bzip2 工具。 + +``` +[root@enmotech MogDB]# yum -y install bzip2 +Failed to set locale, defaulting to C +Loaded plugins: fastestmirror +Determining fastest mirrors +base | 3.6 kB 00:00:00 +epel | 4.7 kB 00:00:00 +extras | 2.9 kB 00:00:00 +mysql-connectors-community | 2.6 kB 00:00:00 +mysql-tools-community | 2.6 kB 00:00:00 +mysql57-community | 2.6 kB 00:00:00 +nginx | 2.9 kB 00:00:00 +updates | 2.9 kB 00:00:00 +webtatic | 3.6 kB 00:00:00 +(1/9): epel/x86_64/group_gz | 96 kB 00:00:00 +(2/9): epel/x86_64/updateinfo | 1.0 MB 00:00:00 +(3/9): extras/7/x86_64/primary_db | 246 kB 00:00:00 +(4/9): epel/x86_64/primary_db | 7.0 MB 00:00:00 +(5/9): updates/7/x86_64/primary_db | 15 MB 00:00:00 +(6/9): mysql-tools-community/x86_64/primary_db | 86 kB 00:00:00 +(7/9): mysql-connectors-community/x86_64/primary_db | 87 kB 00:00:01 +(8/9): mysql57-community/x86_64/primary_db | 306 kB 00:00:01 +(9/9): nginx/x86_64/primary_db | 72 kB 00:00:01 +Resolving Dependencies +--> Running transaction check +---> Package bzip2.x86_64 0:1.0.6-13.el7 will be installed +--> Finished Dependency Resolution + +Dependencies Resolved + +========================================================================================================================================================================================================================================= + Package Arch Version Repository Size +========================================================================================================================================================================================================================================= +Installing: + bzip2 x86_64 1.0.6-13.el7 base 52 k + +Transaction Summary +========================================================================================================================================================================================================================================= +Install 1 Package + +Total download size: 52 k +Installed size: 82 k +Downloading packages: +bzip2-1.0.6-13.el7.x86_64.rpm | 52 kB 00:00:00 +Running transaction check +Running transaction test +Transaction test succeeded +Running transaction + Installing : bzip2-1.0.6-13.el7.x86_64 1/1 + Verifying : bzip2-1.0.6-13.el7.x86_64 1/1 + +Installed: + bzip2.x86_64 0:1.0.6-13.el7 + +Complete! +``` + +## 问题解决 + +现在可以正常解压缩 bz2 文件: + +``` +[root@enmotech MogDB]# tar -xvf MogDB-2.1.1-CentOS-64bit.tar.bz2 +./bin/ +./bin/gstrace +./bin/kdb5_util +./bin/gs_dumpall +./bin/krb5kdc +./bin/gs_initdb +./bin/klist +./bin/mogdb +./bin/kinit +./bin/gs_basebackup +./bin/openssl +./bin/encrypt +... 
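+# 补充说明(示例,非本机实际输出):bzip2 装好之后,也可以分两步解压,效果与 tar -jxf 相同:
+#   bunzip2 MogDB-2.1.1-CentOS-64bit.tar.bz2    # 先还原出 .tar 文件(bunzip2 随 bzip2 包一起安装)
+#   tar -xvf MogDB-2.1.1-CentOS-64bit.tar       # 再展开目录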
+```
diff --git a/content/zh/post/eygle/images/20220413-bf279e19-eb33-475f-8319-9bd07aaefaf5.png b/content/zh/post/eygle/images/20220413-bf279e19-eb33-475f-8319-9bd07aaefaf5.png
new file mode 100644
index 0000000000000000000000000000000000000000..66f8d43186d5b3c16237c96c020dba3c15c3df27
Binary files /dev/null and b/content/zh/post/eygle/images/20220413-bf279e19-eb33-475f-8319-9bd07aaefaf5.png differ
diff --git a/content/zh/post/eygle/title/img.png b/content/zh/post/eygle/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/eygle/title/img.png differ
diff --git a/content/zh/post/eygle/title/img6.png b/content/zh/post/eygle/title/img6.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79
Binary files /dev/null and b/content/zh/post/eygle/title/img6.png differ
diff --git "a/content/zh/post/eygle/\344\275\277\347\224\250openGauss jdbc 3.0\346\265\213\350\257\225\345\233\275\345\257\206SM3\347\224\250\346\210\267\350\256\244\350\257\201.md" "b/content/zh/post/eygle/\344\275\277\347\224\250openGauss jdbc 3.0\346\265\213\350\257\225\345\233\275\345\257\206SM3\347\224\250\346\210\267\350\256\244\350\257\201.md"
new file mode 100644
index 0000000000000000000000000000000000000000..0523cf41bb8c2d77e14dce5f5c44d41d33dc473b
--- /dev/null
+++ "b/content/zh/post/eygle/\344\275\277\347\224\250openGauss jdbc 3.0\346\265\213\350\257\225\345\233\275\345\257\206SM3\347\224\250\346\210\267\350\256\244\350\257\201.md"
@@ -0,0 +1,123 @@
++++
+
+title = "使用openGauss jdbc 3.0测试国密SM3用户认证"
+
+date = "2022-04-25"
+
+tags = ["使用openGauss jdbc 3.0测试国密SM3用户认证"]
+
+archives = "2022-04"
+
+author = "云和恩墨"
+
+summary = "使用openGauss jdbc 3.0测试国密SM3用户认证"
+
+img = "/zh/post/eygle/title/img.png"
+
+times = "16:04"
++++
+
+# 使用openGauss jdbc 3.0测试国密SM3用户认证
+
+本文出处:https://www.modb.pro/db/393728
+
+openGauss现在支持四种用户认证方式,通过postgresql.conf文件中的参数password_encryption_type确定,认证方式与该参数的对应关系如下表所示:
+
+| 认证方式   | 参数                       |
+| ---------- | -------------------------- |
+| md5        | password_encryption_type=0 |
+| sha256+md5 | password_encryption_type=1 |
+| sha256     | password_encryption_type=2 |
+| sm3        | password_encryption_type=3 |
+
+### 配置SM3认证方式
+
+在postgresql.conf文件中配置password_encryption_type=3,并重启数据库使该参数生效。
+
+```
+openGauss=# show password_encryption_type;
+ password_encryption_type
+---------------------------
+ 3
+(1 row)
+```
+
+创建用户
+
+```
+openGauss=# create user user_sm3 password 'Admin@1234';
+CREATE ROLE
+```
+
+检查数据库存储的密文
+
+```
+openGauss=# select rolname,rolpassword from pg_authid where rolname='user_sm3';
+-[ RECORD 1 ]---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+rolname     | user_sm3
+rolpassword | sm35235899e0406d85e4e086db4db7025d53acddb10b86d9988d7b2a1bc250322365df5fcbffc508287f1ef3205a81f8d63f43b87b16235ce7c582a0b6fa226154d7d038a0a9c2bd85aed2a9273cdd55864a481e967b62c63579441c1b03f6c550becdfecefade
+```
+
+pg_hba.conf文件中配置认证方式为SM3
+
+```
+host all user_sm3 0/0 sm3
+```
+
+通过gsql进行测试可正常连接
+
+```
+[omm@mogdb ~]$ gsql -Uuser_sm3 -p3000 -h192.168.137.250 postgres -r --password='Admin@1234'
+gsql ((openGauss 3.0.0 build 02c14696) compiled at 2022-04-01 18:12:34 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+openGauss=>
+
+```
+
+### 使用openGauss-jdbc 3.0进行连接测试
+
+需要先下载bcprov-jdk15on依赖包,下载链接如下:
+https://mvnrepository.com/artifact/org.bouncycastle/bcprov-jdk15on
+选择最新版本即可。
+
+将bcprov-jdk15on和opengauss-jdbc 3.0的jar包直接导入工程,或按常规Maven依赖写法引入(opengauss-jdbc的版本可按需指定):
+
+```xml
+<dependency>
+    <groupId>org.opengauss</groupId>
+    <artifactId>opengauss-jdbc</artifactId>
+</dependency>
+<dependency>
+    <groupId>org.bouncycastle</groupId>
+    <artifactId>bcprov-jdk15on</artifactId>
+    <version>1.70</version>
+</dependency>
+```
+
+使用下面的java代码进行测试(类名仅为示例):
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+
+public class SM3AuthTest {
+    public static void main(String[] args) throws Exception {
+        Class.forName("org.opengauss.Driver");
+        // 以配置为SM3认证的用户建立连接
+        Connection conn = DriverManager.getConnection("jdbc:opengauss://192.168.137.250:3000/postgres",
+                "user_sm3", "Admin@1234");
+        String sql = "insert into t values(?)";
+        PreparedStatement ps = null;
+        try {
+            ps = conn.prepareStatement(sql);
+            ps.setInt(1, 100);
+            ps.execute();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        conn.close();
+    }
+}
+```
+
+运行结果测试正常
+
+![img](../images/20220413-bf279e19-eb33-475f-8319-9bd07aaefaf5.png)
diff --git "a/content/zh/post/fanjijie/openGauss\345\217\257\347\224\250\345\212\250\346\200\201\350\267\237\350\270\252\345\267\245\345\205\267.md" "b/content/zh/post/fanjijie/openGauss\345\217\257\347\224\250\345\212\250\346\200\201\350\267\237\350\270\252\345\267\245\345\205\267.md"
new file mode 100644
index 0000000000000000000000000000000000000000..4f11867ebea654fa0f7795ae86f974327ec32142
--- /dev/null
+++ "b/content/zh/post/fanjijie/openGauss\345\217\257\347\224\250\345\212\250\346\200\201\350\267\237\350\270\252\345\267\245\345\205\267.md"
@@ -0,0 +1,346 @@
++++
+
+title = "openGauss可用动态跟踪工具"
+
+date = "2022-04-06"
+
+tags = ["openGauss可用动态跟踪工具"]
+
+archives = "2022-04"
+
+author = "范计杰"
+
+summary = "openGauss可用动态跟踪工具"
+
+img = "/zh/post/fanjijie/title/img20.png"
+
+times = "11:37"
+
++++
+
+# openGauss可用动态跟踪工具
+
+分析疑难问题时,掌握数据库之外的一些动态跟踪工具可以事半功倍。
+
+## openGauss获取OS线程ID
+
+```sql
+omm=# select pg_backend_pid();
+ pg_backend_pid
+-----------------
+ 140535517730560
+(1 row)
+
+omm=# \d pg_os_threads
+   View "pg_catalog.pg_os_threads"
+    Column     |           Type           | Modifiers
+---------------+--------------------------+-----------
+ node_name     | text                     |
+ pid           | bigint                   |
+ lwpid         | integer                  |
+ thread_name   | text                     |
+ creation_time | timestamp with time zone |
+
+omm=# select lwpid from pg_os_threads where pid=pg_backend_pid();
+ lwpid
+-------
+  3565
+(1 row)
+```
+
+## gstrace
+
+gstrace是openGauss提供的用来跟踪内核代码执行路径、记录内核数据结构、分析代码性能的工具。trace的点位和数据在版本中是固化的,无法动态添加和删除。
+
+```sql
+omm=# select lwpid from pg_os_threads where pid=pg_backend_pid();
+ lwpid
+-------
+  3565
+(1 row)
+
+[root@centos7 ~]# ps -ef |grep openGauss
+omm 1783 1 2 18:48 pts/0 00:00:22 /opt/openGauss/app/bin/openGauss -D primary -M primary
+
+gstrace不能只针对某个Thread收集trace数据
+gstrace start -p 1783
+gstrace config -p 1783
+
+---做vacuum操作
+omm=# vacuum t;
+VACUUM
+
+gstrace dump -p 1783 -o gstrace.log
+gstrace codepath -f gstrace.log -o codepath.txt
+
+vi codepath.txt 查找tid: 3565
+
+pid: 1783 tid: 3565
+
+45 StartTransaction ENTRY
+46 StartTransaction EXIT 26.000000
+47 PortalStart ENTRY
+48 PortalStart EXIT 7.000000
+49 PortalRun ENTRY
+50 | vacuum_rel ENTRY
+51 | | StartTransaction ENTRY
+52 | | StartTransaction EXIT 7.000000
+53 | | lazy_vacuum_rel ENTRY
+54 | | | lazy_scan_heap ENTRY
+55 | | | | heap_page_prune ENTRY
+56 | | | | | heap_prune_chain ENTRY
+57 | | | | | heap_prune_chain EXIT 2.000000
+58 | | | | | heap_prune_chain ENTRY
+59 | | | | | heap_prune_chain EXIT 1.000000
+60 | | | | | heap_prune_chain ENTRY
+61 | | | | | heap_prune_chain EXIT 1.000000
+62 | | | | | heap_prune_chain ENTRY
+63 | | |
 | | heap_prune_chain EXIT 17.000000
+64 | | | | | heap_prune_chain ENTRY
+65 | | | | | heap_prune_chain EXIT 1.000000
+```
+
+gstrace analyze -f gstrace.log -o analyze.txt
+
+ ![image20211209192219550.png](https://oss-emcsprod-public.modb.pro/image/editor/20211231-51a12427-664d-4add-9289-5053b41956d5.png)
+
+gstrace detail -f gstrace.log -o detail.txt
+
+```
+cat detail.txt |grep "Tid: 3565"|more
+45 ENTRY, Pid: 1783, Tid: 3565, Function: StartTransaction MicroSecond:625972 Thu Dec 9 19:13:05 2021
+46 EXIT, Pid: 1783, Tid: 3565, Function: StartTransaction MicroSecond:625998 Thu Dec 9 19:13:05 2021
+47 ENTRY, Pid: 1783, Tid: 3565, Function: PortalStart MicroSecond:626117 Thu Dec 9 19:13:05 2021
+48 EXIT, Pid: 1783, Tid: 3565, Function: PortalStart MicroSecond:626124 Thu Dec 9 19:13:05 2021
+49 ENTRY, Pid: 1783, Tid: 3565, Function: PortalRun MicroSecond:626126 Thu Dec 9 19:13:05 2021
+50 ENTRY, Pid: 1783, Tid: 3565, Function: vacuum_rel MicroSecond:637168 Thu Dec 9 19:13:05 2021
+51 ENTRY, Pid: 1783, Tid: 3565, Function: StartTransaction MicroSecond:637172 Thu Dec 9 19:13:05 2021
+52 EXIT, Pid: 1783, Tid: 3565, Function: StartTransaction MicroSecond:637179 Thu Dec 9 19:13:05 2021
+53 ENTRY, Pid: 1783, Tid: 3565, Function: lazy_vacuum_rel MicroSecond:637211 Thu Dec 9 19:13:05 2021
+54 ENTRY, Pid: 1783, Tid: 3565, Function: lazy_scan_heap MicroSecond:637280 Thu Dec 9 19:13:05 2021
+55 ENTRY, Pid: 1783, Tid: 3565, Function: heap_page_prune MicroSecond:637329 Thu Dec 9 19:13:05 2021
+```
+
+## gdb跟踪
+
+gdb是UNIX及类UNIX系统下常用的调试工具。
+
+```sql
+omm=# select lwpid from pg_os_threads where pid=pg_backend_pid();
+ lwpid
+-------
+  8226
+(1 row)
+
+[omm@centos7 ~]$ ps -ef |grep openGauss
+omm 8144 1 3 19:43 pts/0 00:00:01 /opt/openGauss/app/bin/openGauss -D primary -M primary
+
+[omm@centos7 ~]$ gdb -p 8144
+(gdb) info threads
+  Id   Target Id         Frame
+  35   Thread 0x7f96b73ff700 (LWP 8145) "jemalloc_bg_thd" 0x00007f96b89649f5 in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
+  34   Thread 0x7f968f5bf700 (LWP 8150) "openGauss" 0x00007f96b8968381 in sigwait () from /lib64/libpthread.so.0
+  33   Thread 0x7f96860ff700 (LWP 8151) "syslogger" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+  32   Thread 0x7f96850fe700 (LWP 8152) "alarm" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+  31   Thread 0x7f96840fd700 (LWP 8153) "reaper" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+  30   Thread 0x7f96779ff700 (LWP 8180) "checkpointer" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+  29   Thread 0x7f96769fe700 (LWP 8181) "pagewriter" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+  28   Thread 0x7f96759fd700 (LWP 8182) "pagewriter" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+  27   Thread 0x7f96749fc700 (LWP 8183) "bgwriter" 0x00007f96b867ebed in poll () from /lib64/libc.so.6
+...........
+ 3 Thread 0x7f96531cf700 (LWP 8211) "WalSender" 0x00007f96b867ebed in poll () from /lib64/libc.so.6 + 2 Thread 0x7f96521ce700 (LWP 8226) "worker" 0x00007f96b8967aab in recv () from /lib64/libpthread.so.0 +* 1 Thread 0x7f96c1212700 (LWP 8144) "openGauss" 0x00007f96b867ebed in poll () from /lib64/libc.so.6 + +(gdb) thread 2 +[Switching to thread 2 (Thread 0x7f96521ce700 (LWP 8226))] +#0 0x00007f96b8967aab in recv () from /lib64/libpthread.so.0 +(gdb) break fsm_vacuum_page thread 2 +Breakpoint 1 at 0x55f536c547a4 +(gdb) +Breakpoint 1 at 0x55f536c547a4 +(gdb) info break +Num Type Disp Enb Address What +1 breakpoint keep y 0x000055f536c547a4 thread 36 + stop only in thread 36 +(gdb) handle all nostop noprint #####让gdb不要在收到信号时中断,不然会频繁中断 +(gdb) c +Continuing. +[Switching to Thread 0x7f96521ce700 (LWP 10792)] + + +omm=# vacuum t; + +Breakpoint 1, 0x000055f536c547a4 in fsm_vacuum_page(RelationData*, FSMAddress const&, bool*) () +(gdb) + +(gdb) bt +#0 0x000055f536c547a4 in fsm_vacuum_page(RelationData*, FSMAddress const&, bool*) () +#1 0x000055f536c551dc in FreeSpaceMapVacuum(RelationData*) () +#2 0x000055f53664c171 in lazy_vacuum_rel(RelationData*, VacuumStmt*, BufferAccessStrategyData*) () +#3 0x000055f536647590 in vacuum_rel(unsigned int, VacuumStmt*, bool) () +#4 0x000055f536648fd1 in vacuum(VacuumStmt*, unsigned int, bool, BufferAccessStrategyData*, bool) () +#5 0x000055f53670d58c in DoVacuumMppTable(VacuumStmt*, char const*, bool, bool) () +#6 0x000055f5367106ca in standard_ProcessUtility(Node*, char const*, ParamListInfoData*, bool, _DestReceiver*, bool, char*) () +#7 0x00007f96ae686e7b in ?? () +#8 0x00007f96521a4690 in ?? () +#9 0x00007f96ae684bee in ?? () +#10 0x0000000000000000 in ?? () +(gdb) c +Continuing. +``` + +## systemtap跟踪 + +systemtap是Linux下一个非常有用的调试(跟踪/探测)工具,常用于Linux内核或者应用程序的信息采集,比如:获取一个函数里面运行时的变量、调用堆栈,甚至可以直接修改变量的值,对诊断性能或功能问题非常有帮助 + +``` +获取变量信息,可能编译时优化级别较高,没办法拿到变量信息 +stap -vL 'process("/opt/openGauss/app/bin/openGauss").function("*vacuum*")' + +stap -ve 'probe process("/opt/openGauss/app/bin/openGauss").function("*vacuum*"){if(tid()==$1)printf("%s\n",ppfunc())}' -x 1783 3565 + +----做vacuum操作 +vacuum t; + +-----跟踪出来的函数都带加了其它字符,而且比gstrace跟踪出来的少。 +_Z6vacuumP10VacuumStmtjbP24BufferAccessStrategyDatab +_Z18pgstat_vacuum_statv +_ZL10vacuum_reljP10VacuumStmtb +_Z15lazy_vacuum_relP12RelationDataP10VacuumStmtP24BufferAccessStrategyData +_Z21vacuum_set_xid_limitsP12RelationDatallPmS1_S1_ +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv +_Z18vacuum_delay_pointv 
+_Z18vacuum_delay_pointv +_ZL15fsm_vacuum_pageP12RelationDataRK10FSMAddressPb +_ZL15fsm_vacuum_pageP12RelationDataRK10FSMAddressPb +_ZL15fsm_vacuum_pageP12RelationDataRK10FSMAddressPb +_ZL15fsm_vacuum_pageP12RelationDataRK10FSMAddressPb +_ZL15fsm_vacuum_pageP12RelationDataRK10FSMAddressPb +_Z20pgstat_report_vacuumjjbl +``` + +## perf + +perf是Linux下的一款性能分析工具。 + +``` +---记录perf数据 +[root@centos7 ~]# ps -ef |grep openGauss +omm 13739 1 8 12:24 pts/0 00:00:42 /opt/openGauss/app/bin/openGauss -D /opt/openGauss/data/db1 +root 14953 12118 0 12:32 pts/3 00:00:00 grep --color=auto openGauss + + +omm=# select lwpid from pg_os_threads where pid=pg_backend_pid(); + lwpid +------- + 4448 +(1 row) + + + +-p 指定进程ID +-t 指定线程ID + + +[root@centos7 ~]# perf record -g -F 99 -a -p 13739 +info: Using a maximum frequency rate of 100,000 Hz +Warning: +PID/TID switch overriding SYSTEM +^C[ perf record: Woken up 1 times to write data ] +[ perf record: Captured and wrote 0.024 MB perf.data (3 samples) ] + + + + + +---离线分析 +[root@centos7 ~]# perf report -i perf.data + +Samples: 3 of event 'cpu-clock', Event count (approx.): 30000 + Children Self Command Shared Object Symbol ++ 66.67% 66.67% worker [kernel.kallsyms] [k] finish_task_switch ++ 66.67% 0.00% worker libpthread-2.17.so [.] __libc_recv ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9738bede ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9722f61e ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9722d668 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9722d4f5 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9730cb04 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9730c439 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9737cb51 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9737f1c9 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff9737ec28 ++ 66.67% 0.00% worker [unknown] [.] 0xffffffff96cd3f04 ++ 33.33% 33.33% worker openGauss [.] errstart ++ 33.33% 0.00% worker libpthread-2.17.so [.] start_thread ++ 33.33% 0.00% worker openGauss [.] InternalThreadFunc ++ 33.33% 0.00% worker openGauss [.] GaussDbThreadMain<(knl_thread_role)1> ++ 33.33% 0.00% worker openGauss [.] BackendRun ++ 33.33% 0.00% worker openGauss [.] PostgresMain ++ 33.33% 0.00% worker openGauss [.] exec_simple_query ++ 33.33% 0.00% worker openGauss [.] PortalRun ++ 33.33% 0.00% worker openGauss [.] PortalRunMulti ++ 33.33% 0.00% worker openGauss [.] PortalRunUtility ++ 33.33% 0.00% worker openGauss [.] pgaudit_ProcessUtility ++ 33.33% 0.00% worker security_plugin.so [.] gsaudit_ProcessUtility_hook ++ 33.33% 0.00% worker openGauss [.] standard_ProcessUtility ++ 33.33% 0.00% worker openGauss [.] DoVacuumMppTable ++ 33.33% 0.00% worker openGauss [.] vacuum ++ 33.33% 0.00% worker openGauss [.] pgstat_vacuum_stat ++ 33.33% 0.00% worker openGauss [.] heap_getnext ++ 33.33% 0.00% worker openGauss [.] heapgettup ++ 33.33% 0.00% worker openGauss [.] HeapTupleSatisfiesVisibility ++ 33.33% 0.00% worker openGauss [.] 
HeapTupleSatisfiesNow
+
+---perf script可以转换收集的数据为可读的文本
+perf script -i perf.data > out.perf
+```
diff --git "a/content/zh/post/fanjijie/openGauss\346\225\260\346\215\256\345\272\223Commit Sequence Number(CSN)\350\247\243\346\236\220.md" "b/content/zh/post/fanjijie/openGauss\346\225\260\346\215\256\345\272\223Commit Sequence Number(CSN)\350\247\243\346\236\220.md"
new file mode 100644
index 0000000000000000000000000000000000000000..9bed399398276b6718946ddffa0912df09753e35
--- /dev/null
+++ "b/content/zh/post/fanjijie/openGauss\346\225\260\346\215\256\345\272\223Commit Sequence Number(CSN)\350\247\243\346\236\220.md"
@@ -0,0 +1,35 @@
++++
+
+title = "openGauss数据库Commit Sequence Number(CSN)解析"
+
+date = "2022-04-07"
+
+tags = ["openGauss数据库Commit Sequence Number(CSN)解析"]
+
+archives = "2022-04"
+
+author = "范计杰"
+
+summary = "openGauss数据库Commit Sequence Number(CSN)解析"
+
+img = "/zh/post/fanjijie/title/img20.png"
+
+times = "10:20"
+
++++
+
+# openGauss数据库Commit Sequence Number(CSN)解析
+
+openGauss数据库中Commit Sequence Number简称CSN,它使用一个全局自增的长整数作为逻辑时间戳,模拟数据库内部的时序,与Oracle中的SCN类似,该逻辑时间戳被称为提交顺序号。每当一个事务提交的时候,在提交序列号日志(CSN LOG)中会记录该事务号XID(事务的全局唯一标识)对应的逻辑时间戳(CSN值)。
+
+CSN日志中记录的XID值与CSN值的对应关系,即决定了所有事务的状态函数f(t)。
+
+如图所示,在一个事务的实际执行过程中,并不会在开始就加载全部的CSN日志,而是在扫描到某条记录以后,才会去CSN日志中查询该条记录头部xmin和xmax这两个事务号对应的CSN值,并基于此进行可见性判断,这称为基于逻辑提交时间戳的可见性判断,是对PG在可见性判断时需要查询活跃事务列表这一方式的改进。
+
+ ![img](https://oss-emcsprod-public.modb.pro/image/editor/20211124-001517a0-5e70-4400-bccd-dff81afc852c.png)
+
+PG的可见性判断如下:
+
+ ![img](https://oss-emcsprod-public.modb.pro/image/editor/20211124-904243f0-ee56-4fe7-afc6-0a8560af54b3.png)
+
+PG在获取查询快照时需要获取活跃事务列表,openGauss虽然不用获取活跃事务列表,但在读取tuple后需要查询XMIN、XMAX对应的CSN。哪种方式更高效个人还没有搞清楚,可能openGauss在高并发的事务处理时更有优势,因为OLTP环境中活跃事务较多,并且多数是基于索引的查询,需要XMIN、XMAX转CSN的记录很少。
diff --git "a/content/zh/post/foreverdragon/opengauss\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" "b/content/zh/post/foreverdragon/opengauss\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md"
new file mode 100644
index 0000000000000000000000000000000000000000..d50545b265eb722531aac59dc15e8d8a63a9be93
--- /dev/null
+++ "b/content/zh/post/foreverdragon/opengauss\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md"
@@ -0,0 +1,905 @@
++++
+
+title = "openGauss安装与使用"
+
+date = "2021-12-04"
+
+tags = ["openGauss开发入门"]
+
+archives = "2021-12"
+
+author = "foreverdragon"
+
+summary = "openGauss的安装与使用"
+
+img = "/zh/post/foreverdragon/title/title1.jpg"
+
+times = "19:20"
+
++++
+
+**一、openGauss数据库安装:**
+
+1.1 实验环境:VirtualBox 6.1.26 + CentOS 7.8 + openGauss 1.1.0
+
+1.2 虚拟机VirtualBox安装:
+
+在virtualbox.org/wiki/Downloads上下载Windows hosts版本的安装包并安装(全部下一步默认安装)
+
+![](media/2897e5d2e80f1355a5b12fd9ad7dd511.png)
+
+1.3 CentOS 7.8镜像下载
+
+在华为开源镜像站下载:
+
+![](media/eb0b6463c550ce32bb7c57be4a305be6.png)
+
+1.4 在VirtualBox上安装CentOS 7.8
+
+①新建虚拟机:
+![](media/350d9e06fecc01aa00cc57781bb16886.png)
+
+![](media/3dd46b4f5f324aa30da2ac3636237dc2.png)
+
+设置虚拟机并安装:
+
+1. 在系统里修改处理器数量为2,启动顺序为光驱、硬盘
+
+2. 在存储里选择下载的系统镜像
+
+3. 网络设置:网卡一:仅主机网络,网卡二:网络地址转换
+
+4. 开始启动(选择第一个)
+
+5. 
设置分区;网络选项中enp0s3、enp0s8的设置介绍如下
+
+![](media/61f6f52d9cbf3ff117ca4ff819e3803a.png)
+
+![](media/211c50736b67aca6eee093b295622585.png)
+
+6. 软件选择如下:
+
+![](media/c22c65f519cc9e38260e633d20d3a2a1.png)
+
+7. 创建用户,设置root密码
+
+8. 进入系统后通过ifconfig和ping命令查看是否联网
+
+1.5 操作系统环境准备
+
+①修改操作系统版本:
+
+[root@db1 \~]\# **vi /etc/redhat-release**
+
+CentOS Linux release 7.6.2003 (Core)
+
+②执行以下两个命令将防火墙关闭:
+
+[root@db1 \~]\# **systemctl stop firewalld.service**
+
+[root@db1 \~]\# **systemctl disable firewalld.service**
+
+Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
+
+Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
+
+[root@db1 \~]\#
+
+③设置字符集及环境变量。
+
+![](media/baf2664904d1792d50b3f758f5dae03e.png)
+
+④验证变量是否生效。
+
+![](media/0d756cd8a7cbc470362a92066548e838.png)
+
+⑤关闭swap交换内存
+
+![](media/ce24d288e5e88dfef329c87504eb51fc.png)
+
+⑥准备yum环境
+
+先备份一下:
+
+![](media/2f7f6c983d8f33ab61835760430a88f3.png)
+
+⑦下载可用源的repo文件
+
+![](media/859c503011ec793d3e73f1faa536fcc6.png)
+
+查看repo文件内容是否正确:
+
+cat /etc/yum.repos.d/CentOS-Base.repo
+
+⑧yum安装相关包。
+
+执行以下命令,安装所需的包
+
+yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch
+redhat-lsb-core wget python3
+
+⑨设置默认Python版本为3.x。
+
+[root@db1 \~]\# cd /usr/bin
+
+[root@db1 bin]\# mv python python.bak
+
+[root@db1 bin]\# ln -s python3 /usr/bin/python
+
+[root@db1 bin]\# python -V
+
+Python 3.6.8
+
+修改/usr/bin/yum文件,把\#!/usr/bin/python这行修改为\#!/usr/bin/python2.7(或者对应的python 2.x的版本)
+
+用yum -help确定yum是否能用。如下:
+
+![](media/ead7a1208ca580bbe2f34772131aaf49.png)
+
+⑩创建存放数据库安装目录
+
+![](media/196bd6f4f6071e6305e125a9aba74890.png)
+
+11:下载数据库安装包
+
+wget
+
+
+结果如下:
+
+![](media/db5239d495b2d10fadcb8f939c7ca7b8.png)
+
+1.6 安装openGauss数据库:
+
+1:创建XML配置文件,用于数据库安装
+
+我在这里遇到了问题,无法复制粘贴,也无法输入中文。
+
+解决:我通过安装图形界面解决了粘贴和中文的问题。
+
+①安装X(X Window System),命令如下:
+
+yum groupinstall "X Window System"
+
+②安装图形界面软件:
+
+yum groupinstall "GNOME Desktop"
+
+③设置开机自启图形化界面
+
+systemctl get-default
+
+systemctl set-default graphical.target
+
+解决之后可以复制以下内容到/opt/software/openGauss/clusterconfig.xml中
+
+Hosts --> Items --> Create Item
+
+![image.png](../figures/20211216-cd0ca2d6-dd3c-41d5-9643-775edc3e9035.png)
+
+**添加监控项信息**
+
+![image.png](../figures/20211216-05611555-f74d-47d5-8057-a86a6fd5e38f.png)
+
+![image.png](../figures/20211216-2e9cd439-b92e-4fcd-8180-ef7096c80a16.png)
+
+**查看监控项**
+
+![image.png](../figures/20211216-b9c6b9ce-6a77-4ce0-a064-291015801b2.png)
+
diff --git "a/content/zh/post/gaoyunlong/\345\205\263\344\272\216\351\233\206\347\276\244\350\212\202\347\202\271timeline\344\270\215\344\270\200\350\207\264\347\232\204\345\244\204\347\220\206\346\226\271\345\274\217.md" "b/content/zh/post/gaoyunlong/\345\205\263\344\272\216\351\233\206\347\276\244\350\212\202\347\202\271timeline\344\270\215\344\270\200\350\207\264\347\232\204\345\244\204\347\220\206\346\226\271\345\274\217.md"
new file mode 100644
index 0000000000000000000000000000000000000000..14a3734e46383267527a0ce059e7518ad9323fa2
--- /dev/null
+++ "b/content/zh/post/gaoyunlong/\345\205\263\344\272\216\351\233\206\347\276\244\350\212\202\347\202\271timeline\344\270\215\344\270\200\350\207\264\347\232\204\345\244\204\347\220\206\346\226\271\345\274\217.md"
@@ -0,0 +1,161 @@
++++
+
+title = "关于集群节点timeline不一致的处理方式"
+
+date = "2022-05-18"
+
+tags = ["关于集群节点timeline不一致的处理方式"]
+
+archives = "2022-05"
+
+author = "高云龙"
+
+summary = "关于集群节点timeline不一致的处理方式"
+
+img = "/zh/post/gaoyunlong/title/img24.png"
+
+times = "10:20"
++++
+
+# 关于集群节点timeline不一致的处理方式
+ 
+本文出处:[https://www.modb.pro/db/400223](https://www.modb.pro/db/400223) + +在PostgreSQL/MogDB/openGauss数据库日常维护过程中,如果多次对数据库进行角色切换,可能会出现timeline不一致的情况,导致备库不能正常加入到数据库集群,现在以PG为例对这些可能发生的情况进行复现,并进行整理。 + +### timeline介绍 + +为了将基于时间点恢复后生成的WAL记录序列与初始数据库历史中产生的WAL记录序列区分开来,避免原来的wal文件被覆盖,同时也为了避免管理混乱,PostgreSQL数据库引入了“时间线”的概念,使其可以通过备份恢复到任何之前的状态,包括早先被放弃的时间线分支中的状态。 + +当一次归档恢复完成,一个新的时间线被创建来标识恢复之后生成的WAL记录序列。时间线ID号是WAL段文件名的一部分,因此一个新的时间线不会重写由之前的时间线生成的WAL数据。 + +#### 场景一 + +``` +--主库日志 +ERROR: requested starting point 0/8000000 on timeline 1 is not in this server's history +DETAIL: This server's history forked from timeline 1 at 0/6018D98. +STATEMENT: START_REPLICATION 0/8000000 TIMELINE 1 + +--备库日志 +LOG: new timeline 2 forked off current database system timeline 1 before current recovery point 0/80000A0 +FATAL: could not start WAL streaming: ERROR: requested starting point 0/8000000 on timeline 1 is not in this server's history +DETAIL: This server's history forked from timeline 1 at 0/6018D98. +``` + +**发生场景** + +- 备库promote为主库,源主库以备库的方式重新加入集群 +- 以备份的方式恢复为新主库,源主库以备库的方式加入集群 + +**处理方式** + +- 重建备库,适用数据量较小的数据库 +- 借助pg_rewind工具,推荐使用这种方式 + pg_rewind会把所有的配置文件都覆盖,建议提前做好备份 并在启动前添加recovery.conf 或 standby.signal文件 + +**pg_rewind相关报错** + +``` +pg_rewind: fatal: target server needs to use either data checksums or "wal_log_hints = on" +即使数据库已经开启了wal_log_hints = on,依然报这个错,这时需要以primary的形式重启一下数据库。 + +pg_rewind: source and target cluster are on the same timeline +pg_rewind: no rewind required +主备时间线一致,无法直接使用,这时需要让目标节点先以备库的方式运行,然后通过promote提升为主节点,增加timeline,再次执行pg_rewind + +pg_rewind: fatal: could not find common ancestor of the source and target cluster's timelines +建议直接重建备库 +``` + +#### 场景二 + +``` +--备库启动失败 +LOG: entering standby mode +FATAL: requested timeline 2 is not a child of this server's history +DETAIL: Latest checkpoint is at 0/8000028 on timeline 1, but in the history of the requested timeline, the server forked off from that timeline at 0/6018D98. +LOG: startup process (PID 1059) exited with exit code 1 + +``` + +**发生场景** + +- 在场景一中启动数据库,会将新主库的00000002.history传输到备库本地 + +``` +[postgres@bogon pg_wal]$ ls -l +total 49160 +-rw-------. 1 postgres postgres 332 May 5 20:52 000000010000000000000004.00000028.backup +-rw-------. 1 postgres postgres 16777216 May 6 08:54 000000010000000000000008 +-rw-------. 1 postgres postgres 16777216 May 6 08:49 000000010000000000000009 +-rw-------. 1 postgres postgres 16777216 May 6 08:54 00000001000000000000000A +-rw-------. 1 postgres postgres 32 May 6 08:58 00000002.history +drwx------. 2 postgres postgres 88 May 6 08:58 archive_status + +``` + +**处理方式** + +- 将**pg_wal、archive_status 和 归档目录** 中的00000002.history删除即可 + +``` +[postgres@bogon pg_wal]$ rm -f 00000002.history +[postgres@bogon pg_wal]$ cd archive_status/ +[postgres@bogon archive_status]$ ls -l +total 0 +-rw-------. 1 postgres postgres 0 May 5 20:52 000000010000000000000004.00000028.backup.done +-rw-------. 
1 postgres postgres 0 May 6 08:58 00000002.history.done +[postgres@bogon archive_status]$ rm -rf * +[postgres@bogon archive_status]$ + +``` + +#### 场景三 + +``` +LOG: started streaming WAL from primary at 0/7000000 on timeline 2 +FATAL: could not receive data from WAL stream: ERROR: requested starting point 0/7000000 is ahead of the WAL flush position of this server 0/601A5D8 +cp: cannot stat ‘/data/pgarchive/00000003.history’: No such file or directory +cp: cannot stat ‘/data/pgarchive/000000020000000000000007’: No such file or directory + +``` + +**发生场景** + +- 备库以单机(未加入集群,以primary的角色)的方式启动过,虽然时间线没变,但是wal文件已经不一致 + +**处理方式** +此时由于备库的需要从0/7000000开始进行重放,已经比主库的0/601A5D8提前,说明此时数据库已经不一致。 +尝试过修改通过pg_resetwal修改timeline,也尝试过通过pg_switch_wal()切换wal文件,依然无法通过pg_rewind进行处理,原因是wal不连续,只能**选择重建** + +``` +--修改timeline +postgres=# SELECT timeline_id,redo_wal_file FROM pg_control_checkpoint(); + timeline_id | redo_wal_file +-------------+-------------------------- + 2 | 00000002000000000000000F +(1 row) + +$pg_resetwal -l 000000030000000000000010 /data/pgdata14/ +Write-ahead log reset + +--修改时间线 +postgres=# SELECT timeline_id,redo_wal_file FROM pg_control_checkpoint(); + timeline_id | redo_wal_file +-------------+-------------------------- + 3 | 000000030000000000000012 +(1 row) + +--切换wal +postgres=# select pg_switch_wal(); +$ pg_ctl promote -D /data/pgdata14 + +``` + +### 总结 + +- 备库在运行过程中,以promote的方式提升为主,即使有数据写入,只要wal完整,也可以使用pg_rewind回退. + **在pg_rewind完成后启动,注意修改参数文件、hba文件、清理归档日志及添加standby.signal/recovery.conf** +- 备库在运行过程中,以主库的方式重启过,即使没有任何操作,也没有办法回退,只能重建 + **只要中间以主库运行过,wal就没有办法连续了** diff --git "a/content/zh/post/gaoyunlong/\351\205\215\347\275\256MogDB openGauss\347\232\204grafana \347\232\204dashboard.md" "b/content/zh/post/gaoyunlong/\351\205\215\347\275\256MogDB openGauss\347\232\204grafana \347\232\204dashboard.md" new file mode 100644 index 0000000000000000000000000000000000000000..600513f58f6a296e8731d4439a28b9ac86d7f527 --- /dev/null +++ "b/content/zh/post/gaoyunlong/\351\205\215\347\275\256MogDB openGauss\347\232\204grafana \347\232\204dashboard.md" @@ -0,0 +1,75 @@ ++++ + +title = "配置MogDB/openGauss的grafana 的dashboard" + +date = "2022-05-05" + +tags = ["配置MogDB/openGauss的grafana 的dashboard"] + +archives = "2022-05" + +author = "高云龙" + +summary = "配置MogDB/openGauss的grafana 的dashboard" + +img = "/zh/post/gaoyunlong/title/img25.png" + +times = "10:20" ++++ + +# 配置MogDB/openGauss的grafana 的dashboard + +本文出处:[https://www.modb.pro/db/188684](https://www.modb.pro/db/188684) + +### 概述 + +我们已经介绍了[prometheus + grafana + opengauss_exporter](https://www.modb.pro/db/173483)完成对MogDB/openGauss 数据库的监控,但这只是第一步,我们还需要通过grafana的dashboard查看各个关注的指标项,本文主要介绍dashboard的配置 + +### 监控指标汇总 + +数据源选择的是prometheus,主要关注的监控指标分为:基础信息、内存信息、连接信息、复制信息、锁及等待事件、统计信息、query信息以及数据库对象 +![image.png](../figures/20211204-cfc47e9a-4272-48e2-9fba-ab5a17c9b323.png) + +#### 基础信息 + +基础信息是运维人员比较关注的,有变化第一时间可以看到的信息,比如实例IP、数据库版本、数据库运行时间、exporter状态、exporter运行时间等等 +![image.png](../figures/20211204-183e159b-ef0f-4134-b134-71f99ba6e89a.png) + +#### 内存信息 + +展示数据库内存总体使用情况,按会话状态分组占用内存情况,内存上下文占用内存情况以及占用内存最多的session及sql文本 +![image.png](../figures/20211204-ffad91b6-007a-441c-8af8-835a9c0e0597.png) +![image.png](../figures/20211204-b6e374da-906c-4f47-bc31-96f0ca3037fa.png) + +#### 连接信息 + +连接数总体使用情况,各状态连接使用情况以及各应用连接数 +![image.png](../figures/20211204-ec617df5-639c-43a2-a45e-5d84738909c5.png) + +#### 复制信息 + +复制槽使用占比、复制槽延时、备节点信息及主备之间的延迟 +![image.png](../figures/20211204-c0cfe4c4-d76b-4a8c-bd04-7a2f81f603a6.png) + +#### 锁及等待事件 + 
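+在看具体面板之前,如果想先在SQL层面手工确认当前是否存在锁等待,可以用类似下面的查询(这是一个基于pg_locks系统视图的通用示例,并非面板实际使用的语句):
+
+```sql
+-- 列出所有尚未授予的锁请求,即正在等待锁的会话
+select locktype, database, relation, pid, mode, granted
+from pg_locks
+where not granted;
+```
+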
+锁阻塞源信息,锁阻塞详情,锁类型分布情况,锁冲突及死锁检测,等待事件汇总及等待时间汇总信息 +![image.png](../figures/20211204-aec67dd0-2b24-4f75-8d74-9ea4b2a22edd.png) +![image.png](../figures/20211204-cf9d6243-d31c-4e37-aa26-953e2822e0c1.png) + +#### 统计信息 + +![image.png](../figures/20211204-c8674984-9927-4b9d-bdde-fb9725ea88ee.png) + +#### query信息 + +![image.png](../figures/20211204-41c59db9-f61d-4dae-b29d-7036223ba567.png) + +#### 数据库对象 + +![image.png](../figures/20211204-25c40a97-f135-48be-af18-f1fe9986db5b.png) + +### json文件下载地址 + +[exporter监控单数据库实例](https://www.modb.pro/download/272899) +[exporter监控多数据库实例](https://www.modb.pro/download/293587) diff --git "a/content/zh/post/guohuan/MogDB openGauss\345\246\202\344\275\225\345\256\236\347\216\260\344\272\213\345\212\241\347\232\204rollback.md" "b/content/zh/post/guohuan/MogDB openGauss\345\246\202\344\275\225\345\256\236\347\216\260\344\272\213\345\212\241\347\232\204rollback.md" new file mode 100644 index 0000000000000000000000000000000000000000..5269c8b0bd4ca04b7da2ce7035cb8eb46514dfd0 --- /dev/null +++ "b/content/zh/post/guohuan/MogDB openGauss\345\246\202\344\275\225\345\256\236\347\216\260\344\272\213\345\212\241\347\232\204rollback.md" @@ -0,0 +1,95 @@ ++++ + +title = "MogDB/openGauss如何实现事务的rollback" + +date = "2022-04-12" + +tags = ["MogDB/openGauss如何实现事务的rollback"] + +archives = "2022-04" + +author = "郭欢" + +summary = "MogDB/openGauss如何实现事务的rollback" + +img = "/zh/post/guohuan/title/img6.png" + +times = "10:20" ++++ + +# MogDB/openGauss如何实现事务的rollback + +本文出处:https://www.modb.pro/db/113262 + + + +数据库最主要的功能就是存储数据,然而我们在进行数据库操作时,却很容易发生误操作数据的情况,那么在MogDB中该如何实现误操作数据恢复呢?本文通过具体示例简要介绍如何通过回滚还原到误操作前的状态。 + +使用gsql连接MogDB时,SQL语句执行完毕会自动提交,我们可以通过开启事务来避免误操作。事务的提交与回滚体现事务的原子性特征,事务语句要么提交,要么回滚。 + +**准备工作:** + +登录数据库。 + +**示例步骤:** + +1. 创建测试表并插入数据 + + ```sql + create table test (id int,name char(10)); + insert into test values(1,'aa'); + insert into test values(2,'bb'); + ``` + +2. 表中插入数据回滚 + + ```plsql + --查看当前表中数据 + select * from test; + --开启事务并插入数据,之后回滚操作 + begin; + insert into test values(3,'cc'); + select * from test; + rollback; + --查看insert已经回滚 + select * from test; + ``` + +通过上述示例可以看出,执行回滚之后,新插入的一行数据并未存入表中,更新和删除操作也是同样。 + +除此之外,事务执行过程中还可以创建保存点,如果不指定保存点,回滚操作会回滚到事务起始点,即begin开始的地方。在比较大的事务中,可以把执行过程分为几个步骤,每个步骤执行完成后创建一个保存点,后续步骤执行失败时,可回滚到之前的保存点,而不必回滚整个事务。 + +1. 开启事务,在上表中继续插入数据,并创建两个保存点 + + ```plsql + begin; + insert into test values(3,'cc'); + savepoint savepoint1; + insert into test values(4,'dd'); + savepoint savepoint2; + insert into test values(5,'ee'); + ``` + +2. 回滚至保存点 + + ```sql + --查看当前表数据 + select * from test; + --回滚到保存点savepoint2,查看数据 + rollback to savepoint savepoint2; + select * from test; + --回滚到保存点savepoint1,查看数据 + rollback to savepoint savepoint1; + select * from test; + ``` + +3. 
提交事务,查看数据
+
+   ```sql
+   commit;
+   select * from test;
+   ```
+
+通过上述示例可以看出,回滚到保存点1后,后续操作未提交,而保存点1之前的操作会提交,即最终表中存有3行数据。
+
+显式开启事务并利用回滚机制,是一种能够有效避免误操作的方法。
diff --git "a/content/zh/post/guohuan/MogDB openGauss\346\225\260\346\215\256\345\256\214\346\225\264\346\200\247\347\272\246\346\235\237\347\256\200\344\273\213.md" "b/content/zh/post/guohuan/MogDB openGauss\346\225\260\346\215\256\345\256\214\346\225\264\346\200\247\347\272\246\346\235\237\347\256\200\344\273\213.md"
new file mode 100644
index 0000000000000000000000000000000000000000..b9f97790ace1be7a163e2254c28bd4912d523674
--- /dev/null
+++ "b/content/zh/post/guohuan/MogDB openGauss\346\225\260\346\215\256\345\256\214\346\225\264\346\200\247\347\272\246\346\235\237\347\256\200\344\273\213.md"
@@ -0,0 +1,116 @@
++++
+
+title = "MogDB/openGauss数据完整性约束简介"
+
+date = "2022-04-12"
+
+tags = ["MogDB/openGauss数据完整性约束简介"]
+
+archives = "2022-04"
+
+author = "郭欢"
+
+summary = "MogDB/openGauss数据完整性约束简介"
+
+img = "/zh/post/guohuan/title/img.png"
+
+times = "10:20"
++++
+
+# MogDB/openGauss数据完整性约束简介
+
+本文出处:https://www.modb.pro/db/124910
+
+创建基本表的同时,还可以指定表中数据的完整性约束。例如在创建warehouse基本表时,通过分析可以得到如下结论:
+
+1. 不同仓库必须有不同的w_id,且w_id不能为NULL。
+2. 仓库必须有具体的名称,不能为NULL。
+3. 仓库所在的街区地址长度不能为0。
+4. 仓库所在国家默认为“CN”。
+
+因此可以在创建warehouse基本表时指定这些约束。
+
+例1:创建带有完整性约束的基本表,语句如下:
+
+```sql
+CREATE TABLE warehouse
+(
+    w_id SMALLINT PRIMARY KEY,
+    w_name VARCHAR(10) NOT NULL,
+    w_street_1 VARCHAR(20) CHECK(LENGTH(w_street_1)<>0),
+    w_street_2 VARCHAR(20) CHECK(LENGTH(w_street_2)<>0),
+    w_city VARCHAR(20),
+    w_state CHAR(2) DEFAULT 'CN',
+    w_zip CHAR(9),
+    w_tax DECIMAL(4,2),
+    w_ytd DECIMAL(12,2)
+);
+```
+
+如果向warehouse基本表中写入不符合完整性约束的值,那么数据不能被写入,数据库会提示错误。
+
+例2:向w_name列中写入NULL值,不符合完整性约束,写入数据时会报错,数据写入不成功,语句如下:
+
+```sql
+INSERT INTO warehouse VALUES(1,NULL,'','',NULL,'CN',NULL,1.0,1.0);
+ERROR: null value in column "w_name" violates not-null constraint
+DETAIL: Failing row contains (1, null, null, null, null, CN, null, 1.00, 1.00).
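+-- 补充一个对照示例(演示用的假设数据):为 w_name 等列提供满足约束的值后,
+-- 同样结构的一行就可以正常写入:
+-- INSERT INTO warehouse VALUES(1,'W-01','street-1','street-2','city','CN','000000000',1.0,1.0);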
+```
+
+除了在列定义之后指定完整性约束之外,还可以使用表级的完整性约束来指定。
+
+例3:在表定义上指定完整性约束,NULL约束只能在列定义上指定:
+
+```sql
+CREATE TABLE warehouse
+(
+    w_id SMALLINT,
+    w_name VARCHAR(10) NOT NULL,                                --设置NULL约束
+    w_street_1 VARCHAR(20),
+    w_street_2 VARCHAR(20),
+    w_city VARCHAR(20),
+    w_state CHAR(2) DEFAULT 'CN',                               --设置默认值
+    w_zip CHAR(9),
+    w_tax DECIMAL(4,2),
+    w_ytd DECIMAL(12,2),
+    CONSTRAINT w_id_pkey PRIMARY KEY(w_id),                     --增加主键约束
+    CONSTRAINT w_street_1_chk CHECK(LENGTH(w_street_1) < 100),  --增加CHECK约束
+    CONSTRAINT w_street_2_chk CHECK(LENGTH(w_street_2) < 100)   --增加CHECK约束
+);
+```
+
+当一个表中的某一列或多列恰好引用的是另一个表的主键(或具有唯一性)时,可以将其定义为外键,外键表示两个表之间相互的关联关系。外键的定义可以直接在属性上定义,也可以在基本表的创建语句中定义。
+
+例4:新订单表new_orders中引用仓库表warehouse主键的列即为外键,语句如下:
+
+```sql
+CREATE TABLE new_orders
+(
+    no_o_id INTEGER NOT NULL,
+    no_d_id SMALLINT NOT NULL,
+    no_w_id SMALLINT NOT NULL REFERENCES warehouse(w_id)
+);
+```
+
+还可以通过ALTER TABLE语句对完整性约束进行修改。
+
+例5:在warehouse表中增加主键约束:
+
+```sql
+ALTER TABLE warehouse ADD PRIMARY KEY(w_id);
+```
+
+例6:在warehouse表中增加CHECK约束:
+
+```sql
+ALTER TABLE warehouse ADD CHECK(LENGTH(w_street_1) < 100);
+```
+
+例7:在new_orders表中增加外键引用:
+
+```sql
+ALTER TABLE new_orders ADD FOREIGN KEY(no_w_id) REFERENCES warehouse(w_id);
+```
+
+例8:在new_orders表中增加唯一约束:
+
+```sql
+ALTER TABLE new_orders ADD UNIQUE(no_o_id, no_d_id, no_w_id);
+```
diff --git a/content/zh/post/guohuan/title/img.png b/content/zh/post/guohuan/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/guohuan/title/img.png differ
diff --git a/content/zh/post/guohuan/title/img6.png b/content/zh/post/guohuan/title/img6.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79
Binary files /dev/null and b/content/zh/post/guohuan/title/img6.png differ
diff --git "a/content/zh/post/hefang/MogDB openGauss\344\270\255merge\347\232\204\350\257\255\346\263\225\350\247\243\346\236\220.md" "b/content/zh/post/hefang/MogDB openGauss\344\270\255merge\347\232\204\350\257\255\346\263\225\350\247\243\346\236\220.md"
new file mode 100644
index 0000000000000000000000000000000000000000..769d81309e9a873a8461de9eb111d8795238946f
--- /dev/null
+++ "b/content/zh/post/hefang/MogDB openGauss\344\270\255merge\347\232\204\350\257\255\346\263\225\350\247\243\346\236\220.md"
@@ -0,0 +1,242 @@
++++
+
+title = "MogDB/openGauss中merge的语法解析"
+
+date = "2022-04-11"
+
+tags = ["MogDB/openGauss中merge的语法解析"]
+
+archives = "2022-04"
+
+author = "何放"
+
+summary = "MogDB/openGauss中merge的语法解析"
+
+img = "/zh/post/hefang/title/img6.png"
+
+times = "10:20"
+
++++
+
+# MogDB/openGauss中merge的语法解析
+
+近期学习了解了MogDB/openGauss中merge的使用:merge语法是根据源表对目标表进行匹配查询,匹配成功时更新,不成功时插入。简单来说就是有则更新、无则插入,语句简洁,效率高。
+
+下面展示MogDB/openGauss中merge的语法:
+
+```sql
+openGauss=# \h merge
+Command:     MERGE
+Description: insert, update, or delete rows of a table based upon source data
+Syntax:
+MERGE [/*+ plan_hint */] INTO table_name [ [ AS ] alias ]
+USING { { table_name | view_name } | subquery } [ [ AS ] alias ]
+ON ( condition )
+[
+  WHEN MATCHED THEN
+  UPDATE SET { column_name = { expression | DEFAULT } |
+          ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]
+  [ WHERE condition ]
+]
+[
+  WHEN NOT MATCHED THEN
+  INSERT { DEFAULT VALUES |
+  [ ( column_name [, ...] ) ] VALUES ( { expression | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] }
+];
+```
+
+### 创建测试表
+
+merge有几种匹配条件可以交叉选择。
+作用:判断源表和目标表是否满足合并的条件。
+如果满足:
+
+1. 用源表去更新目标表
+2. 用源表去删除目标表
+3. 什么也不干
+
+如果不满足:
+
+1. 
用源表去插入目标表 +2. 什么也不干 + +创建出满足的表 + +```sql +create table a_merge ( + id int not null, + name varchar not null, + year int +); + +create table b_merge ( + id int not null, + aid int not null, + name varchar not null, + year int, + city varchar +); + +create table c_merge ( + id int not null, + name varchar not null, + city varchar not null +); +``` + +#### 测试一:匹配则修改,无则插入 + +```sql +--插入数据 +insert into a_merge values(1,'liuwei',20); +insert into a_merge values(2,'zhangbin',21); +insert into a_merge values(3,'fuguo',20); + +insert into b_merge values(1,2,'zhangbin',30,'吉林'); +insert into b_merge values(2,4,'yihe',33,'黑龙江'); +insert into b_merge (id,aid,name,city) values(3,3,'fuguo','山东'); +--数据对比 +select * from a_merge; select * from b_merge; + id | name | year +----+----------+------ + 1 | liuwei | 20 + 2 | zhangbin | 21 + 3 | fuguo | 20 +(3 rows) + + id | aid | name | year | city +----+-----+----------+------+-------- + 1 | 2 | zhangbin | 30 | 吉林 + 2 | 4 | yihe | 33 | 黑龙江 + 3 | 3 | fuguo | | 山东 +(3 rows) +--merge语句 +merge into a_merge a +using (select b.aid,b.name,b.year from b_merge b) c on (a.id=c.aid) +when matched then + update set year=c.year +when not matched then + insert values(c.aid,c.name,c.year); +--更新后的a_merge表 +select * from a_merge; + id | name | year +----+----------+------ + 1 | liuwei | 20 + 2 | zhangbin | 30 + 3 | fuguo | + 4 | yihe | 33 +(4 rows) +``` + +#### 测试二:匹配则修改,无则不操作 + +```sql +--插入数据 +insert into b_merge values(4,1,'liuwei',80,'江西'); +insert into b_merge values(5,5,'tiantian',23,'河南'); +--核对数据 +select * from a_merge;select * from b_merge; + id | name | year +----+----------+------ + 1 | liuwei | 20 + 2 | zhangbin | 30 + 3 | fuguo | + 4 | yihe | 33 +(4 rows) + + id | aid | name | year | city +----+-----+----------+------+-------- + 1 | 2 | zhangbin | 30 | 吉林 + 2 | 4 | yihe | 33 | 黑龙江 + 3 | 3 | fuguo | | 山东 + 4 | 1 | liuwei | 80 | 江西 + 5 | 5 | tiantian | 23 | 河南 +(5 rows) +--merge语句 +merge into a_merge a +using (select b.aid,b.name,b.year from b_merge b) c on (a.id=c.aid) +when matched then + update set year=c.year; +--数据对比 +select * from a_merge; + id | name | year +----+----------+------ + 1 | liuwei | 80 + 2 | zhangbin | 30 + 3 | fuguo | + 4 | yihe | 33 +(4 rows) +``` + +#### 测试三:匹配无操作,不匹配进行insert + +```sql +--修改测试数据 +update b_merge set year=70 where aid=2; +--两表对比 +select * from a_merge;select * from b_merge; + id | name | year +----+----------+------ + 1 | liuwei | 80 + 2 | zhangbin | 30 + 3 | fuguo | + 4 | yihe | 33 +(4 rows) + + id | aid | name | year | city +----+-----+----------+------+-------- + 2 | 4 | yihe | 33 | 黑龙江 + 3 | 3 | fuguo | | 山东 + 4 | 1 | liuwei | 80 | 江西 + 5 | 5 | tiantian | 23 | 河南 + 1 | 2 | zhangbin | 70 | 吉林 +(5 rows) +--merge语句 +merge into a_merge a +using (select b.aid,b.name,b.year from b_merge b) c on (a.id=c.aid) +when not matched then + insert values(c.aid,c.name,c.year); +--查看a_merge表 +select * from a_merge; + id | name | year +----+----------+------ + 1 | liuwei | 80 + 2 | zhangbin | 30 + 3 | fuguo | + 4 | yihe | 33 + 5 | tiantian | 23 +(5 rows) +``` + +#### 测试四:一律insert + +```sql +--merge语句 +merge into c_merge c +using (select b.aid,b.name,b.city from b_merge b) b on (1=0) +when not matched then + insert values(b.aid,b.name,b.city); +--查看两表,条数相同 +select * from c_merge ;select * from b_merge ; + id | name | city +----+----------+-------- + 3 | fuguo | 山东 + 5 | tiantian | 河南 + 2 | zhangbin | 吉林 + 4 | yihe++ | 黑龙江 + 1 | liuwei++ | 江西 + 6 | ningqin | 江西 + 7 | bing | 吉安 +(7 rows) + + id | aid | name | year | city 
+----+-----+----------+------+-------- + 3 | 3 | fuguo | | 山东 + 5 | 5 | tiantian | 23 | 河南 + 1 | 2 | zhangbin | 70 | 吉林 + 2 | 4 | yihe++ | 33 | 黑龙江 + 4 | 1 | liuwei++ | 80 | 江西 + 6 | 6 | ningqin | 23 | 江西 + 7 | 7 | bing | 24 | 吉安 +(7 rows) +``` diff --git "a/content/zh/post/hefang/openGauss\346\225\260\346\215\256\345\272\223\347\224\250copy\345\221\275\344\273\244\346\223\215\344\275\234\346\226\207\344\273\266.md" "b/content/zh/post/hefang/openGauss\346\225\260\346\215\256\345\272\223\347\224\250copy\345\221\275\344\273\244\346\223\215\344\275\234\346\226\207\344\273\266.md" new file mode 100644 index 0000000000000000000000000000000000000000..bd23928d63a9c339a15b528dead097daf482bce8 --- /dev/null +++ "b/content/zh/post/hefang/openGauss\346\225\260\346\215\256\345\272\223\347\224\250copy\345\221\275\344\273\244\346\223\215\344\275\234\346\226\207\344\273\266.md" @@ -0,0 +1,70 @@ ++++ + +title = "openGauss数据库用copy命令操作文件" + +date = "2022-04-07" + +tags = ["openGauss数据库用copy命令操作文件"] + +archives = "2022-04" + +author = "何放" + +summary = "openGauss数据库用copy命令操作文件" + +img = "/zh/post/hefang/title/img6.png" + +times = "10:20" + ++++ + +# openGauss数据库用copy命令操作文件 + +最近项目上正好客户有提到openGauss怎么才能对文件进行操作,copy命令是可以对文件进行操作,顺便解决完后发个文总结一下。 + +## copy命令语法及解析 + +```sql + COPY table_name [ ( column_name [, ...] ) ] + FROM { 'filename' | PROGRAM 'command' | STDIN } + [ [ WITH ] ( option [, ...] ) ] + + COPY { table_name [ ( column_name [, ...] ) ] | ( query ) } + TO { 'filename' | PROGRAM 'command' | STDOUT } + [ [ WITH ] ( option [, ...] ) ] +``` + +1. STDIN:指的是客户端程序的输入流。 +2. STDOUT:指向是客户端的输出流。 +3. OPTION:参数可以设置的有:FORMAT,OIDS,FREEZE,DELIMITER,NULL,HEADER,QUOTE,FORCE_QUOTE,FORCE_NOT_NULL,FORCE_NULL,ENCODING。 + +## copy命令演示 + +copy命令可以操作的文件类型有:txt、sql、csv、二进制格式。 + +**导入导出数据操作** + +```sql +--copy导入','分割的字段 +copy hould from '/home/omm/hould' delimiter ','; + +--copy命令导入导出csv格式的文件 +copy hould (id,name,time) to '/home/omm/hould.csv' csv header; +copy hould from '/home/omm/hould.csv'; + +--copy命令导入导出sql格式的文件 +copy hould to '/home/omm/hould.sql'; +copy hould from '/home/omm/hould.sql'; + +--copy命令导入导出txt格式的文件 +copy hould to '/home/omm/hould.txt'; +copy hould from '/home/omm/hould.txt'; + +--copy命令导入导出二进制格式文件 +copy binary hould to '/home/omm/houldbinary'; +copy binary hould from '/home/omm/houldbinary'; +``` + +## \copy与copy的区别 + +有时候会见到这两个相似的命令,会误导为是一个命令,这里区分一下:copy是可以在远端操作数据文件,\copy只能在本地的客户端操作。copy命令只有超级用户能使用,\copy命令一般用户就能操作。从性能上copy命令导入导出大数据文件比\copy的性能都要高。 diff --git "a/content/zh/post/hefang/openGauss\351\200\222\345\275\222\350\257\255\346\263\225with recursive.md" "b/content/zh/post/hefang/openGauss\351\200\222\345\275\222\350\257\255\346\263\225with recursive.md" new file mode 100644 index 0000000000000000000000000000000000000000..97f4f883199cab6b1fd20c76099e0bf02e75a05a --- /dev/null +++ "b/content/zh/post/hefang/openGauss\351\200\222\345\275\222\350\257\255\346\263\225with recursive.md" @@ -0,0 +1,311 @@ ++++ + +title = "openGauss递归语法with recursive" + +date = "2022-04-06" + +tags = ["openGauss递归语法with recursive"] + +archives = "2022-04" + +author = "何放" + +summary = "openGauss递归语法with recursive" + +img = "/zh/post/hefang/title/img6.png" + +times = "11:37" + ++++ + + + +# openGauss递归语法with recursive + +最近学习了openGauss使用with recursive做递归查询,这种用法在openGauss中实现Oracle的start with connect by语法,下面有用户发的语法报错,并介绍几种递归正确写法。 + +#### 报错信息 + +```language +ERROR: relation "test" does not exist +LINE 4: ....COLUMN1,a.COLUMN2 from tablename a,test b... 
+                                                       ^
+DETAIL: There is a WITH item named "test", but it cannot be referenced from this part of the query.
+HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references.
+```
+
+##### 需在with后添加recursive
+
+```sql
+--报错SQL
+with test(COLUMN1,COLUMN2) as(
+    select COLUMN1,COLUMN2 from tablename where COLUMN1 ='11' and COLUMN2 = '1'
+    union all
+    select a.COLUMN1,a.COLUMN2 from tablename a,test b where b.COLUMN1=a.COLUMN3 and COLUMN2 = '1'
+)
+select COLUMN1,COLUMN2 from test order by COLUMN1;
+
+--正确语法
+with RECURSIVE test(COLUMN1,COLUMN2) as(
+    select COLUMN1,COLUMN2 from tablename where COLUMN1 ='11' and COLUMN2 = '1'
+    union all
+    select a.COLUMN1,a.COLUMN2 from tablename a,test b where b.COLUMN1=a.COLUMN3 and COLUMN2 = '1'
+)
+select COLUMN1,COLUMN2 from test order by COLUMN1;
+```
+
+#### 1. 递归查询一
+
+##### 1.1 创建测试表1
+
+```sql
+create table city
+(no int, no1 int, name varchar(20));
+
+insert into city values(1,null,'北京');
+insert into city values(2,null,'广州');
+insert into city values(11,1,'天安门');
+insert into city values(12,1,'鸟巢');
+insert into city values(13,1,'国家体育场');
+insert into city values(21,2,'白云山');
+insert into city values(22,2,'动物园');
+insert into city values(23,2,'天河城');
+```
+
+##### 1.2 递归查询北京的下一级
+
+```sql
+--SQL语句
+with recursive cte as(
+    select a.* from city a
+    where a.no = 1
+    union all
+    select b.* from city b
+    join cte c on c.no = b.no1
+)select * from cte;
+--查询结果
+ no | no1 |    name
+----+-----+------------
+  1 |     | 北京
+ 11 |   1 | 天安门
+ 12 |   1 | 鸟巢
+ 13 |   1 | 国家体育场
+(4 rows)
+```
+
+##### 1.3 递归查询广州的下一级
+
+```sql
+--SQL语句
+with recursive cte as(
+    select a.* from city a
+    where a.no = 2
+    union all
+    select b.* from city b
+    join cte c on c.no = b.no1
+)select * from cte;
+--查询结果
+ no | no1 |  name
+----+-----+--------
+  2 |     | 广州
+ 21 |   2 | 白云山
+ 22 |   2 | 动物园
+ 23 |   2 | 天河城
+(4 rows)
+```
+
+##### 1.4 递归查询白云山的上一级
+
+```sql
+with recursive cte as(
+    select a.* from city a
+    where a.no = 21
+    union all
+    select b.* from city b
+    join cte c on c.no1 = b.no
+)select * from cte;
+ no | no1 |  name
+----+-----+--------
+ 21 |   2 | 白云山
+  2 |     | 广州
+(2 rows)
+```
+
+##### 1.5 递归查询天安门的上一级
+
+```sql
+with recursive cte as(
+    select a.* from city a
+    where a.no = 11
+    union all
+    select b.* from city b
+    join cte c on c.no1 = b.no
+)select * from cte;
+ no | no1 |  name
+----+-----+--------
+ 11 |   1 | 天安门
+  1 |     | 北京
+(2 rows)
+```
+
+#### 2.
递归查询二 + +##### 2.1 创建测试表2 + +```sql +create table city2 (id int,pid int,name varchar(20)); + +insert into city2 values('001',0,'四川省'); +insert into city2 values('002',0,'重庆市'); +insert into city2 values('003','001','成都市'); +insert into city2 values('004','001','绵阳市'); +insert into city2 values('005','003','武侯区'); +insert into city2 values('006','003','成华区'); +insert into city2 values('007','003','锦江区'); +insert into city2 values('008','002','江北区'); +insert into city2 values('009','002','渝中区'); +insert into city2 values('010','002','南岸区'); +insert into city2 values('011','002','沙坪坝区'); +``` + +##### 2.2 向下递归查询 + +```sql +with RECURSIVE cte as +( +select a.id,cast(a.name as varchar(100)) from city2 a where id='001' +union all +select k.id,cast(c.name||'>'||k.name as varchar(100)) as name from city2 k inner join cte c on c.id = k.pid +)select id,name from cte; + id | name +----+---------------------- + 1 | 四川省 + 3 | 四川省>成都市 + 4 | 四川省>绵阳市 + 5 | 四川省>成都市>武侯区 + 6 | 四川省>成都市>成华区 + 7 | 四川省>成都市>锦江区 +(6 rows) +``` + +##### 2.3 向下递归查询 + +```sql +with RECURSIVE cte as +( +select a.id,cast(a.name as varchar(100)) from city2 a where id='002' +union all +select k.id,cast(c.name||'>'||k.name as varchar(100)) as name from city2 k inner join cte c on c.id = k.pid +)select id,name from cte; + id | name +----+----------------- + 2 | 重庆市 + 8 | 重庆市>江北区 + 9 | 重庆市>渝中区 + 10 | 重庆市>南岸区 + 11 | 重庆市>沙坪坝区 +(5 rows) +``` + +#### 3. 递归计算 + +##### 3.1 使用递归计算1-100的加法 + +```sql +WITH RECURSIVE t(n) AS ( + VALUES (1) + UNION ALL + SELECT n+1 FROM t WHERE n < 100 +)SELECT sum(n) FROM t; + sum +------ + 5050 +(1 row) +``` + +##### 3.2 使用递归计算100-200之间的偶数平均数 + +```sql +with recursive t(n) as( + values(100) + union all + select n+2 from t where n<200 +)select avg(n) from t; + avg +---------------------- + 150.0000000000000000 +(1 row) +``` + +#### 4. with recursive语句对表操作 + +##### 4.1 创建测试表4 + +```sql +create table company ( + id int , + salary int +); +insert into company values(1,20000); +insert into company values(2,15000); +insert into company values(3,20000); +insert into company values(4,65000); +insert into company values(5,85000); +insert into company values(6,45000); +insert into company values(7,10000); +``` + +##### 4.2 计算salary小于20000的总和 + +```sql +WITH RECURSIVE t(n) AS ( + VALUES (0) + UNION ALL + SELECT salary FROM company WHERE salary < 20000 +)SELECT SUM(n) FROM t; + sum +------- + 25000 +(1 row) +``` + +##### 4.3 表进行操作 + +```sql +--创建表结构 +create table company20000 ( + id int , + salary int +); +--删除指定行数,RETURNING子句返回它们的内容,读取输出并将其插入到COMPANY1表 +WITH recursive a AS ( + DELETE FROM COMPANY + WHERE + SALARY >= 20000 + RETURNING * +)INSERT INTO COMPANY20000 (SELECT * FROM a); +--查看删除插入的数据 +select * from COMPANY20000; + id | salary +----+-------- + 1 | 20000 + 3 | 20000 + 4 | 65000 + 5 | 85000 + 6 | 45000 +(5 rows) + +select * from COMPANY; + id | salary +----+-------- + 2 | 15000 + 7 | 10000 +(2 rows) + +``` + +#### 5. 多个递归调用例子 + +语法格式: + +```sql +WITH RECURSIVE + cte1 AS (...) -- 可以为非递归语句 +, cte2 AS (SELECT ... + UNION ALL + SELECT ...) -- 递归语句 +, cte3 AS (...) -- 递归语句 +SELECT ... FROM cte3 WHERE ... +``` + +#### 6.总结及注意事项 + +1. 有混合递归和非递归,都统一使用WITH RECURSIVE。 +2. 顺序问题,先写非递归语句,然后写递归语句。 +3. 
PG中with的用法在openGauss中同样适用。 diff --git a/content/zh/post/hefang/title/img.png b/content/zh/post/hefang/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/hefang/title/img.png differ diff --git a/content/zh/post/hefang/title/img6.png b/content/zh/post/hefang/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79 Binary files /dev/null and b/content/zh/post/hefang/title/img6.png differ diff --git a/content/zh/post/huzhengchao/2021-11-05-how_to_use_zhparser.md b/content/zh/post/huzhengchao/2021-11-05-how_to_use_zhparser.md new file mode 100644 index 0000000000000000000000000000000000000000..4f8a48a8d9318ff87cdced474b1d6ec2657dfd88 --- /dev/null +++ b/content/zh/post/huzhengchao/2021-11-05-how_to_use_zhparser.md @@ -0,0 +1,100 @@ ++++ +title = "如何在openGauss中使用zhparser" +date = "2021-11-05" +tags = ["如何在openGauss中使用zhparser"] +archives = "2021-11-05" +author = "gentle_hu" +summary = "如何在openGauss中使用zhparser" +img = "/zh/post/huzhengchao/title/img1.png" +times = "9:30" ++++ + +[TOC] + +# 如何在openGauss中使用zhparser + +## 准备 + +1. 一个装有openGauss数据库的环境 +2. 下载scws代码到任意位置: https://github.com/hightman/scws master +3. 下载zhparser代码到任意位置: https://github.com/amutu/zhparser master + +## 步骤 + +1. 登录环境并source openGauss的环境变量 +2. 编译安装 scws + 1. 解压并进入文件夹: `unzip scws-master.zip && cd scws-master` + 2. 生成configure文件并执行编译: `./acprep && ./configure && make` + 3. 安装scws到相关lib目录(需要root权限):`make install` + 4. 修改刚刚安装的scws lib的到合适的权限(需要root权限):`chmod 777 /usr/local/include/scws -R` +3. 编译安装 zhparser + 1. 解压并进入文件夹:`unzip zhparser-master.zip && cd zhparser-master` + 2. 按照下文patch修改zhparser代码。 + 3. 编译安装(若报错见Q&A):`make && make install` + + + +## PATCH + +``` +diff --git a/zhparser-master/Makefile b/zhparser-master/Makefile +index ae048c3..20b1830 100644 +--- a/zhparser-master/Makefile ++++ b/zhparser-master/Makefile +@@ -12,7 +12,7 @@ DATA_TSEARCH = dict.utf8.xdb rules.utf8.ini + REGRESS = zhparser + + SCWS_HOME ?= /usr/local +-PG_CPPFLAGS = -I$(SCWS_HOME)/include/scws ++PG_CPPFLAGS = -I$(SCWS_HOME)/include/scws -fpic + SHLIB_LINK = -lscws -L$(SCWS_HOME)/lib -Wl,-rpath -Wl,$(SCWS_HOME)/lib + + PG_CONFIG ?= pg_config +diff --git a/zhparser-master/zhparser.c b/zhparser-master/zhparser.c +index 527cef0..6212533 100644 +--- a/zhparser-master/zhparser.c ++++ b/zhparser-master/zhparser.c +@@ -57,16 +57,16 @@ static void init_type(LexDescr descr[]); + * prototypes + */ + PG_FUNCTION_INFO_V1(zhprs_start); +-Datum zhprs_start(PG_FUNCTION_ARGS); ++extern "C" Datum zhprs_start(PG_FUNCTION_ARGS); + + PG_FUNCTION_INFO_V1(zhprs_getlexeme); +-Datum zhprs_getlexeme(PG_FUNCTION_ARGS); ++extern "C" Datum zhprs_getlexeme(PG_FUNCTION_ARGS); + + PG_FUNCTION_INFO_V1(zhprs_end); +-Datum zhprs_end(PG_FUNCTION_ARGS); ++extern "C" Datum zhprs_end(PG_FUNCTION_ARGS); + + PG_FUNCTION_INFO_V1(zhprs_lextype); +-Datum zhprs_lextype(PG_FUNCTION_ARGS); ++extern "C" Datum zhprs_lextype(PG_FUNCTION_ARGS); + + static scws_t scws = NULL; + static ParserState parser_state; +@@ -213,7 +213,7 @@ static void init(){ + } + + snprintf(dict_path, MAXPGPATH, "%s/base/%u/zhprs_dict_%s.txt", +- DataDir, MyDatabaseId, get_database_name(MyDatabaseId)); ++ t_thrd.proc_cxt.DataDir, u_sess->proc_cxt.MyDatabaseId, get_database_name(u_sess->proc_cxt.MyDatabaseId)); + if(scws_add_dict(scws, dict_path, load_dict_mem_mode | SCWS_XDICT_TXT) != 0 ){ + ereport(NOTICE, + (errcode(ERRCODE_INTERNAL_ERROR), 
+
+```
+
+
+
+## Q&A
+
+1、编译zhparser时报错 error: access/ustore/undo/knl_uundotype.h: No such file or directory
+
+ 解决方法:将openGauss-server/src/include/access/ustore/undo/knl_uundotype.h 拷贝到 $GAUSSHOME/include/postgresql/server/access/ustore/undo/中,若目标位置没有这个文件夹,需要自行创建。
+
+2、编译zhparser时报错 error: communication/commproxy_basic.h: No such file or directory
+
+ 参考上一个。
\ No newline at end of file
diff --git a/content/zh/post/huzhengchao/2021-12-09-HowToAddANewFunc.md b/content/zh/post/huzhengchao/2021-12-09-HowToAddANewFunc.md
new file mode 100644
index 0000000000000000000000000000000000000000..536764ceaa669b23da83bfb804c5904efc343120
--- /dev/null
+++ b/content/zh/post/huzhengchao/2021-12-09-HowToAddANewFunc.md
@@ -0,0 +1,511 @@
++++
+title = "openGauss系统函数添加指导"
+date = "2021-12-09"
+tags = ["openGauss系统函数添加指导"]
+archives = "2021-12-09"
+author = "gentle_hu"
+summary = "openGauss系统函数添加指导"
+img = "/zh/post/huzhengchao/title/img1.png"
+times = "9:30"
++++
+
+[TOC]
+
+# openGauss系统函数添加指导
+
+## 1、函数架构简介
+
+openGauss内的函数可以分为两个部分:
+
+- 身份注册声明:openGauss中存在一个系统表 pg_proc,这个表存放了所有函数的基本元信息,相当于函数的“户口本”,只有在其中可以查到的函数,才可以在SQL语句中进行调用,才有“数据库函数”的身份。常见的注册方式有:builtin、升级脚本、CREATE FUNCTION语句、EXTENSION。
+- 底层功能实现:实现其功能的具体逻辑代码,可以根据其所用的语言分为四类:INTERNAL, SQL, PLPGSQL、C。
+
+
+
+四种常见的函数注册创建方式,分别对应着不同的场景:
+
+- **builtin**:源代码中存在一个名为builtin_funcs.ini的文件,存放着一系列内置函数的元信息,在初始化安装数据库时,会通过某些方式,全量扫描此文件,将里面罗列的函数批量注册到pg_proc系统表。
+- **升级脚本**:数据库由老版本升级到新版本的场景下,不会也不能遍历重刷builtin_funcs.ini到pg_proc,因此若新版本有新增函数,就需要编写一个升级脚本,在升级过程中通过升级脚本将新增函数注册到pg_proc之中。
+- **CREATE FUNCTION**: 通过`CREATE FUNCTION ... BEGIN ... END`语句,一次性完成注册和实现。
+- **EXTENSION**:随着extension进行注册和加载。
+
+
+
+四类语言实现方案分别有不同的注册声明方式以及实现特征:
+
+- **INTERNAL**: 通过builtin或升级脚本进行注册,底层功能通过C语言实现的函数,也是数据库最常见的内置函数,如pg_sleep()。其底层功能函数的函数名可以在pg_proc的prosrc列查到。
+- **SQL**: 通过builtin或者升级脚本进行注册,底层功能通过一句SQL实现的函数,也是数据库内置函数的一种。如to_char() ,在数据库底层会转换为一句`select CAST(... 
AS VARCHAR2);`,这一句在pg_proc的prosrc列可以查到,通常是为了复用已有功能模块来适配新接口而采用这种实现方案。 +- **PLPGSQL**: 这个就是我们所熟知的,使用plpgsql进行编写创建的函数了,通过语句一次完成声明与实现。pg_proc的prosrc列存放了这个语句的源代码。 +- **C**: 出现在各种extension之中,内部功能使用C语言实现。这个和INTERNAL比较类似,区别在于其具体注册方式为通过extension进行注册,并且底层代码是在外部lib之中,而INTERNAL是在gaussdb二进制内的。可以在pg_proc的prosrc、probin列查到其lib路径以及函数符号信息。 + +其中INTERNAL、SQL类的函数,因为都可以通过builtin的方式整,因此也都常被统称为builtin函数。 + + + +一个普通函数调用流程大致为: + +1、解析SQL语句,获取到函数名以及参数值与类型等信息。 + +2、根据以上信息,在pg_proc中检索到这个函数的元数据,元数据中包含默认值、实现语言、底层函数、估算代价等所有信息。 + +3、根据其实现语言,调用其具体底层接口模块。如:INTERNAL类型会直接调用其元数据中的底层C语言代码函数;C类型会根据元数据信息加载相关lib后调用lib中的C语言代码函数;SQL类型会直接转而执行元数据prosrc中存放的sql语句;PLPGSQL类型会转而走过程语言模块解释执行prosrc中存放的源代码。 + + + +另外还有一种稍微特殊的函数——聚集函数,它其实是在普通函数的架构基础上做的功能变更与扩展,其架构和添加流程与普通函数有些差异。我们分两章介绍如何添加普通的INTERNAL函数和聚集函数。 + + + +## 2、如何添加一个普通的INTERNAL函数 + +了解了上面的架构与流程后,不难得出,添加一个普通的INTERNAL函数,可分为四个步骤: + +​ 1、**声明函数身份**。将我们已经提前设计好的函数的各种属性,如参数数量类型、返回值类型、稳定性等等,按照特定的格式添加进buitin_funcs.ini文件之中。 + +​ 2、**实现功能代码**。在内核代码合适位置,实现一个C语言的函数,来实现对应的功能。 + +​ 3、**关联声明实现**。将上一个步骤编写函数的函数名,添加到builtin_funcs.ini对应条目的对应位置。 + +​ 4、**编写升级脚本**。用于在升级流程之中注册SQL函数身份。 + + + +### 2.1 声明函数身份 + +在这之前我们需要已经提前设计好自己的函数属性以及功能,如参数数量类型、返回值类型、稳定性等等,将这些信息按照特定的格式和顺序填写到`./src/common/backend/catalog/builtin_funcs.ini` 文件之中。 + +这个文件中需要按照如下结构进行书写: + + AddFuncGroup( + "pg_sleep", 1, + AddBuiltinFunc(_0(2626), _1("pg_sleep"), _2(1), _3(true), _4(false), _5(pg_sleep), _6(2278), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pg_sleep"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false)) + ), +可以看到其有内外两层组成。 + +外层为`AddFuncGroup("pg_sleep", 1, AddBuiltinFunc(...))`,其第一个成员变量为函数名,第二个成员变量为重载数量,后面的AddBuiltinFunc结构为函数元信息。这个结构会匹配内核代码中的结构体`struct FuncGroup`。**需要十分注意的是,这个结构需要按照第一个成员也就是函数名的ASCII大小升序,添加到对应的位置**。 + +内层`AddBuiltinFunc`为函数元信息,其目前一共含有38个属性,与内核代码结构体`struct Builtin_func`和系统表pg_proc都有对应关系。我们根据如下表每个属性含义,完善AddBuiltinFunc结构(可暂不关注属性5与25)。 + +| 编号 | 含义 | 对应Builtin_func | +| :--- | ------------------------------------------------------------ | --------------------------- | +| 0 | oid,函数的唯一标识id,需要小于10000且不可以和已有函数重复。 | Oid foid; | +| 1 | 函数名 | const char* funcName | +| 2 | 参数数量 | int2 nargs | +| 3 | 是否STRICT ( NULL IN NULL OUT,即若入参有NULL,则不执行,直接返回一个NULL) | bool strict | +| 4 | 是否返回一个集合,就是返回多行的意思。 | bool retset | +| 5 | 底层C语言功能代码函数名。 | PGFunction func | +| 6 | 返回值类型oid | Oid rettype | +| 7 | 所属schema | Oid pronamespace | +| 8 | owner | Oid proowner | +| 9 | 内部实现语言,填 INTERNALlanguageId或SQLlanguageId | Oid prolang | +| 10 | 如果返回一个集合的话,估算的每行执行代价,否则是0; | float4 procost | +| 11 | 如果返回一个集合的话,估算的返回行数,否则是0; | float4 prorows | +| 12 | 存在变长参数,这里是变长参数oid | Oid provariadic | +| 13 | 此函数的简化调用方式。 | regproc protransform | +| 14 | 是否是一个聚集参数。 | bool proisagg | +| 15 | 是否是一个窗口函数。 | bool proiswindow | +| 16 | 是否是一个安全定义器(也就是一个“setuid”函数) | bool prosecdef | +| 17 | 函数没副作用。如果函数没有对参数进行防泄露处理,则会抛出错误 | bool proleakproof | +| 18 | 函数稳定性。描述该函数的结果是否只依赖于它的输入参数。
i:最稳(immutable),对于相同的输入总是产生相同的结果。
s:较稳(stable),对于相同的输入其结果在一次扫描里不变。
v:不稳(volatile),其结果可能在任何时候变化,也用于那些有副作用的函数。 | char provolatile | +| 19 | 默认参数数量 | int2 pronargdefaults | +| 20 | 入参数量以及类型,仅包含IN、INOUT、VARIADIC参数。
格式如:_20(count, typeoid1, typeoid2, typeoid3...) | ArrayOid proargtypes | +| 21 | 所有参数的数量以及类型。
格式如:_21(count, typeoid1, typeoid2, ...) | ArrayOid* proallargtypes | +| 22 | 所有参数的数量以及模式,要和21对应。模式含义i-IN, o-OUT, b-INOUT, v-VARIADIC
格式如:_22(count, 'i', 'o', 'v',...)。
注意,若所有参数都是i,这个域为空。 | ArrayChar* proargmodes | +| 23 | 所有参数的数量以及名字,要和21对应。若没有参数有名字,则这个域为空。
格式如:_23(count, name1, name2, name3....) | ArrayCStr* proargnames | +| 24 | 若含有默认参数,则这里是默认值的表达式树(按照`nodeToString()`的表现方式),不含则为空 | const char* proargdefaults | +| 25 | 对于INTERNAL这里是底层函数名,对于SQL这里是一句sql,对于PLPGSQL这里是其源码。 | const char* prosrc; | +| 26 | 关于如何调用函数的附加信息。 | const char* probin; | +| 27 | 函数针对运行时配置变量的本地设置 | ArrayCStr* proconfig; | +| 28 | 访问权限 | ArrayAcl* proacl | +| 29 | 函数具有默认值的入参的位置。 | ArrayInt2* prodefaultargpos | +| 30 | 函数的执行模式,表示函数是在fence还是not fence模式下执行,如果是fence执行模式,函数的执行会在重新fork的进程中执行。分布式属性,单机不涉及,false即可。 | bool* fencedmode | +| 31 | 表示该函数是否可以下推到DN上执行。分布式属性,单机不涉及,false即可。 | bool* proshippable | +| 32 | 是否支持重载。 | bool* propackage | +| 33 | 函数描述注释。 | const char* descr | +| 34 | 函数类型。builtin_funcs中都是f类型。 | char prokind | +| 35 | function\procdure的入参字符串。 | const char* proargsrc | +| 36 | 如果属于某个package的话,这里填package的oid,不属于的话填0. | Oid propackageid | +| 37 | 是否是一个私有函数。 | bool proisprivate | +| | | | + +> 诀窍:builtin_funcs.ini内已经有三千多个函数,总有那么一些与自己要加的比较像,可以找出来对照着进行填写添加。 + + + +**函数重载** + +一个函数可以有多个重载,以generate_series为例,外层的第二个参数填写重载数量,后面对每一种重载都正常写一个AddBuiltinFunc,其中我们可以看到属性5都是不一样的,每一个重载版本,都对应不同的底层实现函数。入参肯定也是不一样的。 + + AddFuncGroup( + "generate_series", 8, + AddBuiltinFunc(_0(938), _1("generate_series"), ..., _5(generate_series_timestamp), ..., _20(3, 1114, 1114, 1186), ..., _25("generate_series_timestamp"), ...), + AddBuiltinFunc(_0( 939), _1("generate_series"),..., _5(generate_series_timestamptz), ..._20(3, 1184, 1184, 1186), ..., _25("generate_series_timestamptz"), ...), + AddBuiltinFunc(_0(1066), _1("generate_series"),...,_5(generate_series_step_int4),..., _20(3, 23, 23, 23), ..., _25("generate_series_step_int4"),...), + ... + ), + + + + +**变长参数** + +含有变长参数的函数,我们需要注意属性2、12、20、21、22。 + +2为入参数量,变长参数算一个;12为变长参数oid;20、21、22照常填写,注意22中变长参数模式为'v' + +可通过 \df 元命令快速查看,参数前面有 VARIADIC表示变长。例如concat_ws + +``` +openGauss=# \df concat_ws + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +------------+-----------+------------------+----------------------+--------+------------+------------+--------- + pg_catalog | concat_ws | text | text, VARIADIC "any" | normal | f | f | f +(1 row) +``` + + + +**默认值参数** + +含有默认值的参数,我们需要注意2、19、24、29 + +2为入参数量,有默认值的参数算一个 + +19为默认参数的数量。 + +24为默认参数的值,里面存放放内核中的nodeToString打出来的字符串表达式。 + +29为默认参数的位置,填写格式与20、21等差不多,_29(num, pos),其中pos表示参数下标。 + +可通过\df元命令快速查看,变量后有个DEFAULT,如 + +``` +openGauss=# \df pg_start_backup + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +------------+-----------------+------------------+---------------------------------------------+--------+------------+------------+--------- + pg_catalog | pg_start_backup | text | label text, fast boolean DEFAULT false | normal | f | f | f + pg_catalog | pg_start_backup | text | label text, fast boolean, exclusive boolean | normal | f | f | f +(2 rows) +``` + + + +### 2.2 实现功能代码 + +在内核代码合适的.cpp文件中,使用一些约定的接口,实现一个函数来完成相关功能。以pg_terminate_session为例 + + Datum pg_terminate_session(PG_FUNCTION_ARGS) + { + ThreadId tid = PG_GETARG_INT64(0); + uint64 sid = PG_GETARG_INT64(1); + int r = -1; + + if (tid == sid) { + r = kill_backend(tid); + } else if (ENABLE_THREAD_POOL) { + ThreadPoolSessControl *sess_ctrl = g_threadPoolControler->GetSessionCtrl(); + int ctrl_idx = sess_ctrl->FindCtrlIdxBySessId(sid); + r = sess_ctrl->SendSignal((int)ctrl_idx, SIGTERM); + } + + PG_RETURN_BOOL(r == 0); + } +函数返回类型都需要为Datum。 + +入参必须为FunctionCallInfo fcinfo,但为了简化书写以及保持接口形式统一,我们将其define成了一个宏PG_FUNCTION_ARGS。 + 
+获取实际参数则需要使用专门的宏接口来获取,例如 PG_GETARG_INT64(0) ,这一套接口一般都以PG_GETARG_为前缀,之后为数据类型,宏参数为函数参数下标。例如假如函数第三个参数为bool,则需要使用 PG_GETARG_BOOL(2)来获取。 + +return语句也有封装好的接口,一般都以PG_RETURN_为前缀,后面紧跟类型,宏参数为实际值。 + + + +上面是一个返回一行单列的函数,有时候我们还有返回多行多列的函数,以gs_threadpool_status为例,以下是部分简化代码: + + Datum gs_threadpool_status(PG_FUNCTION_ARGS) + { + FuncCallContext* funcctx = NULL; // 函数调用上下文 + ThreadPoolStat* entry = NULL; // 用于保存单行的值 + ... + + // 第一次调用需要提前初始化函数调用上下文的相关信息。 + if (SRF_IS_FIRSTCALL()) { + // 在fcinfo中创建函数调用上下文 + funcctx = SRF_FIRSTCALL_INIT(); + + // 初始化列描述信息,并保存到函数调用上下文。 + TupleDesc tupdesc = CreateTemplateTupleDesc(NUM_THREADPOOL_STATUS_ELEM, false, TAM_HEAP); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "nodename", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "groupid", INT4OID, -1, 0); + .... + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + /* 计算总行数,并获取所有源数据 */ + funcctx->user_fctx = (void*)g_threadPoolControler->GetThreadPoolStat(&(funcctx->max_calls)); + } + + // 在fcinfo中找到函数调用上下文。 + funcctx = SRF_PERCALL_SETUP(); + + // 调用计数 < 总行数 + if (funcctx->call_cntr < funcctx->max_calls) { + Datum values[NUM_THREADPOOL_STATUS_ELEM]; + bool nulls[NUM_THREADPOOL_STATUS_ELEM] = {false}; + + // 在上下文中找到当前行的源数据 + entry = (((ThreadPoolStat*)funcctx->user_fctx) + funcctx->call_cntr); + + // 将当前行源数据填充到values、nulls数组中,并form成一个tuple。 + values[0] = CStringGetTextDatum(g_instance.attr.attr_common.PGXCNodeName); + nulls[0] = false; + values[1] = Int32GetDatum(entry->groupId); + ... + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + + // 返回当前行,并标注还会有下一行,同时将调用计数加一。后续还会继续调用此函数获取数据。 + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } else { + // 返回空,并标注当前已经返回所有行了。后续不会再调用此函数获取数据了 + SRF_RETURN_DONE(funcctx); + } + } +> 诀窍:INTERNAL函数已经有三千多个,总有那么一些与自己要加的比较像,可以找出来对照着进行编写,如变长参数的获取等。 +> +> 这里的底层功能实现函数与我们要加的数据库函数名称是可以不一样的,例如重载的时候,一个数据库函数,对应好几个内部实现函数,每个都是不一样的。但一般为了便于开发与调试,能命名一样就尽量一样。 + + + +### **2.3 关联声明实现** + +我们需要找一个builtin_funcs.ini能访问到的合适的 .h 头文件,将我们写的c功能函数声明进去。 + +在builtin_funcs.ini 的属性5和25中, 将我们第二步实现的功能函数名写进去。 + + + +### 2.4 编写升级脚本 + +1:占用小版本号 + +打开 ./src/common/backend/utils/init/globals.cpp,找到如下代码: + +``` +/* hard-wired binary version number */ +const uint32 GRAND_VERSION_NUM = 92423; +``` + +这个值就是小版本号,每次提交之中若有涉及到需要版本升级差异的动作,都需要占用一个版本号。 + +如我们现在需要使用升级脚本来完成注册函数,需要将这个值修改加一到92424,而这个92424就是我们占用的版本号。 + + + +2:编写upgrade脚本 + +进入src/include/catalog/upgrade_sql/upgrade_catalog_maindb文件夹。 + +可以发现里面有类似upgrade_catalog_maindb_92_000.sql与upgrade-post_catalog_maindb_92_000.sql两种文件,主要区别在一个带`-post`,一个不带。区别在于在升级流程中,我们必然会将旧的gaussdb二进制文件更新为新的,而带post的在新的上执行,不带的在旧的上执行。 + +我们新增了INTERNAL函数,其底层实现函数肯定在新版二进制上,因此我们用我们占用的版本号创建一个文件:upgrade-post_catalog_maindb_92_424.sql + +对于每个新增的函数(以new_function_name为例),都按照如下三句的形式,添加到新建的文件中。 + +``` +-- 首先需要先DROP一下,防止例如我们第一次升级意外失败,残留数据没回滚干净又升了第二次等函数早就存在了的情况 +DROP FUNCTION IF EXISTS pg_catalog.new_function_name(text, text) CASCADE; + +-- 设置下一个创建对象的OID,此处0000要替换成我们这个函数的oid。 +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 0000; + +-- 创建函数。 +-- 类似通过PLPGSQL创建函数语法,但区别在于AS后并不是类似 "$$...begin...end $$"的函数体,而直接直接指定了其底层功能实现函数的函数名,同时LANGUAGE也不是plpgsql, 而是设置为internal。 +-- 其余的部分与plpgsql就一致了,如returns、各种属性指定方式等。 +CREATE FUNCTION pg_catalog.new_function_name(text, text) +RETURNS int8 +as 'cfunc_of_new_function' +LANGUAGE INTERNAL +IMMUTABLE COST 10000 STRICT; +``` + +可以看到其实升级脚本的功能其实就是利用create语句在升级流程中对函数进行声明注册,添加到pg_proc的过程。 + + + +最后还需要将这个文件复制到src/include/catalog/upgrade_sql/upgrade_catalog_otherdb文件夹下,并重命名为upgrade-post_catalog_otherdb_92_424.sql + + + +3:编写rollback脚本 + 
+rollback脚本是用来在升级前进行环境清理、预升级完成后但又不想升了,进行回滚操作用到的脚本。 + +进入src/include/catalog/upgrade_sql/rollback_catalog_maindb文件夹,与upgrade类似,我们也创建一个rollback-post_catalog_maindb_92_424.sql文件 + +对每个函数都按照如下格式进行添加 + +``` +-- 删除。 +DROP FUNCTION IF EXISTS pg_catalog.new_function_name(text, text) CASCADE; +``` + + + +同理,我们也需要将其拷贝到src/include/catalog/upgrade_sql/rollback_catalog_otherdb并重命名。 + + + +## 3、如何添加一个INTERNAL聚集函数 + +### 3.1 聚集函数的架构 + +聚集函数与普通函数不同,其执行并非直接调用底层功能代码实现,而是分成了好几个环节,每个环节各对应一个功能代码。 + +其可以分为三个执行环节: + +- transition:收集值计算中间变量。 +- collectition:这个步骤不是必须的,在分布式或者并行查询下用,用于收集多个中间变量,总合成一个中间变量 +- final:将中间变量计算为结果。 + +其中每个环节都由一个功能函数来进行。 + +例如 avg(int4) 函数由int4_avg_accum、 int8_avg_collect、int8_avg来完成,其中间变量为一个长度为2的int8数组,用来记录sum与count。 + +其执行流程为: + +- int4_avg_accum函数依次接收每一行的值,并计算sum,增加count + +- int8_avg_collect函数将分布式每个节点,或者并行查询的多线程下,所有的sum、count收集起来,计算出一个总的sum和count +- int8_avg函数计算sum / count得到最终结果。 + +其所有信息都在系统表pg_aggregate能查到。 + +``` +openGauss=# select * from pg_aggregate where aggfnoid=2101; + aggfnoid | aggtransfn | aggcollectfn | aggfinalfn | aggsortop | aggtranstype | agginitval | agginitcollect | aggkind | aggnumdirectargs +----------------+----------------+------------------+------------+-----------+--------------+------------+----------------+---------+------------------ + pg_catalog.avg | int4_avg_accum | int8_avg_collect | int8_avg | 0 | 1016 | {0,0} | {0,0} | n | 0 +(1 row) +``` + +当然并非所有聚集函数都需要上述三步,例如median函数,无法计算中间值,因此也没有collectition函数。count()函数中间值就是结果值,因此不需要final函数。 + + + +### 3.2 如何添加 + +1、首先我们需要设计好聚集函数的功能,还需根据其功能按需设计三个阶段函数以及中间变量。因此我们最多一共需要设计四个函数,一聚集三普通。 + + + +2、对于三个阶段函数,按照添加正常INTERNAL普通函数一样一样的流程,将其添加成普通的INTERNAL函数。 + + + +3、编写聚集函数的builtin + + 聚集函数也需要有函数身份,因此我们需要像普通函数一样通过builtin_funcs.ini将其注册到pg_proc。但填写时需要注意如下几个值。以avg为例: + + AddFuncGroup( + "avg", 8, + AddBuiltinFunc(_0(2100), _1("avg"), _2(1), _3(false), _4(false), _5(aggregate_dummy), _6(1700), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(true), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 20), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("aggregate_dummy"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false)), + ... 
+
+ )
+属性5本应填底层功能代码函数名,但聚集函数并没有单一的底层代码,因此这里填什么都不会被执行;为了格式统一以及便于错误场景下调试,我们在这里填一个占位用的假函数 aggregate_dummy。
+
+属性14表示是否是一个聚集函数,这里一定是true。
+
+属性25保持统一,填假函数的函数名。
+
+
+
+4、添加pg_aggregate聚集函数元信息
+
+ 我们需要在./src/include/catalog/pg_aggregate.h中,按照如下格式将聚集函数与三个阶段函数关联起来,并设置中间变量。
+
+```
+DATA(insert ( 2101 int4_avg_accum int8_avg_collect int8_avg 0 1016 "{0,0}" "{0,0}" n 0));
+```
+
+其共有10列,和系统表pg_aggregate一一对应,第一列是聚集函数oid,之后三列是三个阶段函数,再之后是中间变量以及其初始化值等,具体含义可以参照官网文档pg_aggregate的说明。
+
+ 同builtin一样,这些数据也只会在初始化安装数据库时被统一导入到系统表pg_aggregate,升级时并不会重刷,因此我们还需要额外写升级脚本。
+
+
+
+5、添加升级脚本
+
+ 三个阶段函数按照正常普通INTERNAL的格式添加至upgrade、rollback脚本之中。聚集函数我们使用create aggregate语法和drop aggregate语法添加删除。例如:
+
+upgrade.sql
+
+```
+-- 先创建阶段函数。json_agg的阶段函数只有两个。
+DROP FUNCTION IF EXISTS pg_catalog.json_agg_finalfn(internal) CASCADE;
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3125;
+CREATE FUNCTION pg_catalog.json_agg_finalfn ...;
+DROP FUNCTION IF EXISTS pg_catalog.json_agg_transfn(internal, anyelement) CASCADE;
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3126;
+CREATE FUNCTION pg_catalog.json_agg_transfn ...;
+
+-- 同样先执行drop清理环境,之后设置oid,最后使用CREATE AGGREGATE语句注册聚集函数。
+drop aggregate if exists pg_catalog.json_agg(anyelement);
+SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3124;
+create aggregate pg_catalog.json_agg(anyelement)
+(SFUNC=json_agg_transfn, STYPE= internal, finalfunc = json_agg_finalfn);
+```
+
+rollback.sql
+
+```
+-- 先drop聚集函数,再drop阶段函数。
+drop aggregate if exists pg_catalog.json_object_agg("any", "any");
+DROP FUNCTION IF EXISTS pg_catalog.json_object_agg_finalfn(internal) CASCADE;
+DROP FUNCTION IF EXISTS pg_catalog.json_object_agg_transfn(internal, "any", "any") CASCADE;
+```
+
+
+
+## 5、如何添加一个SQL\PLPGSQL\C函数
+
+SQL语言的函数和INTERNAL的比较类似,但添加起来更简单。不需要编写具体的底层实现,直接将builtin_funcs.ini文件的属性5置为NULL,属性9填写SQLlanguageId,属性25填写对应的SQL查询,升级脚本函数体写成对应的SQL就好了。可以参考to_char(),下面给出一个示意的升级脚本片段。
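+
+该片段沿用前文DROP/SET/CREATE的升级脚本写法,其中函数名new_sql_func、OID占位符0000与函数体均为假设的示例,并非内核中已有内容:
+
+```sql
+DROP FUNCTION IF EXISTS pg_catalog.new_sql_func(text) CASCADE;
+-- OID占位符0000需替换为实际分配的oid
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 0000;
+-- 函数体直接是一句SQL,语言相应写成SQL(示意写法)
+CREATE FUNCTION pg_catalog.new_sql_func(text)
+RETURNS varchar
+AS 'select CAST($1 AS VARCHAR2)'
+LANGUAGE SQL
+IMMUTABLE STRICT;
+```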
+
+PLPGSQL语言实现的函数,就是CREATE FUNCTION ... BEGIN ... END的方式,大家都会,不再赘述。
+
+C实现的函数,主要是作为EXTENSION的一部分,参考EXTENSION的写法即可。
+
+聚集函数也类似,把聚集函数的规则与对应实现语言的规则结合起来就可以了。
+
+
+
+## 6、如何验证功能的正确性
+
+对于我们添加的builtin函数或者在system_views.sql之中使用plpgsql添加的函数等,都算是我们数据库的系统函数,出问题都算是数据库的责任,因此我们需要去进行测试。至于数据库运行期间我们通过plpgsql或者extension添加的用户自己的函数就不需要这样测试了。
+
+builtin函数分两种注册方式,builtin与升级脚本,对应的也是正常安装与版本升级两种场景,因此我们主要针对这两种场景入手测试。
+
+### 6.1 验证功能的正确性
+
+ (1)编译安装启动数据库,自行编写测试用例测试功能正确。
+
+ (2)执行fastcheck测试集,测试数据库其他功能正常。若有失败用例,需要分析是否是改坏了,若不是则可修改用例预期,例如有些用例维护了一个全量函数列表,因此新增函数的话,这个用例一定会失败,可以修改预期。
+
+ (3)推荐将自身编写的测试集添加到fastcheck测试集,方便功能看护。
+
+
+
+### 6.2 验证升级正确性
+
+ (1)安装一个老版本数据库。
+
+ (2)使用自己的代码编译出一个安装包。
+
+ (3)使用自己的安装包,将老版本数据库预升级到新版本,并验证函数功能正确,数据库正常。
+
+ (4)回滚版本到老版本,验证回滚完全无残留,数据库正常。
+
+ (5)重新执行升级,并提交升级。验证功能正确,数据库正常。
+
+ 升级流程指导参照官网相关wiki与文档。
+
diff --git a/content/zh/post/huzhengchao/2021-12-13-HowToUseSqlines.md b/content/zh/post/huzhengchao/2021-12-13-HowToUseSqlines.md
new file mode 100644
index 0000000000000000000000000000000000000000..5e27b1eec2a142a40c37b451b2c3146e87b2eb5d
--- /dev/null
+++ b/content/zh/post/huzhengchao/2021-12-13-HowToUseSqlines.md
@@ -0,0 +1,762 @@
++++
+title = "openGauss Sqlines 使用指导"
+date = "2021-12-13"
+tags = ["sqlines"]
+archives = "2021-12-13"
+author = "gentle_hu"
+summary = "如何使用 Sqlines 向openGauss迁移SQL语句"
+img = "/zh/post/huzhengchao/title/img1.png"
+times = "9:30"
++++
+
+
+# openGauss Sqlines 使用指导
+
+[TOC]
+
+## Sqlines简介
+
+Sqlines是一款开源软件,支持多种数据库之间SQL语句语法的转换,openGauss将此工具修改适配,新增了openGauss数据库选项,目前可以支持PostgreSQL、MySQL、Oracle向openGauss的SQL语法转换。
+
+
+
+## 如何获取和使用
+
+1、在社区下载代码到任意位置:[openGauss/openGauss-tools-sqlines (gitee.com)](https://gitee.com/opengauss/openGauss-tools-sqlines)
+
+2、进入代码根目录下,执行脚本编译安装sqlines:
+
+```
+[user@openGauss33 sqlines]$ sh build.sh -i
+```
+
+3、sqlines将安装到根目录下的/bin文件夹下,可将其添加到环境变量方便使用:
+
+```
+[user@openGauss33 sqlines]$ export PATH=$PATH:`pwd`/bin
+```
+
+4、使用sqlines
+
+```
+[user@openGauss33 sqlines]$ sqlines -?
+SQLines 3.1.330 - SQL Assessment and Conversion Tool.
+Portions Copyright (c) 2020 SQLines.
+Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+All Rights Reserved.
+
+How to use:
+ sqlines -option=value [...n]
+
+Options:
+ -s - Source type
+ -t - Target type
+ -in - List of files (wildcards *.* are allowed)
+ -out - Output directory (the current directory by default)
+ -log - Log file (sqlines.log by default)
+ -? 
- Print how to use + +Example: +Convert script.sql file from Oracle to openGauss + ./sqlines -s=oracle -t=opengauss -in=script.sql +``` + +参数说明: + +| **参数** | **值域** | **功能** | +| -------- | --------------------------------- | ---------------------------------- | +| **-?** | - | 帮助菜单 | +| **-s** | [ oracle \| mysql \| postgresql ] | Source数据库 | +| **-t** | [ opengauss ] | Target数据库 | +| **-in** | FILE_PATH | 输入文件 | +| **-out** | [ FILE_PATH \| /* empty */] | 输出文件,不指定时输出在in文件夹, | +| **-log** | [ FILE_PATH \| /* empty */] | 输出日志,不指定时输出在当前文件夹 | +| | | | + + + +5、执行脚本卸载sqlines: + +``` +[user@openGauss33 sqlines]$ sh build.sh -m +``` + + + +## PostgreSQL to openGauss + +### 删除IF + +``` +Create table IF NOT EXISTS tb as select * from basetb; +Create table IF NOT EXISTS tb as execute p1(); +Create index IF NOT EXISTS idx on tb(a); +Create sequence IF NOT EXISTS sqc; +Create schema IF NOT EXISTS schm; +``` + +openGauss中很多语法暂时不支持 if not exists判断,因此在转换时会给删掉。 + +如: `Create schema IF NOT EXISTS schm;` => `Create schema schm;` + + + +## MySQL to openGauss + +### 数据类型 + +| MYSQL 数据类型 | openGauss 数据类型 | **备注** | +| -------------- | ------------------ | -------- | +| TINYINT | SMALLINT | | +| MEDIUMINT | INT | | +| DOUBLE | DOUBLE PRECISION | | +| FLOAT | DOUBLE PRECISION | | +| DATETIME | TIMESTAMP | | +| TINYBLOB | BYTEA | | +| BLOB | BYTEA | | +| MEDIUNBLOB | BYTEA | | +| LONGBLOB | BYTEA | | +| TINYTEXT | TEXT | | +| MEDIUMTEXT | TEXT | | +| LONGTEXT | TEXT | | +| BINARY | BYTEA | | +| VARBINARY | BYTEA | | + + Mysql中很多数据类型与openGauss有差别,对于表中的数据类型,可以进行转换成为openGauss的数据类型。 + + + +### CREATE TABLE + +### 删除if + +``` +CREATE [TEMPORARY] TABLE [IF NOT EXISTS] tbl_name select_statement +``` + +对于create table as语句,openGauss不支持使用 if not exists 判断,因此会删除if判断。 + + + +#### 列约束 + +``` +语法:column_definition: + col_name type [NOT NULL | NULL] [DEFAULT default_value] [AUTO_INCREMENT] [UNIQUE [KEY] | [PRIMARY] KEY] [COMMENT 'string'] [reference_definition] + +举例: +Create table tb(a int NOT NULL, b text PRIMARY KEY, c int AUTO_INCREMENT……)括号内的部分。在创建表定义列时,可以立马为列指定很多的属性与约束,如NOT NULL等。 +``` + + 转换时行为如下: + +**AUTO_INCREMENT**:在前面添加一句创建序列SQL,并将auto_increment转换为defualt nextval(seq); + +**COMMENT 'string'**:删除 + + + +#### 表属性 + +``` +table_option: + {ENGINE|TYPE} = engine_name + | AUTO_INCREMENT = value + | AVG_ROW_LENGTH = value + | [DEFAULT] CHARACTER SET charset_name [COLLATE collation_name] + | CHECKSUM = {0 | 1} + | COMMENT = 'string' + | CONNECTION = 'connect_string' + | MAX_ROWS = value + | MIN_ROWS = value + | PACK_KEYS = {0 | 1 | DEFAULT} + | PASSWORD = 'string' + | DELAY_KEY_WRITE = {0 | 1} + | ROW_FORMAT = {DEFAULT|DYNAMIC|FIXED|COMPRESSED|REDUNDANT|COMPACT} + | UNION = (tbl_name[, tbl_name]...) + | INSERT_METHOD = { NO | FIRST | LAST } + | DATA DIRECTORY = 'absolute path to directory' + | INDEX DIRECTORY = 'absolute path to directory' + +举例: +Create table tb(a int) MAX_ROWS = 1000, CHECNSUM = 1, ……;中的MAX_ROWS等 +``` + + openGauss 并不支持这个地方添加这些语法用来指定一些属性,转换时全部删除处理。 + + + +### CREATE DATABASE + +``` +CREATE DATABASE [IF NOT EXISTS] db_name + [create_specification [, create_specification] ...] 
+ +create_specification: + [DEFAULT] CHARACTER SET charset_name + | [DEFAULT] COLLATE = collation_name +``` + +1、openGauss 语法不支持 if 判断,转换时会删掉 + +2、对于创建参数,openGauss和mysql也不大一样。其行为如下: + + **[DEFAULT] CHARACTER SET *charset_name***: 删除 + +​ **[DEFAULT] COLLATE = *collation_name***: 将COLLATE转换为 LC_COLLATE + + + +### CREATE FUNCTION/PROCDURE + +若不存在or replace则自动添加。 + +在 AS后的函数体部分,前后自动添加 $$ 符号。 + +语言属性自动添加或修改为 language plpgsql; + +SQL%NOTFOUND => NOT FOUND + +SQL%FOUND => FOUND + +SQL%ROWCOUNT => V_SQLROWCOUNT + + + +### CREATE INDEX + +``` +CREATE [UNIQUE | FULLTEXT | SPATIAL] INDEX index_name + [USING index_type] + ON tbl_name (index_col_name,...) + +index_col_name: + col_name [(length)] [ASC | DESC] +``` + + openGauss 不支持FULLTEXT、SPATIAL类型的 index,因此若有这两种的话,转换时会删除这两个关键字。 + + + +### CREATE SCHEMA + +``` +CREATE SCHEMA [IF NOT EXISTS] db_name +[create_specification [, create_specification] ...] + +create_specification: + [DEFAULT] CHARACTER SET charset_name + | [DEFAULT] COLLATE collation_name +``` + +1、openGauss 的语法不支持if判断,转换时会删掉 + +2、对于创建参数,openGauss 和mysql也不大一样。其行为如下: + +​ **[DEFAULT] CHARACTER SET *charset_name***: 注释掉。 + +​ **[DEFAULT] COLLATE *collation_name***: 将COLLATE关键字转换为 LC_COLLATE + + + +### ALTER TABLE + +``` +ALTER [IGNORE] TABLE tbl_name alter_specification [, alter_specification] ... + +alter_specification: + ADD [COLUMN] column_definition [FIRST | AFTER col_name ] +…… +``` + +1、openGauss 不支持ignore选项,转换时会给删除。 + + 2、ADD COLUMN时,openGauss 不支持使用first、after来指定列的位置,会将其删除。 + + + +### DROP INDEX + +``` +DROP INDEX index_name ON tbl_name +``` + +openGauss 不支持on子句,如 drop index idxa on tba, 转换时会删掉ON子句。 + + + +### INSERT + +``` +INSERT [LOW_PRIORITY | DELAYED | HIGH_PRIORITY] [IGNORE] + [INTO] tbl_name [(col_name,...)] + VALUES ({expr | DEFAULT},...),(...),... + [ ON DUPLICATE KEY UPDATE col_name=expr, ... ] +``` + + openGauss 的insert不支持 `LOW_PRIORITY / DELAYED / HIGH_PROPRITY / IGNORE`等选项,转换时会直接删掉这些选项。 + + + +### UPDATE + +``` +UPDATE [LOW_PRIORITY] [IGNORE] tbl_name + SET col_name1=expr1 [, col_name2=expr2 ...] + [WHERE where_definition] + [ORDER BY ...] + [LIMIT row_count] +``` + + openGauss 不支持` LOW_PRIORITY \ IGNORE` 选项,转换时会直接删除 + + + +### DELETE + +``` +single delete: +DELETE [LOW_PRIORITY] [QUICK] [IGNORE] FROM tbl_name + [WHERE where_definition] + [ORDER BY ...] +[LIMIT row_count] + +muilty-delete: +DELETE [LOW_PRIORITY] [QUICK] [IGNORE] + FROM tbl_name[.*] [, tbl_name[.*] ...] + USING table_references + [WHERE where_definition] + +``` + + openGauss 不支持 ` LOW_PRIORITY \ QUICK \ IGNORE ` 选项,转换时会直接删除。 + + + +### SELECT + +``` +SELECT +[ALL | DISTINCT | DISTINCTROW ] [HIGH_PRIORITY] [STRAIGHT_JOIN] [SQL_SMALL_RESULT] [SQL_BIG_RESULT] [SQL_BUFFER_RESULT] [SQL_CACHE | SQL_NO_CACHE] [SQL_CALC_FOUND_ROWS] +select_expr, ... +[INTO OUTFILE 'file_name' export_options | INTO DUMPFILE 'file_name'] +[FROM table_references] +[WHERE where_definition] +[GROUP BY {col_name | expr | position} [ASC | DESC], ... [WITH ROLLUP]] +[HAVING where_definition] +[ORDER BY {col_name | expr | position} [ASC | DESC] , ...] 
+[LIMIT {[offset,] row_count | row_count OFFSET offset}] +[FOR UPDATE | LOCK IN SHARE MODE]] +``` + +openGauss 不支持 DISTINCTROW 关键字,转换时会给删掉。 + +openGauss 不支持 [HIGH_PRIORITY] [STRAIGHT_JOIN] [SQL_SMALL_RESULT] [SQL_BIG_RESULT] [SQL_BUFFER_RESULT] [SQL_CACHE | SQL_NO_CACHE] [SQL_CALC_FOUND_ROWS]这些关键字,转换时都会给删除。 + + + +### RENAME + +``` +RENAME TABLE tbl_name TO new_tbl_name; +``` + + 支持将rename语法转换为 alter table rename语法 + +​ 如: RENAME TABLE tba TO tbb; => ALTER TABLE tba RENAME TO tbb; + + + + + +## Oracle to openGauss + +### 数据类型 + +| **ORACLE** | **OPENGAUSS** | 备注 | +| ------------------------------ | ------------------------- | ---- | +| BINARY_FLOAT | REAL | | +| BINARY_DOUBLE | DOUBLE PRECISION | | +| BLOB | BYTEA | | +| CLOB | TEXT | | +| DATE | TIMESTAMP | | +| FLOAT | DOUBLE PRECISION | | +| INTERVAL YEAR(4) TO MONTH | INTERVAL YEAR TO MONTH | | +| INTERVAL DAY(4) TO SECOND(8) | INTERVAL DAY TO SECOND(8) | | +| TIMESTAMP WITH LOCAL TIME ZONE | TIMESTAMP WITH TIME ZONE | | +| LONG | TEXT | | +| LONG RAW | BYTEA | | +| NCHAR(8) | CHAR(8) | | +| NCHAR VARYING(7) | VARCHAR(7) | | +| NCLOB | TEXT | | +| NUMBER(8) | INT | | +| NUMBER(1,0) | SMALLINT | | +| NUMBER(4,0) | SMALLINT | | +| NUMBER(8,0) | INT | | +| NUMBER(12,0) | BIGINT | | +| NUMBER(20,0) | DECIMAL(20,0) | | +| NUMBER(10,2) | DECIMAL(10,2) | | +| NUMBER | DOUBLE PRECISION | | +| NUMBER(\*) | DOUBLE PRECISION | | +| NVARCHAR2(12) | VARCHAR(12) | | +| RAW(8) | BYTEA | | +| REAL | DOUBLE PRECISION | | +| SMALLINT | DECIMAL(38) | | +| UROWID(16) | VARCHAR(16) | | +| VARCHAR2(18) | VARCHAR(18) | | +| BFILE | VARCHAR(255) | | +| ROWID | CHAR(10) | | +| SYS_REFCURSOR | REFCURSOR | | +| XMLTYPE | XML | | + + + +### CREATE FUNCTION/PROCDURE + +没有or replace时会给自动添加上。 + +在 AS后的函数体部分,前后自动添加 $$ 符号。 + +函数的语言属性自动修改或添加为 language plpgsql; + +函数的 RETURN 关键字转换为 RETURNS + +DBMS_OUTPUT.PUT_LINE('err'); => RAISE NOTICE '%','err'; + +调用传参操作符 => 会转换为 := + +EXISTS IF NOT FOUND => EXISTS + +SQL%NOTFOUND => NOT FOUND + +SQL%FOUND => FOUND + +SQL%ROWCOUNT => V_SQLROWCOUNT + +SYS_REFCURSOR => REFCURSOR + + + +### CREATE TABLE + +``` +CREATE [ GLOBAL TEMPORARY | SHARDED | DUPLICATED ] TABLE + [ schema. ] table + [ SHARING = { METADATA | DATA | EXTENDED DATA | NONE } ] + { relational_table | object_table | XMLType_table } + [ PARENT [ schema. ] table ] ; +``` + +openGauss没有SHARDED、 DUPLICATED的表,转换时会删除此关键字。 + +openGauss没有SHARING参数选项,转换时会删除此参数。 + +这些存储参数全部删除: SEGMENT、PCTFREE、PCTUSED、INITRANS、MAXTRANS、COMPRESS、NOCOMPRESS、NOCACHE、LOGGING、NOLOGGING、NOPARALLEL、PARALLEL、NOMONITORING、TABLESPACE 、STORAGE、LOB、COMPUTE、ENABLE、REVERSE + + + + + + ### CREATE VIEW + +``` +CREATE [OR REPLACE] + [[NO] FORCE] [ EDITIONING | EDITIONABLE [ EDITIONING ] | NONEDITIONABLE ] +VIEW [schema.] view + [ SHARING = { METADATA | DATA | EXTENDED DATA | NONE } ] + [ ( { alias [ VISIBLE | INVISIBLE ] [ inline_constraint... ] + | out_of_line_constraint + } + [, { alias [ VISIBLE | INVISIBLE ] [ inline_constraint...] + | out_of_line_constraint + } + ] + ) + | object_view_clause + | XMLType_view_clause + ] + [ DEFAULT COLLATION collation_name ] + [ BEQUEATH { CURRENT_USER | DEFINER } ] +AS subquery [ subquery_restriction_clause ] + [ CONTAINER_MAP | CONTAINERS_DEFAULT ] ; +``` + +`CREATE OR REPLACE` 与`VIEW`关键词之间的参数,`[[NO] FORCE] [ EDITIONING | EDITIONABLE [ EDITIONING ] | NONEDITIONABLE ]` 转换时会被删除。 + +`[ SHARING = { METADATA | DATA | EXTENDED DATA | NONE } ]`转换时会被删除。 + + + +### CREATE SEQUENCE + +``` +CREATE SEQUENCE [ schema. 
] sequence
+ [ SHARING = { METADATA | DATA | NONE } ]
+ [ { INCREMENT BY | START WITH } integer
+ | { MAXVALUE integer | NOMAXVALUE }
+ | { MINVALUE integer | NOMINVALUE }
+ | { CYCLE | NOCYCLE }
+ | { CACHE integer | NOCACHE }
+ | { ORDER | NOORDER }
+ | { KEEP | NOKEEP }
+ | { SESSION | GLOBAL }
+ ]...
+;
+```
+
+openGauss不支持`[ SHARING = { METADATA | DATA | NONE } ]`,转换时会删除。
+
+openGauss不支持参数` NOCACHE, ORDER NOORDER,KEEP, NOKEEP, SESSION, GLOBAL `,转换时会删除
+
+
+
+ ### ALTER INDEX
+
+```
+ALTER INDEX [ schema. ]index
+ { { deallocate_unused_clause
+ | allocate_extent_clause
+ | shrink_clause
+ | parallel_clause
+ | physical_attributes_clause
+ | logging_clause
+ | partial_index_clause
+ } ...
+ | rebuild_clause [ { DEFERRED | IMMEDIATE } INVALIDATION]
+ | PARAMETERS ( 'ODCI_parameters' )
+ | COMPILE
+ | { ENABLE | DISABLE }
+ | UNUSABLE [ ONLINE ] [ { DEFERRED | IMMEDIATE } INVALIDATION ]
+ | VISIBLE | INVISIBLE
+ | RENAME TO new_name
+ | COALESCE [ CLEANUP ] [ parallel_clause ]
+ | { MONITORING | NOMONITORING } USAGE
+ | UPDATE BLOCK REFERENCES
+ | alter_index_partitioning
+ }
+ ;
+```
+
+rebuild_clause 的三个参数,转换时会被删除。
+
+ 如:` alter index idx rebuild immediate invalidation; => alter index idx rebuild; `
+
+ENABLE / VISIBLE 关键字支持改为 `REBUILD`
+
+ 如:`alter index idx enable; => alter index idx rebuild;`
+
+DISABLE / INVISIBLE 支持改为 UNUSABLE
+
+ 如:`alter index idx disable => alter index idx unusable;`
+
+openGauss的UNUSABLE后面没有参数,转换时会删除
+
+ 如:`alter index idx unusable online; => alter index idx unusable;`
+
+
+
+### ALTER SEQUENCE
+
+```
+ALTER SEQUENCE [ schema. ] sequence
+ { INCREMENT BY integer
+ | { MAXVALUE integer | NOMAXVALUE }
+ | { MINVALUE integer | NOMINVALUE }
+ | { CYCLE | NOCYCLE }
+ | { CACHE integer | NOCACHE }
+ | { ORDER | NOORDER }
+ | { KEEP | NOKEEP }
+ | { SESSION | GLOBAL }
+ } ...
+;
+```
+
+openGauss不支持参数 ` { MINVALUE integer | NOMINVALUE }`,`{ CYCLE | NOCYCLE }`,`NOCACHE`, `{ ORDER | NOORDER }`, `{ KEEP | NOKEEP }`, `{ SESSION | GLOBAL }`,转换时会删掉。
+
+
+
+### DROP INDEX
+
+```
+DROP INDEX [ schema. ] index [ ONLINE ] [ FORCE ] [ { DEFERRED | IMMEDIATE } INVALIDATION ];
+```
+
+openGauss仅支持 DROP INDEX name;后面的参数 ONLINE \ FORCE \ DEFERRED \ IMMEDIATE \ INVALIDATION 转换时都会删除。
+
+
+
+### DROP MATERIALIZED VIEW
+
+```
+DROP MATERIALIZED VIEW [ schema. ] materialized_view
+ [ PRESERVE TABLE ] ;
+```
+
+openGauss不支持加参数,后面的preserve table会被删掉。
+
+
+
+### DROP TABLE
+
+```
+DROP TABLE [ schema. ] table [ CASCADE CONSTRAINTS ] [ PURGE ] ;
+```
+
+ 后面的参数 cascade constraints、purge 会删掉。
+
+
+
+### DROP TABLESPACE
+
+```
+DROP TABLESPACE tablespace
+ [ { DROP | KEEP } QUOTA ]
+ [ INCLUDING CONTENTS [ { AND | KEEP } DATAFILES ] [ CASCADE CONSTRAINTS ] ]
+ ;
+```
+
+openGauss只支持 DROP TABLESPACE tablespace;
+
+各种选项参数都会被删除。
+
+
+
+### DROP TYPE
+
+```
+DROP TYPE [ schema. ] type_name [ FORCE | VALIDATE ] ;
+```
+
+openGauss不支持 FORCE \ VALIDATE参数,转换时会删除。
+
+
+
+### DROP VIEW
+
+```
+DROP VIEW [ schema. ] view [ CASCADE CONSTRAINTS ] ;
+```
+
+ 参数只保留cascade;
+
+
+
+### ANALYZE
+
+```
+ANALYZE
+ { { TABLE [ schema. ] table
+ | INDEX [ schema. ] index
+ } [ partition_extension_clause ]
+ | CLUSTER [ schema. 
] cluster + } + { validation_clauses + | LIST CHAINED ROWS [ into_clause ] + | DELETE [ SYSTEM ] STATISTICS + } ; +``` + +openGauss在analyze后面不支持添加 TABLE \ INDEX关键字,转换时会删除这两个关键字。 + + + +### SELECT + +``` + [ with_clause ] +SELECT [ hint ] [ { { DISTINCT | UNIQUE } | ALL } ] select_list +FROM { table_reference | join_clause | ( join_clause ) } + [ , { table_reference | join_clause | (join_clause) } ] ... + [ where_clause ] + [ hierarchical_query_clause ] + [ group_by_clause ] + [ model_clause ] +``` + + + +对于某些情况,oracle需要加FROM DUAL,我们不需要,转换时会给删除。 + +​ 如: `select 1 from dual; => select 1;` + + + +我们不支持unique关键字,会转变为distinct + +​ 如:`select unique * from tb; => select distinct * from tb;` + + + +### EXECUTE + +``` +EXECUTE IMMEDIATE function(); +``` + +我们不支持IMMEDIATE参数,会给删除。 + + + +### GRANT + +``` +GRANT USAGE ON LANGUAGE SPL TO … +``` + +openGauss 内叫做plpgsql,会将SPL关键字转换为 PLPGSQL; + + + +### REVOKE + +``` +REVOKE USAGE ON LANGUAGE SPL FROM … +``` + +openGauss 内叫做plpgsql,会将SPL关键字转换为 PLPGSQL; + + + +### RENAME + +``` +RENAME old_name TO new_name ; +``` + +支持将rename修改为alter table rename + +​ 如: `rename oldname to newname; => alter table oldname rename to newname;` + + + +### TRUNCATE + +``` +TRUNCATE TABLE [schema.] table + [ {PRESERVE | PURGE} MATERIALIZED VIEW LOG ] + [ {DROP [ ALL ] | REUSE} STORAGE ] [ CASCADE ] ; +``` + +转换时最多仅会保留 truncate table name cascade;其他都会删除。 + + + + + +## 函数转换 + + 有一些系统函数、无参函数等,是有差异的,但语义基本一样,因此可支持做一些映射 + +| **Source** | **openGauss** | 备注 | +| ------------------------- | ------------------------ | ---- | +| Charindex(str1, str2) | Position(str1 in str2) | | +| CURRENT DATE | CURRENT_DATE | | +| CURRENT TIMESTAMP | CURRENT_TIMESTAMP | | +| Convert(varchar, source) | To_char(source) | | +| USER | CURRENT_USER | | +| Getdate() | Now() | | +| ISNULL(expr, replace) | COALESCE(expr, replace) | | +| NVL(expr, expr) | COALESCE(expr, expr) | | +| SYSDATE() | CURRENT_TIMESTAMP() | | +| SYSTIMESTAMP | CURRENT_TIMESTAMP | | + diff --git a/content/zh/post/huzhengchao/title/img1.png b/content/zh/post/huzhengchao/title/img1.png new file mode 100644 index 0000000000000000000000000000000000000000..65e2d4c4751f069c64357704715e2ba99beb511a Binary files /dev/null and b/content/zh/post/huzhengchao/title/img1.png differ diff --git a/content/zh/post/jackey-wu/figures/1-1.png b/content/zh/post/jackey-wu/figures/1-1.png new file mode 100644 index 0000000000000000000000000000000000000000..b98df531cbf09944fb31aad496f73d5b9fe2bd82 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/1-1.png differ diff --git a/content/zh/post/jackey-wu/figures/1-2.png b/content/zh/post/jackey-wu/figures/1-2.png new file mode 100644 index 0000000000000000000000000000000000000000..1b224cde2a35e1147c1882a7961dfe22e479d9c6 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/1-2.png differ diff --git a/content/zh/post/jackey-wu/figures/1-3.png b/content/zh/post/jackey-wu/figures/1-3.png new file mode 100644 index 0000000000000000000000000000000000000000..0de7f3d7874751c66f7a45ba66e0a4c683b2ec40 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/1-3.png differ diff --git a/content/zh/post/jackey-wu/figures/1-4.png b/content/zh/post/jackey-wu/figures/1-4.png new file mode 100644 index 0000000000000000000000000000000000000000..f07d68deefcf0f7788e0455e3b080283ceadfc2d Binary files /dev/null and b/content/zh/post/jackey-wu/figures/1-4.png differ diff --git a/content/zh/post/jackey-wu/figures/1-5.png b/content/zh/post/jackey-wu/figures/1-5.png new file mode 100644 index 
0000000000000000000000000000000000000000..2243b59cd76a2e6b051a4994f1ea132307dd9a55 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/1-5.png differ diff --git a/content/zh/post/jackey-wu/figures/1-6.png b/content/zh/post/jackey-wu/figures/1-6.png new file mode 100644 index 0000000000000000000000000000000000000000..59e493a93233705ebd0acecc1ad164e3c15218da Binary files /dev/null and b/content/zh/post/jackey-wu/figures/1-6.png differ diff --git a/content/zh/post/jackey-wu/figures/2-1.png b/content/zh/post/jackey-wu/figures/2-1.png new file mode 100644 index 0000000000000000000000000000000000000000..4747fbe0f5af89b184d3a6db4e5f3bc43c7fb3cc Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-1.png differ diff --git a/content/zh/post/jackey-wu/figures/2-10.png b/content/zh/post/jackey-wu/figures/2-10.png new file mode 100644 index 0000000000000000000000000000000000000000..4fc184d3eaaca98fc418055a3b5003993a9e0f5a Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-10.png differ diff --git a/content/zh/post/jackey-wu/figures/2-11.png b/content/zh/post/jackey-wu/figures/2-11.png new file mode 100644 index 0000000000000000000000000000000000000000..039b6598251165735e6c8b451c143bd663a087e9 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-11.png differ diff --git a/content/zh/post/jackey-wu/figures/2-12.png b/content/zh/post/jackey-wu/figures/2-12.png new file mode 100644 index 0000000000000000000000000000000000000000..22ee4e41a33fd329811b77aeef126868ffc1ffc3 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-12.png differ diff --git a/content/zh/post/jackey-wu/figures/2-2.png b/content/zh/post/jackey-wu/figures/2-2.png new file mode 100644 index 0000000000000000000000000000000000000000..b061f12dd34581f0da5e7431df82b16d08ee5ae6 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-2.png differ diff --git a/content/zh/post/jackey-wu/figures/2-3.png b/content/zh/post/jackey-wu/figures/2-3.png new file mode 100644 index 0000000000000000000000000000000000000000..fcc57c2819f251c388590a2e9e41936fb4412e1b Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-3.png differ diff --git a/content/zh/post/jackey-wu/figures/2-4.png b/content/zh/post/jackey-wu/figures/2-4.png new file mode 100644 index 0000000000000000000000000000000000000000..ea224537becad900a93e79544836ceb176dbac48 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-4.png differ diff --git a/content/zh/post/jackey-wu/figures/2-5.png b/content/zh/post/jackey-wu/figures/2-5.png new file mode 100644 index 0000000000000000000000000000000000000000..192e8af27052f30cd507b183908193edece86235 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-5.png differ diff --git a/content/zh/post/jackey-wu/figures/2-6.png b/content/zh/post/jackey-wu/figures/2-6.png new file mode 100644 index 0000000000000000000000000000000000000000..fb22515e5ac995ae15d276b05b8357fbdd73c789 Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-6.png differ diff --git a/content/zh/post/jackey-wu/figures/2-7.png b/content/zh/post/jackey-wu/figures/2-7.png new file mode 100644 index 0000000000000000000000000000000000000000..160ee6561a8338973b0634af771213a4d2c6cebf Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-7.png differ diff --git a/content/zh/post/jackey-wu/figures/2-8.png b/content/zh/post/jackey-wu/figures/2-8.png new file mode 100644 index 0000000000000000000000000000000000000000..7e22e16bd44476c66ef2a40004768ba5d8fbe32d Binary files 
/dev/null and b/content/zh/post/jackey-wu/figures/2-8.png differ
diff --git a/content/zh/post/jackey-wu/figures/2-9.png b/content/zh/post/jackey-wu/figures/2-9.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0f93a6a7295aa1674dfcfdf0b6be8cd1b9c2581
Binary files /dev/null and b/content/zh/post/jackey-wu/figures/2-9.png differ
diff --git "a/content/zh/post/jackey-wu/\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204SQL\350\247\243\346\236\220\346\272\220\347\240\201\345\210\206\346\236\220.md" "b/content/zh/post/jackey-wu/\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204SQL\350\247\243\346\236\220\346\272\220\347\240\201\345\210\206\346\236\220.md"
new file mode 100644
index 0000000000000000000000000000000000000000..a79c31cd1e840c120e32091f63a1040e8dca55ab
--- /dev/null
+++ "b/content/zh/post/jackey-wu/\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204SQL\350\247\243\346\236\220\346\272\220\347\240\201\345\210\206\346\236\220.md"
@@ -0,0 +1,112 @@
++++
+title = "开源数据库OpenGauss的SQL解析源码分析"
+date = "2021-12-06"
+tags = ["OpenGauss入门"]
+archives = "2021-12"
+author = "Jackey WU"
+summary = "开源数据库OpenGauss的SQL解析源码分析"
++++
+
+# 开源数据库OpenGauss的SQL解析源码分析
+
+## OpenGauss数据库体系概述
+openGauss是关系型数据库,采用客户端/服务器,单进程多线程架构;支持单机和一主多备部署方式,同时支持备机可读、双机高可用等特性。
+
+从代码体系结构的角度来说,openGauss的第一个组成部分是通信管理。
+
+openGauss查询响应是使用“单个用户对应一个服务器线程”的简单客户端/服务器模型实现的。由于我们无法预先知道需要建立多少连接,所以必须使用主进程(GaussMaster)来监听指定TCP/IP(传输控制协议/网际协议)端口上的传入连接,只要检测到连接请求,主进程就会生成一个新的服务器线程。服务器线程使用信号量和共享内存相互通信,以确保整个并发数据访问期间的数据完整性。
+
+除开通信管理之外,OpenGauss的一大组成部分就是SQL引擎,承担着查询解析、查询分流、查询重写、查询优化和查询执行等任务,之后剩下的就是存储引擎了。
+
+**SQL组成**<br>
+![](../figures/2-1.png "SQL组成") +## SQL模块简介 +SQL引擎作为数据库系统的入口,主要承担了对SQL语言进行解析、优化、生成执行计划的作用。对于用户输入的SQL语句,SQL引擎会对语句进行语法/语义上的分析以判断是否满足语法规则等,之后会对语句进行优化以便生成最优的执行计划给执行器执行。故SQL引擎在数据库系统中承担着“接收信息,下达命令”的作用,是数据库系统的“脊柱神经”。 + +SQL引擎负责对用户输入的SQL语言进行编译,在编译的过程中需要对输入的SQL语言进行词法分析、语法分析、语义分析,从而生成逻辑执行计划,逻辑执行计划经过代数优化和代价优化之后,产生物理执行计划,然后将执行计划交给执行引擎进行执行。 + +就SQL引擎的组成而言,SQL引擎可以分为两部分。 + +1. **SQL解析(查询解析)**
+SQL解析对输入的SQL语句进行词法分析、语法分析、语义分析,获得查询解析树或者逻辑计划。SQL查询语句解析的解析器(parser)阶段包括如下:
+**a. 词法分析**:从查询语句中识别出系统支持的关键字、标识符、操作符、终结符等,每个词确定自己固有的词性。
+**b. 语法分析**:根据SQL语言的标准定义语法规则,使用词法分析中产生的词去匹配语法规则,如果一个SQL语句能够匹配一个语法规则,则生成对应的语法树(Abstract Synatax Tree,AST)。
+**c. 语义分析**:对语法树(AST)进行检查与分析,检查AST中对应的表、列、函数、表达式是否有对应的元数据(指数据库中定义有关数据特征的数据,用来检索数据库信息)描述,基于分析结果对语法树进行扩充,输出查询树。主要检查的内容包括: + ①检查关系的使用:FROM子句中出现的关系必须是该查询对应模式中的关系或视图。 + ②检查与解析属性的使用:在SELECT句中或者WHERE子句中出现的各个属性必须是FROM子句中某个关系或视图的属性。 + ③检查数据类型:所有属性的数据类型必须是匹配的。
+openGauss中参照SQL语言标准实现了大部分SQL的主要语法功能,并结合应用过程中的具体实践对SQL语言进行了扩展。 +
+2. **查询优化**
+优化器(optimizer)的任务是创建最佳执行计划。一个给定的SQL查询(以及一个查询树)实际上可以以多种不同的方式执行,每种方式都会产生相同的结果集。如果在计算上可行,则查询优化器将检查这些可能的执行计划中的每一个,最终选择预期运行速度最快的执行计划。
+优化主要分成了逻辑优化和物理优化两个部分,从关系代数和物理执行两个角度对SQL进行优化,进而结合自底向上的动态规划方法和基于随机搜索的遗传算法对物理路径进行搜索,从而获得较好的执行计划。 + +## SQL解析源码解读 +**SQL解析主流程**
+![](../figures/2-2.png "SQL解析主流程") + +### 代码文件 +``` +src/common/backend/parser/scan.l 定义词法结构,采用Lex编译后生成scan.cpp文件 +src/common/backend/parser/gram.y 定义语法结构,采用Yacc编译后生成gram.cpp文件 +src/common/backend/parser/scansup.cpp 提供词法分析的常用函数 +src/common/backend/parser/parser.cpp 词法、语法分析的主入口文件,入口函数是raw_parser +src/common/backend/parser/analyze.cpp 语义分析的主入口文件,入口函数是parse_analyze +``` +### 词法分析 +1. **parser.cpp** +openGauss采用flex和bison两个工具来完成词法分析和语法分析的主要工作。对于用户输入的每个SQL语句,它首先交由flex工具进行词法分析。flex工具通过对已经定义好的词法文件进行编译,生成词法分析的代码。

+2. **scan.l(词法分析)** +openGauss中的词法文件是scan.l,它根据SQL语言标准对SQL语言中的关键字、标识符、操作符、常量、终结符进行了定义和识别。代码如下:
+**定义数值类型**
+![](../figures/2-3.png "定义数值类型")
+**定义操作符**
+![](../figures/2-4.png "定义操作符")
+其中的operator即为操作符的定义,从代码中可以看出,operator是由多个op_chars组成的,而op_chars则是[~!@#^&|`?+-*/%<>=]中的任意一个符号。但这样的定义还不能满足SQL的词法分析的需要,因为并非多个op_chars的组合就能形成一个合法的操作符,因此在scan.l中会对操作符进行更明确的定义(或者说检查)。
+**operator**
+![](../figures/2-5.png "operator")
+词法分析其实就是将一个SQL划分成多个不同的token,每个token会有自己的词性,在scan.l中定义了如下词性。
+**词法分析词性说明**
+![](../figures/2-6.png "词法分析词性说明")
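+
+以一条简单语句示意分词结果(token划分为示意,具体词性以scan.l的定义为准):
+
+```sql
+-- select、from、where被识别为关键字,id、tb为标识符,
+-- >=为操作符,10为常量,分号为终结符
+select id from tb where id >= 10;
+```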
+ +### 语法分析 +1. **gram.y**
+在openGauss中,定义了一系列表达Statement的结构体,这些结构体通常以Stmt作为命名后缀,用来保存语法分析结果。
+以SELECT查询为例,它对应了一个Statement结构体,这个结构体可以看作一个多叉树,每个叶子节点都表达了SELECT查询语句中的一个语法结构,对应到gram.y中,它会有一个SelectStmt,代码如图所示。
+**SelectStmt**
+![](../figures/2-7.png "SelectStmt")
+simple_select除了上面的基本形式,还可以表示为其他形式,如VALUES子句、关系表达式、多个SELECT语句的集合操作等,这些形式会进一步的递归处理,最终转换为基本的simple_select形式。代码如图所示。
+**递归集合**
+![](../figures/2-8.png "递归集合")
+在成功匹配simple_select语法结构后,将会创建一个Statement结构体,将各个子句进行相应的赋值。对simple_select而言,目标属性、FROM子句、WHERE子句是最重要的组成部分。
+以目标属性为例分析,对应语法定义中的target_list,由若干个target_el组成。target_el可以定义为表达式、取别名的表达式和“*”等。当成功匹配到一个target_el后,会创建一个ResTarget结构体,用于存储目标对象的全部信息。代码如图所示。
+**ResTarget**
+![](../figures/2-9.png "ResTarget")

+2. **parser.y**
+simple_select的其他子句,如distinctClause、groupClause、havingClause等,语法分析方式类似。而其他SQL命令,如CREATE、INSERT、UPDATE、DELETE等,处理方式与SELECT命令类似。
+对于任何复杂的SQL语句,都可以拆解为多个基本的SQL命令执行。在完成词法分析和语法分析后,raw_parser函数会将所有的语法分析树封装为一个List结构,名为raw_parse_tree_list,返回给exec_simple_query函数,用于后面的语义分析、查询重写等步骤,该List中的每个ListCell包含一个语法树。
+ +### 语义分析 +在完成词法分析和语法分析后,parse_analyze 函数会根据语法树的类型调用transformSelectStmt 将parseTree 改写为查询树。在重写过程中,parse_analyze不仅会检查SQL命令是否满足语义要求,还会根据语法树对象获取更利于执行的信息,如表的OID、列数、 等等。在某一示实例中,查询树对应的内存组织结构如图所示。目标属性、FROM 子句和WHERE子句的语义分析结果将分别存储在结构TargetEntry、RangeTblEntry、FromExpr中。
+**查询树内存组织结构图**
+![](../figures/2-10.png "查询树内存组织结构图")
+在完成语义分析后,SQL解析过程也就完成,SQL引擎开始执行查询优化。 +1. **analyze.cpp(语义分析)**
+语义分析模块在词法分析和语法分析之后执行,用于检查SQL命令是否符合语义规定,能否正确执行。负责语义分析的是parse_analyze函数,位于analyze.cpp下。parse_analyze会根据词法分析和语法分析得到的语法树,生成一个ParseState结构体用于记录语义分析的状态,再调用transformStmt函数,根据不同的命令类型进行相应的处理,最后生成查询树。

+2. **ParseState 结构体**
+ParseState保存了许多语义分析的中间信息,如原始SQL命令、范围表、连接表达式、原始WINDOW子句、FOR UPDATE/FOR SHARE子句等。
+ParseState结构体在语义分析入口函数parse_analyze下被初始化,在transformStmt函数下根据不同的Stmt存储不同的中间信息,完成语义分析后再被释放。ParseState结构图所示。
+**ParseState**
+![](../figures/2-11.png "ParseState")

+3. **ParseTree 语法树——Node结构**
+在语义分析过程中,语法树parseTree使用Node节点进行包装。Node结构只有一个类型为NodeTag枚举变量的字段,用于识别不同的处理情况。如图所示。
+**Node结构体(nodes.h)**
+![](../figures/2-12.png "ParseState")
+以SelectStmt为例, 其对应的NodeTag值为T_SelectStmt。
+transformStmt函数会根据NodeTag的值,将语法树转化为不同的Stmt结构体,调用对应的语义分析函数进行处理。
+openGauss在语义分析阶段处理的NodeTag情况有九种,参照着不同地NodeTag情况去调用transformStmt函数,继而完成语义分析工作。
+ +## References & Thanks +;
+;
+. diff --git "a/content/zh/post/jackey-wu/\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204\345\256\211\350\243\205\345\222\214\350\277\220\350\241\214.md" "b/content/zh/post/jackey-wu/\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204\345\256\211\350\243\205\345\222\214\350\277\220\350\241\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..7a7b1c30f59a65ff176f31d2ffe11f96a4b1b071 --- /dev/null +++ "b/content/zh/post/jackey-wu/\345\274\200\346\272\220\346\225\260\346\215\256\345\272\223OpenGauss\347\232\204\345\256\211\350\243\205\345\222\214\350\277\220\350\241\214.md" @@ -0,0 +1,226 @@ ++++ +title = "开源数据库OpenGauss的安装和运行" +date = "2021-12-06" +tags = ["OpenGauss入门"] +archives = "2021-12" +author = "Jackey WU" +summary = "开源数据库OpenGauss的安装和运行" ++++ + +# 开源数据库OpenGauss的安装和运行 + +## 安装环境 +### OpenGauss版本 +openGauss-2.1.0-openEuler-64bit +下载地址: +``` +https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.1.0-openEuler-64bit.tar.bz2 + +``` + +### 虚拟机信息 +虚拟机软件:VMware Workstation 16 Player +虚拟机软件版本:16.1.2 build-17966106 +虚拟机配置信息:内存8GB、处理器1核、硬盘40GB + +![](../figures/1-1.png) + +### 操作系统信息 +openEuler-20.03-LTS +下载链接: +``` +https://mirror.iscas.ac.cn/openeuler/openEuler-20.03-LTS/ISO/x86_64/openEuler-20.03-LTS-x86_64-dvd.iso + +``` + +## 安装详细步骤 +首先需要说明,这一块儿大部分都没有截图,因为安装OpenGauss已经是很久以前的事情了,下面有的截图也只是后面补截取的。 +### 虚拟机准备 +本次实验我选择了VMware Workstation 16 Player软件搭建虚拟机,在虚拟机器的准备中,和一般部署虚拟机的操作没有什么区别,资源设定如下: + 内存 8GB + 硬盘 40GB + 处理器内核数量 1个 + +### 安装openEuler +这里也就是下载好openeuler的完整镜像之后在VM中正常安装即可,安装过程中是有详细的引导的,需要自行操作基本只有分区设置(自动分区)和地区设置,以及用户和密码的设置。 +安装完成后以预设用户登录即可。 +需要说明的是,我选择了使用无图形界面的openEuler,并通过SSH连接的方式在MobaXterm软件中对服务器(也就是部署好openEuler的虚拟机)进行操作。 + +![](../figures/1-2.png "安装好的openEuler") + +### 系统环境配置 +1. **关闭防火墙** +``` +[root@node1 ~]# systemctl disable firewalld.service +[root@node1 ~]# systemctl stop firewalld.service +``` +2. **关闭selinux** +``` +[root@node1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config +``` +3. **设置字符集** +``` +[root@node1 ~]#cat >> /etc/profile < mtu 1500 +``` +7. **设置root为远程用户** +去掉sshd_config文件(这个文件是关于ssh链接的配置)中PermitRootLogin的注释符#并把no改为yes +``` +[root@node1 ~]# vim /etc/ssh/sshd_config + PermitRootLogin yes +``` +8. **操作系统参数设置** +``` +[root@node1 ~]# cat >> /etc/sysctl.conf <> /etc/security/limits.conf +[root@node1 ~]# echo "* hard nofile 1000000" >> /etc/security/limits.conf +[root@node1 ~]# echo "* soft nprc 60000" >> /etc/security/limits.d/90-nproc.conf +``` +9. **修改主机名** +``` +[root@node1 ~]# echo "node1" > /etc/hostname +[root@node1 ~]# echo "192.168.111.132 node1" >>/etc/hosts +``` +10. **重启** +``` +[root@node1 ~]# reboot +``` + +### 安装openGauss +1. **创建配置文件** +``` +[root@node1 ~]# vim /opt/clusterconfig.xml +``` +以下为预先编辑好的配置文件内容,我通过WinSCP直接进行了修改和覆盖(最开始做实验的时候是XShell配合着WinSCP用的)。 +
![](../figures/1-3.png "配置文件内容")
+2. **下载安装包(2.1.0版本)**
+<br>
下载地址 +``` +https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.1.0-openEuler-64bit.tar.bz2 +``` +下载完之后提前传到虚拟机中并记录好路径。
+3. **创建用户组和目录**
+```
+[root@node1 ~]# groupadd dbgrp
+[root@node1 ~]# useradd -g dbgrp -d /home/omm -m -s /bin/bash omm
+[root@node1 ~]# echo "omm" | passwd --stdin omm
+[root@node1 ~]# mkdir -p /opt/software/openGauss
+[root@node1 ~]# chmod 755 -R /opt/software
+[root@node1 ~]# chown -R omm:dbgrp /opt/software/openGauss
+```
+4. **解压安装包到指定目录**
+```
+[root@node1 ~]# tar -jxvf /mnt/hgfs/share/openGauss-2.1.0-openEuler-64bit.tar.bz2 -C /opt/software/openGauss
+```
+5. **设置lib库**
+```
+[root@node1 ~]# vim /etc/profile
+ export LD_LIBRARY_PATH=/opt/software/openGauss/script/gspylib/clib:$LD_LIBRARY_PATH
+```
+6. **预安装**
+```
+[root@node1 ~]# cd /opt/software/openGauss/script
+[root@node1 ~]# python3 gs_preinstall -U omm -G dbgrp -X /opt/clusterconfig.xml
+```
+之后初始化过程中需要进行交互,具体而言:
+ 遇到[yes/no],就选yes;
+ 让输入root密码,就输入root密码;
+ 让输入omm密码,就输入omm密码。
+Ps:如果预安装失败,就执行 gs_checkos -i A -h node1 --detail 命令查看失败原因<br>
+7. **安装openGauss** +``` +[root@node1 ~]# su - omm +[omm@node1 ~]# gs_install -X /opt/clusterconfig.xml +``` +执行的时候需要设置初始密码,复杂度要求和openEuler系统一样比较高,要至少三种字符和最少8个字符。
+8. **重启数据库** +``` +[root@node1 ~]# su - omm +[omm@node1 ~]# gs_ctl start -D "/opt/huawei/install/data/db1" +``` +9. **登录数据库** +``` +[root@node1 ~]# gsql -d postgres -p 26000 +``` + +### 基本数据库操作验证 +1. **启停数据库** +``` +[root@node1 ~]#gs_ctl start -D /opt/huawei/install/data/db1/ +[root@node1 ~]#gs_ctl stop -D /opt/huawei/install/data/db1/ +``` +2. **切换omm系统用户登录数据库** +``` +[root@node1 ~]#gs_guc set -N all -I all -h "host all jack 192.168.111.132/32 sha256" +[root@node1 ~]#gsql -d postgres -p 26000 +``` +3. **创建用户并赋予用户管理权限**
i. 创建用户jackey 并设置密码为123321jackey.
+ ```
+ [root@node1 ~]#create user jackey identified by '123321jackey.';
+ ```
+ ii. 新用户默认没有创建数据库表的权限,需要修改其权限
+ ```
+ [root@node1 ~]#ALTER ROLE jackey SYSADMIN;
+ ```
+ 新用户的权限是否生效,可以用下面的查询确认。
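+
+ 该查询为示意写法(假设在gsql中执行,rolsystemadmin等字段名以实际版本的系统表定义为准):
+ ```
+ select rolname, rolsystemadmin from pg_roles where rolname = 'jackey';
+ ```
+
+### 通过JDBC执行SQL
+1. **JDBC包与驱动类**<br>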
+驱动包可以通过两种方式获取:一是在Linux服务器端源代码目录下执行build.sh,编译得到驱动jar包postgresql.jar(位于源代码目录下);二是直接从发布包中获取,包名为openGauss-x.x.x-操作系统版本号-64bit-Jdbc.tar.gz。
+驱动包与PostgreSQL保持兼容,其中类名、类结构与PostgreSQL驱动完全一致,曾经运行于PostgreSQL的应用程序可以直接移植到当前系统使用。
+就驱动类而言,在创建数据库连接之前,需要加载数据库驱动类“org.postgresql.Driver”。

+2. **加载驱动**
+在创建数据库连接之前,需要先加载数据库驱动程序。加载驱动有两种方法:
+ i. 在代码中创建连接之前任意位置隐式装载:Class.forName("org.postgresql.Driver");
+ ii. 在JVM启动时通过参数传递:java -Djdbc.drivers=org.postgresql.Driver jdbctest

+3. **连接数据库**
+在创建数据库连接之后,才能使用它来执行SQL语句操作数据。JDBC提供了三个方法,用于创建数据库连接。
+ i. DriverManager.getConnection(String url); + ii. DriverManager.getConnection(String url, Properties info); + iii. DriverManager.getConnection(String url, String user, String password);

+4. **示例:基于openGauss提供的JDBC接口连接数据库。**
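+
+下面给出一段最小化的连接示意代码,与截图中的示例相对应。代码中的IP、端口、用户名和密码沿用本文前面的环境设定,均为演示用的假设值,请按实际环境修改:
+
+```
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+public class ConnTest {
+    public static void main(String[] args) throws Exception {
+        // 加载openGauss使用的PostgreSQL兼容驱动类
+        Class.forName("org.postgresql.Driver");
+        // URL格式:jdbc:postgresql://主机IP:端口/数据库名
+        String url = "jdbc:postgresql://192.168.111.132:26000/postgres";
+        // 用户名与密码为前文创建的示例用户,按实际环境替换
+        try (Connection conn = DriverManager.getConnection(url, "jackey", "123321jackey.");
+             Statement stmt = conn.createStatement();
+             ResultSet rs = stmt.executeQuery("select version()")) {
+            while (rs.next()) {
+                System.out.println(rs.getString(1));
+            }
+        }
+    }
+}
+```
+
+编译运行时需要将驱动包加入classpath,例如:java -cp .:postgresql.jar ConnTest。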
+![](../figures/1-4.png "使用JDBC接口连接数据库") +![](../figures/1-5.png "使用JDBC接口连接数据库") + +## 遇到的问题和解决办法 +1. **通过SSH连接虚拟机进行操作时提示access denied无法成功连接**
+![](../figures/1-6.png "access denied")
+这个问题很早就出现过,我最开始考虑的自然是密码输错了,后来查阅资料发现是前面提到过的sshd_config文件的配置有问题,主要是这几个点:
+ UsePAM yes #需要打开
+ PasswordAuthentication yes #需要打开
+ ChallengeResponseAuthentication no #需要设置为no(关闭)
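+
+补充一点:修改sshd_config后,通常还需要重启sshd服务,新的配置才会生效(通用做法,供参考):
+
+```
+[root@node1 ~]# systemctl restart sshd
+```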
+在修改之后自然也就得到了解决,能够顺利进行下去。
+但是值得一说的是,最近写报告时想补几张截图,却发现又连接不上了,还是同样的access denied提示。我首先检查了/etc/ssh/sshd_config文件,发现UsePAM yes变成了UsePAM no(据网上的说法,这可能与修改过系统密码有关)。我把no又改回了yes,但这次并没有解决问题,直到目前我也没能重新通过SSH连上这台虚拟机。
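+
+遇到这类问题时,除了核对sshd_config,还可以在服务器端查看sshd相关日志来定位拒绝原因(以下为通用排查命令,供参考):
+
+```
+[root@node1 ~]# journalctl -u sshd -e
+[root@node1 ~]# tail -n 50 /var/log/secure
+```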

+2. **用户组部署出错**
+如前面所说,数据库安装完成后,默认生成名称为postgres的数据库。第一次连接数据库时,通过gsql -d postgres -p 26000命令(其中postgres为需要连接的数据库名称,26000为数据库主节点的端口号,即XML配置文件中的dataPortBase的值)就可以连接到此数据库。
+如果成功连接则会显示类似如下的信息。 +``` +gsql ((openGauss x.x.x build 290d125f) compiled at 2021-03-08 02:59:43 commit 2143 last mr 131 +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. +``` +但我当时多次尝试后其实并没有成功,所以我参考了opengauss松鼠会的技术人员撰写的安装脚本,对照着修改了用户组部署那一部分的内容,最后问题得到了解决,能够正常安装完成并得到上述信息验证。 + +## References & Thanks +; +. diff --git a/content/zh/post/jiajunfeng/figures/20210402-933115a3-e28a-41ea-b721-ce28f0a6724b.png b/content/zh/post/jiajunfeng/figures/20210402-933115a3-e28a-41ea-b721-ce28f0a6724b.png new file mode 100644 index 0000000000000000000000000000000000000000..26fe5babc0732d378a999c66482623a03dd3e5c4 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/20210402-933115a3-e28a-41ea-b721-ce28f0a6724b.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr1.png b/content/zh/post/jiajunfeng/figures/wdr1.png new file mode 100644 index 0000000000000000000000000000000000000000..105a49cf5c619c21ab2e4074632e93b0a83b241a Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr1.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr10.png b/content/zh/post/jiajunfeng/figures/wdr10.png new file mode 100644 index 0000000000000000000000000000000000000000..c5c142a7ed812c431d4e5f2ee20240b6a9408a8d Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr10.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr11.png b/content/zh/post/jiajunfeng/figures/wdr11.png new file mode 100644 index 0000000000000000000000000000000000000000..eed1b52f884294265c5ff8b0e430006f0770be20 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr11.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr12.png b/content/zh/post/jiajunfeng/figures/wdr12.png new file mode 100644 index 0000000000000000000000000000000000000000..13389b848c1e9a16200783044504df90670065ce Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr12.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr13.png b/content/zh/post/jiajunfeng/figures/wdr13.png new file mode 100644 index 0000000000000000000000000000000000000000..e993fa3a41d3b5fbc79a16756207f057c00c5cc1 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr13.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr14.png b/content/zh/post/jiajunfeng/figures/wdr14.png new file mode 100644 index 0000000000000000000000000000000000000000..d5d86eca385162223e62bea8af92495b56cfdf5a Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr14.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr15.png b/content/zh/post/jiajunfeng/figures/wdr15.png new file mode 100644 index 0000000000000000000000000000000000000000..66f5551a7a3fe89c81e1221f81d9e831fc054eed Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr15.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr16.png b/content/zh/post/jiajunfeng/figures/wdr16.png new file mode 100644 index 0000000000000000000000000000000000000000..d409e65747fc3e931fd08672b0e6a63e1cea03a0 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr16.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr17.png b/content/zh/post/jiajunfeng/figures/wdr17.png new file mode 100644 index 0000000000000000000000000000000000000000..b63729eab0f5d1a45e6c4eb4b3dee7aab754fb52 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr17.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr18.png b/content/zh/post/jiajunfeng/figures/wdr18.png new file mode 
100644 index 0000000000000000000000000000000000000000..25d482360d7e4c45e1c040b7d524641c6459432c Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr18.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr19.png b/content/zh/post/jiajunfeng/figures/wdr19.png new file mode 100644 index 0000000000000000000000000000000000000000..9c25f8db103ccb7f878c62cc1265145ebfc05576 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr19.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr2.png b/content/zh/post/jiajunfeng/figures/wdr2.png new file mode 100644 index 0000000000000000000000000000000000000000..8a61d3aee05df69f629007c4c6bd66827a2d3c2c Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr2.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr20.png b/content/zh/post/jiajunfeng/figures/wdr20.png new file mode 100644 index 0000000000000000000000000000000000000000..42821a48e1ea4e40e51192c9fa9f1af9ddd40c7e Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr20.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr21.png b/content/zh/post/jiajunfeng/figures/wdr21.png new file mode 100644 index 0000000000000000000000000000000000000000..ce0816b15230eefd6323b7eccd7a5160aff991d9 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr21.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr22.png b/content/zh/post/jiajunfeng/figures/wdr22.png new file mode 100644 index 0000000000000000000000000000000000000000..43d78da196561602a52593072de565b4581698dd Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr22.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr3.png b/content/zh/post/jiajunfeng/figures/wdr3.png new file mode 100644 index 0000000000000000000000000000000000000000..b41b59c46ad60988c87d922ebabfb24b82f6b4f2 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr3.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr4.png b/content/zh/post/jiajunfeng/figures/wdr4.png new file mode 100644 index 0000000000000000000000000000000000000000..c0c2300aa84ecbdc3596e3acbab0657a49302bd3 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr4.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr5.png b/content/zh/post/jiajunfeng/figures/wdr5.png new file mode 100644 index 0000000000000000000000000000000000000000..7ae05c5b0786db3de6e622b638093664ec2c15e1 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr5.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr6.png b/content/zh/post/jiajunfeng/figures/wdr6.png new file mode 100644 index 0000000000000000000000000000000000000000..e7317b54b6c06e9e25fbe5fc9571980b7a5d7b5e Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr6.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr7.png b/content/zh/post/jiajunfeng/figures/wdr7.png new file mode 100644 index 0000000000000000000000000000000000000000..fc110e2e577637892d751432c6fff08d877aa42f Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr7.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr8.png b/content/zh/post/jiajunfeng/figures/wdr8.png new file mode 100644 index 0000000000000000000000000000000000000000..48d91d16a1d10ee96f497797dd402995f4e9fd23 Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr8.png differ diff --git a/content/zh/post/jiajunfeng/figures/wdr9.png b/content/zh/post/jiajunfeng/figures/wdr9.png new file mode 100644 index 
0000000000000000000000000000000000000000..51ac8035eb1881b41fb4b932df2ca9e4d34d0859
Binary files /dev/null and b/content/zh/post/jiajunfeng/figures/wdr9.png differ
diff --git a/content/zh/post/jiajunfeng/openGauss+KeepAlived.md b/content/zh/post/jiajunfeng/openGauss+KeepAlived.md
new file mode 100644
index 0000000000000000000000000000000000000000..64260ffce0bc663c21c5ba8a6cc14821c5f1ba38
--- /dev/null
+++ b/content/zh/post/jiajunfeng/openGauss+KeepAlived.md
@@ -0,0 +1,304 @@
++++
+
+title = "openGauss+KeepAlived"
+
+date = "2021-03-08"
+
+tags = ["openGauss+KeepAlived"]
+
+archives = "2021-03"
+
+author = "贾军锋"
+
+summary = "openGauss+KeepAlived"
+
+img = "/zh/post/jiajunfeng/title/img33.png"
+
+times = "12:30"
+
++++
+
+# openGauss+KeepAlived
+
+## 实验环境
+
+操作系统: CentOS 7.6
+
+数据库版本: openGauss 1.1.0
+
+Primary 主机/IP: opengaussdb1/192.168.1.11 \(openGauss主备已部署完毕\)
+
+Standby 主机/IP: opengaussdb2/192.168.1.12 \(openGauss主备已部署完毕\)
+
+>![](public_sys-resources/icon-note.gif) **说明:**
+>不建议在云环境\(如:华为云\)下搭建Keepalived进行测试,本人在云环境下测试发现,Keepalived的VIP无法在云环境下与其他主机通信,云环境下如何使用该VIP建议咨询云服务厂商。在踩坑之后,选择使用本地的VMware Workstation进行简单测试。
+
+## 安装KeepAlived软件
+
+```
+## 在所有节点执行安装
+yum install keepalived -y
+```
+
+## 配置keepalived
+
+>![](public_sys-resources/icon-note.gif) **说明:**
+>采用nopreempt不抢占VIP,主备节点的state均设置为BACKUP。
+
+- 主节点配置文件。
+
+```
+# vi /etc/keepalived/keepalived.conf
+--------------------------------------------
+! Configuration File for keepalived
+## 全局定义
+global_defs {
+    router_id Keepalived_openGauss    #运行 keepalived 服务器的一个标识
+    script_user root                  #执行脚本的用户
+}
+
+## VRRP实例定义
+## 通常如果master服务Down掉后backup会变成master,但是当master服务又好了的时候 master此时会抢占VIP,这样就会发生两次数据库切换。
+## 建议使用nopreempt参数设置为非抢占模式,此时主库从故障中恢复后,不会从新的主库抢回VIP,但这需要将master和backup的state都设置成backup。
+vrrp_instance VI_1 {
+    state BACKUP          #指定Keepalived的角色(BACKUP需大写)
+    interface eth0        #指定 HA 监测的网络接口
+    virtual_router_id 59  #虚拟路由的数字标识,同一个 vrrp_instance 下,MASTER 和 BACKUP 一致
+    nopreempt             #非抢占模式,主库从故障中恢复后,不会从新的主库抢回VIP
+    priority 100          #优先级,备节点需要适当降低优先级
+    advert_int 1          #MASTER 和 BACKUP 负载均衡器同步检查的时间间隔(秒)
+    authentication {      #设置验证码和验证类型
+        auth_type PASS
+        auth_pass 1111
+    }
+    virtual_ipaddress {   #设置虚拟 IP 地址,可以设置多个,每个一行
+        192.168.1.10
+    }
+}
+
+## 虚拟服务器定义
+virtual_server 192.168.1.10 26000 {   #设置虚拟服务器的 IP 和端口,用空格隔开
+    delay_loop 6                      #设置运行情况检查时间,单位是秒
+#   lb_algo rr                        #负载调度算法(轮询)
+#   lb_kind DR                        #负载均衡机制(NAT、TUN、DR)
+    persistence_timeout 50            #会话保持时间(秒)
+    protocol TCP                      #转发协议类型
+    real_server 192.168.1.11 26000 {  #配置服务节点
+        weight 100                    #配置服务节点的权重
+        notify_down /gauss/failoverdb.sh  #故障响应脚本
+        TCP_CHECK {                   #使用TCP_CHECK方式进行健康检查
+            connect_timeout 10        #10秒无响应即超时
+            delay_before_retry 3      #重试间隔时间
+        }
+    }
+}
+```
+
+- 主节点故障切换脚本(仅适用openGauss进程崩溃故障处理,不适用Primary操作系统宕机故障处理)。
+
+```
+vi /gauss/failoverdb.sh
+--------------------------------------------
+#!/bin/bash
+echo "Start to failover openGauss database."
+pkill keepalived
+ssh 192.168.1.12 "su - omm -c 'gs_ctl failover -D /gauss/data/db1'"
+ssh 192.168.1.12 "su - omm -c 'gs_om -t refreshconf'"
+echo 'Failover operation is completed.'
+--------------------------------------------
+chmod 764 /gauss/failoverdb.sh
+```
+
+- 备节点配置文件。
+
+```
+# vi /etc/keepalived/keepalived.conf
+--------------------------------------------
+! 
Configuration File for keepalived +## 全局定义 +global_defs { + router_id Keepalived_openGauss #运行 keepalived 服务器的一个标识 + script_user root #执行脚本的用户 +} + +## VRRP实例定义 +## 通常如果master服务Down掉后backup会变成master,但是当master服务又好了的时候 master此时会抢占VIP,这样就会发生两次数据库切换。 +## 建议使用nopreempt参数设置为非抢占模式,此时主库从故障中恢复后,不会从新的主库抢回VIP,但这需要将master和backup的state都设置成backup。 +vrrp_instance VI_1 { + state BACKUP #指定Keepalived的角色(BACKUP需大写) + interface eth0 #指定 HA 监测的网络接口 + virtual_router_id 59 #虚拟路由的数字标识,同一个 vrrp_instance 下,MASTER 和 BACKUP 一致 + nopreempt #非抢占模式,主库从故障中恢复后,不会从新的主库抢回VIP + priority 60 #优先级,备节点需要适当降低优先级 + advert_int 1 #MASTER 和 BACKUP 负载均衡器同步检查的时间间隔(秒) + authentication { #设置验证码和验证类型 + auth_type PASS + auth_pass 1111 + } + virtual_ipaddress { #设置虚拟 IP 地址,可以设置多个,每个一行 + 192.168.1.10 + } +} + +## 虚拟服务器定义 +virtual_server 192.168.1.10 26000 { #设置虚拟服务器的 IP 和端口,用空格隔开 + delay_loop 6 #设置运行情况检查时间,单位是秒 +# lb_algo rr #负载调度算法(轮询) +# lb_kind DR #负载均衡机制(NAT、TUN、DR) + persistence_timeout 50 #会话保持时间(秒) + protocol TCP #转发协议类型 + real_server 192.168.1.12 26000 { #配置服务节点 + weight 60 #配置服务节点的权重 + notify_down /gauss/failoverdb.sh #虚拟服务故障响应脚本 + MISC_CHECK { ## 使用 MISC_CHECK 方式自定义脚本做健康检查 + misc_path "/gauss/check.sh" ## 检测脚本 + misc_timeout 10 ## 执行脚本的超时时间 + misc_dynamic ## 根据退出状态码动态调整服务器的权重 + } + } +} +-------------------------------------------- +## 备节点选择MISC_CHECK方式的原因: +## 测试发现,当主节点直接断电宕机后,Keepalived的VIP会漂移至备节点,此时如果使用TCP_CHECK方式做健康检查,会因为备机可读的原因使得VIP:26000连接正常,造成keepalived健康检查的误判。 +## 最终导致主节点断电宕机后,备节点虽获取了VIP,但并没有执行openGauss的failover操作,备节点依旧只读,无法对外提供业务。 +## 为了纠正这一点,建议使用MISC_CHECK方式自定义脚本,登录主节点做数据库健康检查(简单示例脚本:/gauss/check.sh) +``` + +- 备节点健康检查脚本\[ ssh登录主节点进行数据库连接检查 \]。 + +``` +vi /gauss/check.sh +------------------------------------------- +ssh 192.168.1.11 "su - omm -c \"gsql -d postgres -p 26000 -t -A -c 'select 1;'\"" +------------------------------------------- +``` + +- 备节点故障切换脚本。 + +``` +vi /gauss/failoverdb.sh +-------------------------------------------- +#!/bin/bash +echo "Start to failover openGauss database." +pkill keepalived +su - omm -c "gs_ctl failover -D /gauss/data/db1" +su - omm -c "gs_om -t refreshconf" +echo 'Failover operation is completed.' +-------------------------------------------- +chmod 764 /gauss/failoverdb.sh +``` + +## openGauss配置 + +- 修改openGauss侦听地址。 + +``` +$ gs_guc set -I all -N all -c "listen_addresses = '0.0.0.0'" +$ gs_guc set -I all -N all -c "local_bind_address = '0.0.0.0'" +``` + +- 修改所有节点replconninfo参数(避免端口冲突)。 + +``` +$ vi /gauss/data/db1/postgresql.conf +-------------------------------------------- +修改:localport --> 26011 +修改:remoteport --> 26011 +-------------------------------------------- +``` + +- 重启openGauss数据库,并检查服务器状态。 + +``` +## 重启openGauss +[omm@prod db1]$ gs_om -t stop && gs_om -t start + +## 检查openGauss状态 +[root@opengaussdb1 ~]# su - omm -c "gs_om -t status --detail" +[ Cluster State ] +cluster_state : Normal +redistributing : No +current_az : AZ_ALL +[ Datanode State ] +node node_ip instance state | +----------------------------------------------------------------------- +1 opengaussdb1 192.168.1.11 6001 /gauss/data/db1 P Primary Normal | +2 opengaussdb2 192.168.1.12 6002 /gauss/data/db1 S Standby Normal + +## 检查KeepAlived进程状态 +[omm@opengaussdb1 ~]$ ps -ef|grep keep|grep -v grep +root 15664 1 0 16:15 ? 00:00:00 /usr/sbin/keepalived -D +root 15665 15664 0 16:15 ? 00:00:00 /usr/sbin/keepalived -D +root 15666 15664 0 16:15 ? 
00:00:00 /usr/sbin/keepalived -D + +## 检查VIP状态 +[root@opengaussdb1 ~]# ip a +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: ens33: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 00:0c:29:da:60:c0 brd ff:ff:ff:ff:ff:ff + inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33 + valid_lft forever preferred_lft forever + inet 192.168.1.10/32 scope global ens33 ## VIP:192.168.1.10 + valid_lft forever preferred_lft forever + inet6 2408:8270:237:ded0:c89c:adab:e7b:8bd6/64 scope global noprefixroute dynamic + valid_lft 258806sec preferred_lft 172406sec + inet6 fe80::c4f2:8ad1:200d:ce9b/64 scope link noprefixroute + valid_lft forever preferred_lft forever +``` + +## 故障模拟测试 + +- 主节点\[192.168.1.11\]操作。 + +``` +## kill数据库进程 +[root@opengaussdb1 ~]# ps -ef|grep gauss +omm 18115 1 4 16:30 ? 00:00:35 /gauss/app/bin/gaussdb -D /gauss/data/db1 -M primary +root 19254 9299 0 16:42 pts/0 00:00:00 grep --color=auto gauss +[root@opengaussdb1 ~]# kill -9 18115 + +## 检查message日志[检测到故障,执行notify_down脚本,并关闭keepalived服务] +# tail -fn 200 /var/log/messages +Feb 19 16:42:57 opengaussdb1 Keepalived_healthcheckers[18816]: TCP connection to [192.168.1.11]:26000 failed. +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: TCP connection to [192.168.1.11]:26000 failed. +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: Check on service [192.168.1.11]:26000 failed after 1 retry. +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: Removing service [192.168.1.11]:26000 from VS [192.168.1.10]:26000 +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: IPVS (cmd 1160, errno 2): No such destination +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: Executing [/gauss/failoverdb.sh] for service [192.168.1.11]:26000 in VS [192.168.1.10]:26000 +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: Lost quorum 1-0=1 > 0 for VS [192.168.1.10]:26000 +Feb 19 16:43:00 opengaussdb1 Keepalived[18815]: Stopping +Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: pid 19258 exited due to signal 15 +Feb 19 16:43:00 opengaussdb1 Keepalived_vrrp[18817]: VRRP_Instance(VI_1) sent 0 priority +Feb 19 16:43:00 opengaussdb1 Keepalived_vrrp[18817]: VRRP_Instance(VI_1) removing protocol VIPs. 
+Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: IPVS (cmd 1156, errno 2): No such file or directory
+Feb 19 16:43:00 opengaussdb1 Keepalived_healthcheckers[18816]: Stopped
+Feb 19 16:43:01 opengaussdb1 Keepalived_vrrp[18817]: Stopped
+Feb 19 16:43:01 opengaussdb1 Keepalived[18815]: Stopped Keepalived v1.3.5 (03/19,2017), git commit v1.3.5-6-g6fa32f2
+```
+
+- 备节点\[192.168.1.12\]检查。
+
+```
+## 检查VIP是否已漂移
+[root@opengaussdb2 ~]# ip a|grep 192.168
+    inet 192.168.1.12/24 brd 192.168.1.255 scope global noprefixroute ens33
+    inet 192.168.1.10/32 scope global ens33
+
+## 检查数据库状态[已failover成为Primary]
+[omm@opengaussdb2 ~]$ gs_om -t status --detail
+[ Cluster State ]
+cluster_state : Degraded
+redistributing : No
+current_az : AZ_ALL
+[ Datanode State ]
+node node_ip instance state |
+---------------------------------------------------------------------------------
+1 opengaussdb1 192.168.1.11 6001 /gauss/data/db1 P Down Manually stopped |
+2 opengaussdb2 192.168.1.12 6002 /gauss/data/db1 S Primary Normal
+```
+
diff --git "a/content/zh/post/jiajunfeng/openGauss1-1-0\344\270\273\345\244\207\351\233\206\347\276\244\350\212\202\347\202\271\347\232\204\346\267\273\345\212\240\345\222\214\345\210\240\351\231\244.md" "b/content/zh/post/jiajunfeng/openGauss1-1-0\344\270\273\345\244\207\351\233\206\347\276\244\350\212\202\347\202\271\347\232\204\346\267\273\345\212\240\345\222\214\345\210\240\351\231\244.md"
index 619c0fa1a2efc27bccc116f0949d2b77c19186a4..82cdaaeeb99ed411cc02c003addf25d3d7a4f957 100644
--- "a/content/zh/post/jiajunfeng/openGauss1-1-0\344\270\273\345\244\207\351\233\206\347\276\244\350\212\202\347\202\271\347\232\204\346\267\273\345\212\240\345\222\214\345\210\240\351\231\244.md"
+++ "b/content/zh/post/jiajunfeng/openGauss1-1-0\344\270\273\345\244\207\351\233\206\347\276\244\350\212\202\347\202\271\347\232\204\346\267\273\345\212\240\345\222\214\345\210\240\351\231\244.md"
@@ -22,7 +22,7 @@ times = "16:30"
 
 在上一篇文章《[openGauss 1.1.0主备部署\(1主+2备+1级联备\)](https://www.modb.pro/db/43407)》中,我们已经搭建了常见于生产环境的数据库架构\(1主2备1级联备\),如下图所示:
 
-![](figures/1.jpg)
+![](../figures/1.jpg)
 
 本文将基于已搭建的主备环境进行集群节点的删除和添加操作,希望相关操作内容对大家有所帮助。
 
diff --git "a/content/zh/post/jiajunfeng/openGauss2-0-0\344\270\273\345\244\207\345\256\211\350\243\205\351\203\250\347\275\262.md" "b/content/zh/post/jiajunfeng/openGauss2-0-0\344\270\273\345\244\207\345\256\211\350\243\205\351\203\250\347\275\262.md"
new file mode 100644
index 0000000000000000000000000000000000000000..020767eab2e7d6874f7cd24d138e6c42f7396fda
--- /dev/null
+++ "b/content/zh/post/jiajunfeng/openGauss2-0-0\344\270\273\345\244\207\345\256\211\350\243\205\351\203\250\347\275\262.md"
@@ -0,0 +1,502 @@
++++
+
+title = "openGauss2.0.0主备安装部署"
+
+date = "2021-04-19"
+
+tags = ["openGauss安装部署"]
+
+archives = "2021-04"
+
+author = "贾军锋"
+
+summary = "openGauss2.0.0主备安装部署"
+
+img = "/zh/post/jiajunfeng/title/img4.png"
+
+times = "15:30"
+
++++
+
+# openGauss2.0.0主备安装部署
+
+openGauss 2.0.0于2021年3月31日正式发布,大部分对openGauss数据库感兴趣的小伙伴首先想到的应该是先把新版本软件下载下来,搭建一套自己的测试环境,然后再根据官方文档研究研究新特性的使用和原理。
+
+本文基于openGauss 2.0.0搭建1主+1备+1级联备测试环境,并将操作记录共享出来,希望能帮到有需要的小伙伴。
+
+## 2.0.0版新增特性
+
+| 特性 | 介绍 |
+| ---- | ---- |
+| 支持延迟备库 | 相对主机,备机可以延迟一段指定的时间后再回放XLOG记录 |
+| 备机支持逻辑复制 | 支持备机逻辑解码,可以减少主机的压力 |
+| 扩容工具功能增强 | 优化了扩容工具,支持不停服在线扩容备机或级联备 |
+| 灰度升级 | 优化升级工具,增加灰度升级能力,支持业务在线升级。目前仅支持从1.1.0版本到2.0.0版本进行灰度升级 |
+| 备机IO写放大优化 | 优化备机IO,平滑备机检查点刷盘的IO量,解决备机IO量大影响查询性能问题 |
+| WDR诊断报告增加数据库运行指标 | 新增“Effective CPU”、“WalWrite NoWait”、“Soft Parse”、“Non-Parse” CPU四个数据库运行指标 |
+| Data Studio客户端工具特性 | • 增加pldebugger调试功能<br>• 增加pldebugger调试功能的回滚,在使用Data Studio调试前增加选项来保证调试函数在修改完数据后回退<br>• 支持xml和serial类型,表中增加列,列的类型支持xml和serial\(big\|normal\|small\)类型<br>• 支持在Data Studio中创建和展示外表对象<br>• 支持列存表的partial_cluster_key约束<br>• 全局临时表支持DDL的展示和导出<br>• 创建分区表支持LOCAL和GLOBAL标记<br>• 增加MOT表的展示 |
+ +软件下载地址:https://opengauss.org/zh/download.html + +![](../figures/20210402-933115a3-e28a-41ea-b721-ce28f0a6724b.png) + +## 测试环境 + +操作系统版本:CentOS7.6.1810 x86\_64 + +硬件配置: \[华为云ECS\] 2c/8G/40GB + +节点信息: \[主节点\] 192.168.0.21 prod + +\[备节点\] 192.168.0.22 stb1 + +\[级联备\] 192.168.0.23 casstb + +## 运行环境初始化 + +操作系统内核参数配置中,涉及SCTP协议的内核参数可以忽略不配置,由于SCTP协议在分布式数据库中使用,openGauss不使用该协议,官方文档也将会删除相关内容。 + +预安装脚本gs\_preinstall会完成部分系统的配置,但还需要用户做少许基础配置,示例脚本如下\[仅供参考\]: + +``` +vi /root/initial_env.sh +------------------------------------------------------------------------------------------- +#!/bin/bash + +##Configure Linux environment For openGauss + +## 1.Disable firewalld service +systemctl disable firewalld.service +systemctl stop firewalld.service +echo "Firewalld " `systemctl status firewalld|grep Active` +echo "1.Disable firewalld service completed." +echo -e "\n" + +## 2.Disable SELINUX +sed -i '/^SELINUX=/d' /etc/selinux/config +echo "SELINUX=disabled" >> /etc/selinux/config +cat /etc/selinux/config|grep "SELINUX=disabled" +echo "2.Disable SELINUX completed." +echo -e "\n" + + +## 3.Configure encoding +echo "LANG=en_US.UTF-8" >> /etc/profile +source /etc/profile +echo $LANG +echo "3.Configure encoding completed." +echo -e "\n" + +## 4. Configure Timezone +rm -fr /etc/localtime +ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime +date -R +hwclock +echo "4.Configure Timezone completed." +echo -e "\n" + +## 5. Turn off SWAP +sed -i '/swap/s/^/#/' /etc/fstab +swapoff -a +free -m +echo "5.Close swap partition completed." +echo -e "\n" + +## optional options,please take care of this +## echo "MTU=8192" >> /etc/sysconfig/network-scripts/ifcfg-ens34 +## For 10GB Ethernet environment , please set rx = 4096、tx = 4096 + + +## 6. Configure SSH Service +sed -i '/Banner/s/^/#/' /etc/ssh/sshd_config +sed -i '/PermitRootLogin/s/^/#/' /etc/ssh/sshd_config +echo -e "\n" >> /etc/ssh/sshd_config +echo "Banner none " >> /etc/ssh/sshd_config +echo "PermitRootLogin yes" >> /etc/ssh/sshd_config +cat /etc/ssh/sshd_config |grep -v ^#|grep -E 'PermitRoot|Banner' +echo "6.Configure SSH Service completed." +echo -e "\n" + +## 7. Configure YUM and Install Packages +mkdir /etc/yum.repos.d/bak +mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/ +wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo +yum clean all +yum install -y bzip2 python3 +yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel +echo "7.Configure YUM and Install Packages completed." +echo -e "\n" + +## 8. Close transparent_hugepage +################Only for CentOS [Close transparent_hugepage]##################### +cat >>/etc/rc.d/rc.local<<EOF +if test -f /sys/kernel/mm/transparent_hugepage/enabled; then + echo never > /sys/kernel/mm/transparent_hugepage/enabled +fi +if test -f /sys/kernel/mm/transparent_hugepage/defrag; then + echo never > /sys/kernel/mm/transparent_hugepage/defrag +fi +EOF +chmod +x /etc/rc.d/rc.local +echo "8.Close transparent_hugepage completed." +echo -e "\n" +################################################################################ + +## 9. 
Configure OS Parameter
+cat >> /etc/sysctl.conf <<EOF
+net.ipv4.tcp_retries1 = 5
+net.ipv4.tcp_syn_retries = 5
+net.sctp.path_max_retrans = 10
+net.sctp.max_init_retransmits = 10
+EOF
+sysctl -p
+
+################Only for openEuler[Disable RemoveIPC]#####################
+## sed -i '/^RemoveIPC/d' /etc/systemd/logind.conf
+## sed -i '/^RemoveIPC/d' /usr/lib/systemd/system/systemd-logind.service
+## echo "RemoveIPC=no" >> /etc/systemd/logind.conf
+## echo "RemoveIPC=no" >> /usr/lib/systemd/system/systemd-logind.service
+## systemctl daemon-reload
+## systemctl restart systemd-logind
+## loginctl show-session | grep RemoveIPC
+## systemctl show systemd-logind | grep RemoveIPC
+## echo "10.Disable RemoveIPC completed."
+## echo -e "\n"
+## echo -e "\n"
+##########################################################################
+-------------------------------------------------------------------------------------------
+sh /root/initial_env.sh ## 执行初始化脚本[SCTP内核参数告警可以忽略]
+```
+
+## 配置XML文件
+
+```
+# mkdir /soft
+# vi /soft/cluster_config.xml
+-----------------------------------------------------------------------------
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 示例配置:按本文prod/stb1/casstb环境整理,目录与IP请以实际环境为准 -->
+<ROOT>
+    <CLUSTER>
+        <PARAM name="clusterName" value="dbCluster" />
+        <PARAM name="nodeNames" value="prod,stb1,casstb" />
+        <PARAM name="gaussdbAppPath" value="/gauss/app" />
+        <PARAM name="gaussdbLogPath" value="/gauss/log" />
+        <PARAM name="tmpMppdbPath" value="/gauss/tmp" />
+        <PARAM name="gaussdbToolPath" value="/gauss/om" />
+        <PARAM name="corePath" value="/gauss/corefile" />
+        <PARAM name="backIp1s" value="192.168.0.21,192.168.0.22,192.168.0.23" />
+    </CLUSTER>
+    <DEVICELIST>
+        <DEVICE sn="prod">
+            <PARAM name="name" value="prod" />
+            <PARAM name="azName" value="AZ1" />
+            <PARAM name="azPriority" value="1" />
+            <PARAM name="backIp1" value="192.168.0.21" />
+            <PARAM name="sshIp1" value="192.168.0.21" />
+            <PARAM name="dataNum" value="1" />
+            <PARAM name="dataPortBase" value="26000" />
+            <PARAM name="dataNode1" value="/gauss/data/db1,stb1,/gauss/data/db1,casstb,/gauss/data/db1" />
+            <!-- 若需将casstb指定为级联备,请按所用版本的官方文档补充相应参数(如cascadeRole) -->
+        </DEVICE>
+        <DEVICE sn="stb1">
+            <PARAM name="name" value="stb1" />
+            <PARAM name="azName" value="AZ1" />
+            <PARAM name="azPriority" value="1" />
+            <PARAM name="backIp1" value="192.168.0.22" />
+            <PARAM name="sshIp1" value="192.168.0.22" />
+        </DEVICE>
+        <DEVICE sn="casstb">
+            <PARAM name="name" value="casstb" />
+            <PARAM name="azName" value="AZ1" />
+            <PARAM name="azPriority" value="1" />
+            <PARAM name="backIp1" value="192.168.0.23" />
+            <PARAM name="sshIp1" value="192.168.0.23" />
+        </DEVICE>
+    </DEVICELIST>
+</ROOT>
+-----------------------------------------------------------------------------
+```
+
+## 执行预安装gs\_preinstall
+
+**1. 解压软件包**
+
+```
+cd /soft/
+tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz
+tar -zxvf openGauss-2.0.0-CentOS-64bit-om.tar.gz
+```
+
+**2. 执行预安装**
+
+```
+[root@prod ~]# cd /soft/script
+[root@prod script]# ./gs_preinstall -U omm -G dbgrp -X /soft/cluster_config.xml
+---------------------------------------------------------------------------------------------
+说明:
+  gs_preinstall脚本操作和之前版本的操作基本一致,为了增加阅读性,这里就不再重复粘贴脚本输出信息。
+  脚本操作内容总结如下:
+  1. 解析命令参数和XML文件参数
+  2. 本地解压并安装应用软件
+  3. 配置集群节点间root用户SSH互信,并设置/etc/hosts解析
+  4. 分发软件包至各个节点的指定目录(如:/gauss/app目录),其他备节点相关目录,脚本会自动创建,无需用户手动提前创建
+  5. 创建普通用户(如:omm)及用户组,并建立各节点普通用户的SSH互信[需要用户输入新的用户密码]
+  6. 安装集群管理工具(如:OM)
+  7. 检查主机名、网络、OS版本及依赖包
+  8. 创建集群目录
+  9. 设置SCTP服务、系统资源限制、定义系统告警信息和日志速率、操作系统sysctl参数[缺少2个关于tcp重试的参数,需要手动设置]
+  10. 设置CRON服务、用户环境变量、动态链接库、Core file路径、ARM优化配置等
+  11. 修改目录的属主为数据库运行用户、设置Finish标签,完成openGauss的预安装操作。
+---------------------------------------------------------------------------------------------
+
+-- 根据输出提示,使用gs_checkos工具检查预安装结果
+[root@prod script]# /soft/script/gs_checkos -i A -h prod,stb1,casstb --detail ## -h指定需要检查的主机名
+---------------------------------------------------------------------------------------------
+脚本检查内容包括:
+  1. OS 版本
+  2. Kernel版本
+  3. 字符集编码
+  4. 时区
+  5. SWAP分区关闭状态
+  6. sysctl参数配置
+  7. 文件系统配置
+  8. 磁盘配置状态
+  9. 磁盘块预读取参数
+  10. 磁盘IO调度策略
+  11. 网卡配置 [华为云ECS环境请忽略有关speed的告警]
+  12. 时间一致性、NTP服务
+  13. 防火墙关闭状态
+  14. 透明大页关闭状态
+---------------------------------------------------------------------------------------------
+[root@prod script]# chown -R omm:dbgrp /soft
+```
+
+## 执行安装gs\_install
+
+```
+[root@prod ~]# su - omm
+[omm@prod ~]$ gs_install -X /soft/cluster_config.xml
+---------------------------------------------------------------------------------------------
+从脚本信息能看到其来自于GaussDB200,吸取了GaussDB200的相关经验。
+基本操作内容如下:
+  1. 解析命令行参数、对比并检查XML文件和集群静态配置文件
+  2. 检查并初始化全局参数
+  3. 备份软件目录
+  4. 检查预安装的集群节点环境
+  5. 
调用本地../local/Install.py脚本执行本地数据库安装和集群配置 + 本地Install安装大致包含以下步骤: + 1> 查询并初始化全局参数,如:操作系统用户信息、安装路径信息、静态配置文件等; + 2> 解压并拷贝数据库Bin文件 + 3> 调用本地脚本InitInstance.py初始化实例[需要用户输入数据库密码] + 4> 创建CA证书文件 + 5> 调用本地脚本[ConfigInstance.py]配置数据库实例 + 6> 调用本地脚本[CleanInstance.py]清理数据库实例[删除表空间目录、实例目录、xlog目录、socket文件等] + 7> 初始化所有节点的Instance,配置数据库参数和主备集群 + 8> 检查CPU、内存等是否满足要求 + 9> 配置pg_hba.conf,添加所有集群节点至白名单 + 10> 启动集群,完成部署。 +--------------------------------------------------------------------------------------------- +备注: + 关于Install的第6步操作“CleanInstance”还没看明白其中的原因,很疑惑在Install安装操作却需要执行Clean清理操作,这个希望对代码精通的小伙伴予以解惑,感谢~ + 后续有正确的答案会更新该文章,目前仅供大家参考。 +``` + +## 检查数据库信息 + +``` +[omm@prod ~]$ gsql -d postgres -p 26000 -r +gsql ((openGauss 2.0.0 build 78689da9) compiled at 2021-03-31 21:04:03 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +postgres=# \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+-----------+---------+-------+------------------- + postgres | omm | SQL_ASCII | C | C | + template0 | omm | SQL_ASCII | C | C | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | SQL_ASCII | C | C | =c/omm + + | | | | | omm=CTc/omm + +## 说明: +## openGauss 2.0.0相比于1.1.0版本,不再强制修改初始用户的密码,免去了很多初学者的困扰。 +## openGauss 2.0.0相比于1.1.0版本,又重新开放了gsql的readline功能,可以实现Tab键自动补齐命令,大幅增加了gsql的易用性,这个很不错。 + +postgres=# select * from dbe_perf.replication_stat; + pid | usesysid | usename | application_name | client_addr | client_hostname | client_port | backend_start | state | sender_sent_location | receiver_write_location | receiver_flush_location | receiver_replay_location | sync_priority | sync_state +-----------------+----------+---------+-------------------------------+--------------+-----------------+-------------+-------------------------------+-----------+----------------------+-------------------------+-------------------------+--------------------------+---------------+------------ + 139635524359936 | 10 | omm | WalSender to Standby[dn_6002] | 192.168.0.22 | stb1 | 42460 | 2021-04-02 09:34:26.351701+08 | Streaming | 0/6002580 | 0/6002580 | 0/6002580 | 0/6002580 | 0 | Async +``` + +## 检查主备信息 + +``` +## 查询集群健康状态 +[omm@prod ~]$ gs_om -t status --detail +[ Cluster State ] + +cluster_state : Normal +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + +node node_ip instance state | +----------------------------------------------------------------- +1 prod 192.168.0.21 6001 /gauss/data/db1 P Primary Normal | +2 stb1 192.168.0.22 6002 /gauss/data/db1 S Standby Normal | +3 casstb 192.168.0.23 6003 /gauss/data/db1 C Cascade Normal + +## 查询主备同步信息 +[omm@prod ~]$ gs_ctl query -D /gauss/data/db1 +[2021-04-02 09:39:46.201][23236][][gs_ctl]: gs_ctl query ,datadir is /gauss/data/db1 + HA state: + local_role : Primary + static_connections : 2 + db_state : Normal + detail_information : Normal + + Senders info: + sender_pid : 22614 + local_role : Primary + peer_role : Standby + peer_state : Normal + state : Streaming + sender_sent_location : 0/6001FF0 + sender_write_location : 0/6001FF0 + sender_flush_location : 0/6001FF0 + sender_replay_location : 0/6001FF0 + receiver_received_location : 0/6001FF0 + receiver_write_location : 0/6001FF0 + receiver_flush_location : 0/6001FF0 + receiver_replay_location : 0/6001FF0 + sync_percent : 100% + sync_state : Async + sync_priority : 0 + sync_most_available : Off + channel : 192.168.0.21:26001-->192.168.0.22:42460 + +## 查询集群节点配置信息 +[omm@prod ~]$ gs_om -t view +NodeHeader: +version:301 
+time:1617327141
+nodeCount:3
+node:1
+azName:AZ1
+azPriority:1
+
+node :1
+nodeName:prod
+ssh channel :
+sshChannel 1:192.168.0.21
+datanodeCount :1
+datanode 1:
+datanodeLocalDataPath :/gauss/data/db1
+datanodeXlogPath :
+datanodeListenIP 1:192.168.0.21
+datanodePort :26000
+datanodeLocalHAIP 1:192.168.0.21
+datanodeLocalHAPort :26001
+dn_replication_num: 3
+datanodePeer0DataPath :/gauss/data/db1
+datanodePeer0HAIP 1:192.168.0.22
+datanodePeer0HAPort :26001
+datanodePeer1DataPath :/gauss/data/db1
+datanodePeer1HAIP 1:192.168.0.23
+datanodePeer1HAPort :26001
+azName:AZ1
+azPriority:1
+
+node :2
+nodeName:stb1
+ssh channel :
+sshChannel 1:192.168.0.22
+datanodeCount :1
+datanode 1:
+datanodeLocalDataPath :/gauss/data/db1
+datanodeXlogPath :
+datanodeListenIP 1:192.168.0.22
+datanodePort :26000
+datanodeLocalHAIP 1:192.168.0.22
+datanodeLocalHAPort :26001
+dn_replication_num: 3
+datanodePeer0DataPath :/gauss/data/db1
+datanodePeer0HAIP 1:192.168.0.21
+datanodePeer0HAPort :26001
+datanodePeer1DataPath :/gauss/data/db1
+datanodePeer1HAIP 1:192.168.0.23
+datanodePeer1HAPort :26001
+azName:AZ1
+azPriority:1
+
+node :3
+nodeName:casstb
+ssh channel :
+sshChannel 1:192.168.0.23
+datanodeCount :1
+datanode 1:
+datanodeLocalDataPath :/gauss/data/db1
+datanodeXlogPath :
+datanodeListenIP 1:192.168.0.23
+datanodePort :26000
+datanodeLocalHAIP 1:192.168.0.23
+datanodeLocalHAPort :26001
+dn_replication_num: 3
+datanodePeer0DataPath :/gauss/data/db1
+datanodePeer0HAIP 1:192.168.0.21
+datanodePeer0HAPort :26001
+datanodePeer1DataPath :/gauss/data/db1
+datanodePeer1HAIP 1:192.168.0.22
+datanodePeer1HAPort :26001
+```
+
diff --git "a/content/zh/post/jiajunfeng/openGauss2-0-0\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md" "b/content/zh/post/jiajunfeng/openGauss2-0-0\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md"
new file mode 100644
index 0000000000000000000000000000000000000000..47deff6f5295af1ff6deb829803925c387fa9a6c
--- /dev/null
+++ "b/content/zh/post/jiajunfeng/openGauss2-0-0\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md"
@@ -0,0 +1,406 @@
++++
+
+title = "openGauss2.0.0极简版安装"
+
+date = "2021-04-01"
+
+tags = ["openGauss安装部署"]
+
+archives = "2021-04"
+
+author = "贾军锋"
+
+summary = "openGauss2.0.0极简版安装"
+
+img = "/zh/post/jiajunfeng/title/img4.png"
+
+times = "15:30"
+
++++
+
+# openGauss2.0.0极简版安装
+
+openGauss的安装在官方文档的描述中,一直以企业生产环境为标准进行安装部署。但在个人基本的功能测试需求下,这样的安装操作显得有些复杂。
+
+在 openGauss 2.0.0 版本中\(2021.03.31发布\)新增了极简版的软件包。极简版安装主要面向高校和个人测试环境,相对企业版安装流程更简单快捷,更加适合高校学生或者个人功能测试的场景;该软件包中并不包含OM工具,采用脚本可以实现一键式安装部署。
+
+本文通过使用极简版进行安装部署,希望基本的操作示例对大家能有所帮助。
+
+软件环境: 包含了上一版本要求的软件依赖包,新增了openEuler x86环境下需要的libnsl软件包。
+
+硬件环境: 极简版中对于硬件环境要求描述“个人开发者最低配置2核4G, 推荐配置4核8G。”,本次安装实验采用最低配置2c/4GB,操作系统使用CentOS7.6.1810。
+
+极简版为了适应小内存机器,在部署时将部分重要内存参数设置较低,如:“shared\_buffers = 32MB”、“cstore\_buffers = 512MB”。
+
+另外,极简版安装的数据库字符集将原先默认的SQL\_ASCII字符集改为en\_US.UTF-8,同时初始用户密码不做强制修改\[modify\_initial\_password = false\]。
+
+## 运行环境配置
+
+**1. 配置YUM源**
+
+```
+mkdir /etc/yum.repos.d/bak
+mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/
+wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
+yum clean all
+```
+
+**2. 安装依赖的软件包**
+
+```
+yum install libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel -y
+```
+
+**3. 
关闭安全设置** + +``` +## 关闭防火墙 +systemctl status firewalld +systemctl disable firewalld.service +systemctl stop firewalld.service +## 关闭SELinux +sed -i '/SELINUX=/d' /etc/selinux/config +echo "SELINUX=disabled" >> /etc/selinux/config +cat /etc/selinux/config|grep -v ^#|grep -v '^$' +``` + +**4. 设置时区** + +``` +rm -fr /etc/localtime +ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime +ll /etc/localtime +``` + +**5. 关闭SWAP** + +``` +## 修改分区表文件,删除SWAP挂载信息 +cp /etc/fstab /etc/fstab.bak +sed -i '/swap/s/^/#/' /etc/fstab +cat /etc/fstab|grep -v ^#|grep -v '^$' +## 关闭swap +swapoff -a +``` + +**6. 配置操作系统内核参数** + +``` +## 极简安装但没有实现内核参数的自动化配置,这个有些不足,希望后续将参数配置写入脚本。 +## 此处参考之前的配置吧 +cat >> /etc/sysctl.conf << EOF +net.ipv4.tcp_max_tw_buckets = 10000 +net.ipv4.tcp_tw_reuse = 1 +net.ipv4.tcp_tw_recycle = 1 +net.ipv4.tcp_keepalive_time = 30 +net.ipv4.tcp_keepalive_probes = 9 +net.ipv4.tcp_keepalive_intvl = 30 +net.ipv4.tcp_retries1 = 5 +net.ipv4.tcp_syn_retries = 5 +net.ipv4.tcp_synack_retries = 5 +net.ipv4.tcp_retries2 = 12 +vm.overcommit_memory = 0 +net.ipv4.tcp_rmem = 8192 250000 16777216 +net.ipv4.tcp_wmem = 8192 250000 16777216 +net.core.wmem_max = 21299200 +net.core.rmem_max = 21299200 +net.core.wmem_default = 21299200 +net.core.rmem_default = 21299200 +net.ipv4.ip_local_port_range = 26000 65535 +kernel.sem = 250 6400000 1000 25600 +vm.min_free_kbytes = 102400 ##suggest to set as physical memory * 5% +net.core.somaxconn = 65535 +net.ipv4.tcp_syncookies = 1 +net.core.netdev_max_backlog = 65535 +net.ipv4.tcp_max_syn_backlog = 65535 +net.ipv4.tcp_fin_timeout = 60 +kernel.shmall = 1152921504606846720 +kernel.shmmax = 18446744073709551615 +net.ipv4.tcp_sack = 1 +net.ipv4.tcp_timestamps = 1 +vm.extfrag_threshold = 500 +vm.overcommit_ratio = 90 +EOF +sysctl -p +``` + +备注: openEuler操作系统需要关闭RemoveIPC,操作请参考官方文档。 + +## 创建普通用户和目录,并授权 + +``` +groupadd -g 1001 dbgrp +useradd -u 2001 -g dbgrp omm +mkdir -p /opt/software/openGauss +chown -R omm:dbgrp /opt +``` + +## 解压并一键式安装单机openGauss + +极简版软件包:openGauss-2.0.0-CentOS-64bit.tar.bz2 + +企业版软件包:openGauss-2.0.0-CentOS-64bit-all.tar.gz \(包含om工具\) + +单机部署的数据目录 --\> /opt/software/openGauss/data/single\_node + +主备部署的数据目录 --\> /opt/software/openGauss/data/master 和 /opt/software/openGauss/data/slave + +``` +## 解压软件包 +[root@db1 ~]# su - omm +[omm@db1 ~]$ cd /opt/software/openGauss/ +[omm@db1 openGauss]$ tar -jxf openGauss-2.0.0-CentOS-64bit.tar.bz2 -C /opt/software/openGauss/ +## 一键式脚本安装 +[omm@db1 openGauss]$ cd /opt/software/openGauss/simpleInstall/ +[omm@db1 simpleInstall]$ sh install.sh -w gauss@123 -p 26000 ## -w指定数据库初始用户密码、-p指定数据库端口 +[step 1]: check parameter +[step 2]: check install env and os setting +[step 3]: change_gausshome_owner +[step 4]: set environment variables +/home/omm/.bashrc: line 16: ulimit: open files: cannot modify limit: Operation not permitted +[step 6]: init datanode +The files belonging to this database system will be owned by user "omm". +This user must also own the server process. +The database cluster will be initialized with locale "en_US.UTF-8". +The default database encoding has accordingly been set to "UTF8". +The default text search configuration will be set to "english". + +creating directory /opt/software/openGauss/data/single_node ... ok +creating subdirectories ... ok +selecting default max_connections ... 100 +selecting default shared_buffers ... 32MB +creating configuration files ... ok +creating template1 database in /opt/software/openGauss/data/single_node/base/1 ... 
The core dump path is an invalid directory +2021-04-01 09:58:57.927 [unknown] [unknown] localhost 139899531253504 0 [BACKEND] WARNING: macAddr is 64022/1040773698, sysidentifier is 4195761672/4064452798, randomNum is 486318270 +... ... +WARNING: enabling "trust" authentication for local connections +You can change this by editing pg_hba.conf or using the option -A, or +--auth-local and --auth-host, the next time you run gs_initdb. +Success. You can now start the database server of single node using: + + gaussdb -D /opt/software/openGauss/data/single_node --single_node +or + gs_ctl start -D /opt/software/openGauss/data/single_node -Z single_node -l logfile + +[step 7]: start datanode +[2021-04-01 09:59:21.027][8464][][gs_ctl]: gs_ctl started,datadir is /opt/software/openGauss/data/single_node +[2021-04-01 09:59:21.136][8464][][gs_ctl]: waiting for server to start... +0 LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. +0 LOG: [Alarm Module]Host Name: db1 +0 LOG: [Alarm Module]Host IP: 127.0.0.1 +0 LOG: [Alarm Module]Cluster Name: dbCluster +0 LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 +0 WARNING: failed to open feature control file, please check whether it exists: FileName=gaussdb.version, Errno=2, Errmessage=No such file or directory. +0 WARNING: failed to parse feature control file: gaussdb.version. +0 WARNING: Failed to load the product control file, so gaussdb cannot distinguish product version. +0 LOG: Failed to initialze environment for codegen. +The core dump path is an invalid directory +2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: when starting as multi_standby mode, we couldn't support data replicaton. +gaussdb.state does not exist, and skipt setting since it is optional.2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: [Alarm Module]can not read GAUSS_WARNING_TYPE env. + +2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: [Alarm Module]Host Name: db1 +2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: [Alarm Module]Host IP: 127.0.0.1 +2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: [Alarm Module]Cluster Name: dbCluster +2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: [Alarm Module]Invalid data in AlarmItem file! Read alarm English name failed! line: 52 +2021-04-01 09:59:21.359 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: Transparent encryption disabled. +2021-04-01 09:59:21.365 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: loaded library "security_plugin" +2021-04-01 09:59:21.365 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] WARNING: could not create any HA TCP/IP sockets +2021-04-01 09:59:21.374 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] WARNING: No explicit IP is configured for listen_addresses GUC. +2021-04-01 09:59:21.374 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: InitNuma numaNodeNum: 1 numa_distribute_mode: none inheritThreadPool: 0. 
+2021-04-01 09:59:21.374 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: reserved memory for backend threads is: 220 MB +2021-04-01 09:59:21.374 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: reserved memory for WAL buffers is: 128 MB +2021-04-01 09:59:21.374 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: Set max backend reserve memory is: 348 MB, max dynamic memory is: 11097 MB +2021-04-01 09:59:21.374 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: shared memory 330 Mbytes, memory context 11445 Mbytes, max process memory 12288 Mbytes +2021-04-01 09:59:21.404 [unknown] [unknown] localhost 140033854506752 0 0 [CACHE] LOG: set data cache size(402653184) +2021-04-01 09:59:21.415 [unknown] [unknown] localhost 140033854506752 0 0 [CACHE] LOG: set metadata cache size(134217728) +2021-04-01 09:59:21.462 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: gaussdb: fsync file "/opt/software/openGauss/data/single_node/gaussdb.state.temp" success +2021-04-01 09:59:21.462 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: create gaussdb state file success: db state(STARTING_STATE), server mode(Normal) +2021-04-01 09:59:21.483 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: max_safe_fds = 977, usable_fds = 1000, already_open = 13 +The core dump path is an invalid directory +2021-04-01 09:59:21.484 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: user configure file is not found, it will be created. +2021-04-01 09:59:21.488 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: the configure file /opt/software/openGauss/etc/gscgroup_omm.cfg doesn't exist or the size of configure file has changed. Please create it by root user! +2021-04-01 09:59:21.488 [unknown] [unknown] localhost 140033854506752 0 0 [BACKEND] LOG: Failed to parse cgroup config file. + +[2021-04-01 09:59:22.143][8464][][gs_ctl]: done +[2021-04-01 09:59:22.143][8464][][gs_ctl]: server started (/opt/software/openGauss/data/single_node) +import sql file +Would you like to create a demo database (yes/no)? yes ## 创建Demo数据库 +Load demoDB [school,finance] success. +[complete successfully]: You can start or stop the database server using: + gs_ctl start|stop|restart -D $GAUSSHOME/data/single_node -Z single_node +``` + +## 检查数据库 + +``` +[omm@db1 ~]$ echo "PATH=/opt/software/openGauss/bin:\$PATH" >> /home/omm/.bash_profile ## 配置PATH +[omm@db1 ~]$ source ~/.bash_profile +-bash: ulimit: open files: cannot modify limit: Operation not permitted ## 这里提示打开文件数量限制不能修改,这...... 代表resource limit参数脚本也没有做相应的配置 +[omm@db1 ~]$ gsql -d postgres -p 26000 -r +gsql ((openGauss 2.0.0 build 78689da9) compiled at 2021-03-31 21:04:03 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. 
+ +postgres=# \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+-------------+-------------+------------------- + finance | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | -- 金融场景数据库示例 + postgres | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + school | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | -- 学校场景数据库示例 + template0 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm +(5 rows) +``` + +## 总结 + +openGauss的极简安装没有使用OM工具,即不能使用OM工具对openGauss实例进行管理和配置。 + +openGauss极简版免去了用户配置XML文件的操作,也免去了配置1主+1备的配置操作,这简化了少许安装操作。 + +但是极简版个人感觉稍有些失望,所谓的"极"字并没有得到充分体现,系统内核参数、资源限制参数、环境变量配置、用户创建、目录创建和权限等等这些操作并没有实现自动化配置,脚本很简单但是并没有写入install脚本中。 + +个人对“极简”的期待是90分,实际感觉是60分,还有待完善,但值得期待。 + +## 附录:极简安装主备环境 + +``` +openGauss极简主备部署,脚本的基本操作就是分别单机安装主、备节点,然后配置主备关系并重建备库。同时,极简安装也会部署测试库finance和school。 +[omm@db1 ~]$ cd /opt/software/openGauss/ +[omm@db1 openGauss]$ tar -jxf openGauss-2.0.0-CentOS-64bit.tar.bz2 -C /opt/software/openGauss/ +[omm@db1 openGauss]$ cd /opt/software/openGauss/simpleInstall/ + +## 主备部署需要配合--multinode参数 +[omm@db1 simpleInstall]$ sh install.sh -w gauss@123 -p 26000 --multinode +[step 1]: check parameter +[step 2]: check install env and os setting +[step 3]: change_gausshome_owner +[step 4]: set environment variables + +/home/omm/.bashrc: line 16: ulimit: open files: cannot modify limit: Operation not permitted +[init primary datanode.] +The files belonging to this database system will be owned by user "omm". +This user must also own the server process. + +The database cluster will be initialized with locale "en_US.UTF-8". +The default text search configuration will be set to "english". + +creating directory /opt/software/openGauss/data/master ... ok +creating subdirectories ... ok +selecting default max_connections ... 100 +selecting default shared_buffers ... 32MB +creating configuration files ... ok +creating template1 database in /opt/software/openGauss/data/master/base/1 ... The core dump path is an invalid directory +... ... +WARNING: enabling "trust" authentication for local connections +You can change this by editing pg_hba.conf or using the option -A, or +--auth-local and --auth-host, the next time you run gs_initdb. + +Success. You can now start the database server of single node using: + + gaussdb -D /opt/software/openGauss/data/master --single_node +or + gs_ctl start -D /opt/software/openGauss/data/master -Z single_node -l logfile + +[init slave datanode.] +The files belonging to this database system will be owned by user "omm". +This user must also own the server process. + +The database cluster will be initialized with locale "en_US.UTF-8". +The default text search configuration will be set to "english". + +creating directory /opt/software/openGauss/data/slave ... ok +creating subdirectories ... ok +selecting default max_connections ... 100 +selecting default shared_buffers ... 32MB +creating configuration files ... ok +creating template1 database in /opt/software/openGauss/data/slave/base/1 ... The core dump path is an invalid directory +2021-04-01 10:16:31.046 [unknown] [unknown] localhost 140719588914944 0 [BACKEND] WARNING: macAddr is 64022/1040773698, sysidentifier is 4195761672/4064474332, randomNum is 2212623580 +... ... +WARNING: enabling "trust" authentication for local connections +You can change this by editing pg_hba.conf or using the option -A, or +--auth-local and --auth-host, the next time you run gs_initdb. 
+ +Success. You can now start the database server of single node using: + + gaussdb -D /opt/software/openGauss/data/slave --single_node +or + gs_ctl start -D /opt/software/openGauss/data/slave -Z single_node -l logfile + +[config datanode.] +remote_read_mode = non_authentication +host all all 192.168.0.100/32 trust +[start primary datanode.] +[2021-04-01 10:16:53.293][1997][][gs_ctl]: gs_ctl started,datadir is /opt/software/openGauss/data/master +[2021-04-01 10:16:53.400][1997][][gs_ctl]: waiting for server to start... +...... +[2021-04-01 10:17:10.092][2063][datanode2][gs_ctl]: done +[2021-04-01 10:17:10.092][2063][datanode2][gs_ctl]: server started (/opt/software/openGauss/data/slave) +[2021-04-01 10:17:10.092][2063][datanode2][gs_ctl]: fopen build pid file "/opt/software/openGauss/data/slave/gs_build.pid" success +[2021-04-01 10:17:10.092][2063][datanode2][gs_ctl]: fprintf build pid file "/opt/software/openGauss/data/slave/gs_build.pid" success +[2021-04-01 10:17:10.095][2063][datanode2][gs_ctl]: fsync build pid file "/opt/software/openGauss/data/slave/gs_build.pid" success +import sql file +Would you like to create a demo database (yes/no)? yes +Load demoDB [school,finance] success. +[complete successfully]: You can start or stop the database server using: + primary: gs_ctl start|stop|restart -D $GAUSSHOME/data/master -M primary + standby: gs_ctl start|stop|restart -D $GAUSSHOME/data/slave -M standby +``` + +**-\> 数据库检查** + +``` +[omm@db1 ~]$ echo "PATH=/opt/software/openGauss/bin:\$PATH" >> /home/omm/.bash_profile ## 配置PATH +[omm@db1 ~]$ source ~/.bash_profile +[omm@db1 master]$ gsql -d postgres -p 26000 -r +postgres=# \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+-------------+-------------+------------------- + finance | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + postgres | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + school | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | + template0 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + + | | | | | omm=CTc/omm +``` + +**-\> 主备状态检查** + +``` +[omm@db1 master]$ gs_ctl query -D /opt/software/openGauss/data/master +[2021-04-01 10:32:43.785][2239][][gs_ctl]: gs_ctl query ,datadir is /opt/software/openGauss/data/master + HA state: + local_role : Primary + static_connections : 1 + db_state : Normal + detail_information : Normal + + Senders info: + sender_pid : 2151 + local_role : Primary + peer_role : Standby + peer_state : Normal + state : Streaming + sender_sent_location : 0/403B850 + sender_write_location : 0/403B850 + sender_flush_location : 0/403B850 + sender_replay_location : 0/403B850 + receiver_received_location : 0/403B850 + receiver_write_location : 0/403B850 + receiver_flush_location : 0/403B850 + receiver_replay_location : 0/403B850 + sync_percent : 100% + sync_state : Sync + sync_priority : 1 + sync_most_available : Off + channel : 192.168.0.100:26001-->192.168.0.100:37014 + + Receiver info: +No information +``` + diff --git "a/content/zh/post/jiajunfeng/openGauss\346\225\260\346\215\256\345\212\250\346\200\201\350\204\261\346\225\217.md" "b/content/zh/post/jiajunfeng/openGauss\346\225\260\346\215\256\345\212\250\346\200\201\350\204\261\346\225\217.md" new file mode 100644 index 0000000000000000000000000000000000000000..706f33f85086464f9a882349e7f16612a12bf878 --- /dev/null +++ 
"b/content/zh/post/jiajunfeng/openGauss\346\225\260\346\215\256\345\212\250\346\200\201\350\204\261\346\225\217.md" @@ -0,0 +1,220 @@ ++++ + +title = "openGauss数据动态脱敏" + +date = "2021-04-19" + +tags = ["openGauss核心技术"] + +archives = "2021-04" + +author = "贾军锋" + +summary = "openGauss数据动态脱敏" + +img = "/zh/post/jiajunfeng/title/img20.png" + +times = "15:30" + ++++ + +# openGauss数据动态脱敏 + +## 常见脱敏路线 + +结果集解析: 不改写发给数据库的语句,需要提前获悉数据表结构,待数据库返回结果后再根据表结构判断集合内哪些数据需要脱敏,并逐条改写结果数据。 + +语句改写: 将包含敏感字段查询的语句改写,对于查询中涉及的敏感字段(表列)通过外层嵌套函数的方式改写,使得数据库运行查询语句时返回不包含敏感数据的结果集。 + +openGauss采用了语句改写的方式实现数据脱敏,无论对于性能还是准确性来说都是较为优秀的脱敏方案。基于语句改写的思想,在查询解析获取查询树后,根据用户定义的脱敏策略识别查询树目标结点\(Node\),并对待脱敏结点进行改写构造“脱敏查询树”,再交由数据库内核执行最终返回脱敏后数据,10万条敏感数据脱敏的性能损耗低于5%。openGauss从1.1.0版本定义了一套完整的内置安全策略模型,基于该模型用户可以定义资源标签来标识敏感数据,针对不同的资源标签类别和内容可定义相关的安全策略机制。 + +## 内置安全策略 + +- openGauss中的动态数据脱敏是以内置安全插件(security plugin)的方式与数据库部署在一起的,业务方面无需额外适配就可使用内置安全策略。 +- SQL的解析与脱敏策略匹配交由openGauss安全策略模块负责,业务在配置脱敏策略后即可生效。 +- 安全策略(Security Policy)模型,是指通过配置一系列安全策略来对用户行为进行识别和保护,提供了包括保护用户敏感数据的能力。 +- 资源标签(Resource Label)是Security Policy的基础,它的本质是一系列数据库资源集合。通过归类数据库资源,将这些资源统一地投入到各种安全策略中去管理。 +- 动态数据脱敏特性便是利用资源标签去识别敏感数据,然后匹配脱敏策略,实现对敏感数据的屏蔽。 + +**脱敏策略内容:** + +脱敏方式\(Masking Function\),是指该脱敏策略使用何种方式对目标字段进行脱敏,目前openGauss预置了7种脱敏方式:creditcardmasking、 basicemailmasking、fullemailmasking、alldigitsmasking、shufflemasking、randommasking、maskall,分别适用于不同的脱敏场景。 + +脱敏对象\(Resource Label\),是指脱敏策略生效时作用的对象集合(LABEL),若查询目标字段存在于LABEL中,则该字段将会根据脱敏策略进行敏感数据脱敏,openGauss动态数据脱敏特性支持对仅包含数据列的LABEL进行脱敏。 + +过滤器,指出脱敏策略在何种用户场景下生效,主要涉及USER(用户名)、APP(用户登录客户端名称)、IP(用户所处的ip)。当查询用户同时满足Masking Filter所指定的阈值时,数据脱敏策略才会生效。 + +## 触发脱敏策略 + +当系统接收到查询命令时,security\_plugin将在解析器中拦截语义分析生成的查询树(Query),首先根据用户登录信息(用户名、客户端、IP)筛选出满足用户场景的脱敏策略。由于脱敏策略是基于(仅包含表列的)资源标签配置的,因此需要判断查询树的目标节点是否属于某个资源标签,然后将识别到的资源标签与脱敏策略相匹配,根据策略内容将查询树目标节点改写,最终将查询树返还给解析器。 + +security\_plugin模块由于内置查询树脱敏方式,数据访问者不会感知内置安全策略重写查询树的过程,如同执行普通查询一样去访问数据,同时保护数据隐私。 + +## 实操示例 + +**1. 打开内置安全策略\[ 默认off \]** + +``` +[omm@lab01 ~]$ gs_guc reload -N all -I all -c "enable_security_policy=on" +[omm@lab01 ~]$ gsql -d postgres -p 26000 -c "show enable_security_policy ;" + enable_security_policy +------------------------ + on +``` + +**2. 创建测试表及数据** + +创建测试表person + +``` +create table person(id int primary key,name varchar(20),creditcard varchar(20),address varchar(50)); +insert into person values(1,'张三','1234-4567-7890-0123','huoyue Mansion, No. 98, 1st Fuhua Street'); +insert into person values(2,'李四','1111-2222-3333-4444','Futian District, Shenzhen City'); +select * from person; ++----+------+---------------------+------------------------------------------+ +| id | name | creditcard | address | ++----+------+---------------------+------------------------------------------+ +| 1 | 张三 | 1234-4567-7890-0123 | huoyue Mansion, No. 98, 1st Fuhua Street | +| 2 | 李四 | 1111-2222-3333-4444 | Futian District, Shenzhen City | ++----+------+---------------------+------------------------------------------+ +``` + +创建测试表orders + +``` +create table orders(id int primary key,pid int,customername varchar(20),order_no int,email varchar(50)); +insert into orders values(1,1,'李雷',13002345,'654321@qq.com'); +insert into orders values(2,1,'韩梅',13001234,'testdb@huawei.com'); +insert into orders values(3,2,'Jerry',13009876,'test123@google.com'); +select * from orders; ++----+-----+--------------+----------+ +| id | pid | customername | order_no | ++----+-----+--------------+----------+ +| 1 | 1 | 李雷 | 13002345 | +| 2 | 1 | 韩梅 | 13001234 | +| 3 | 2 | Jerry | 13009876 | ++----+-----+--------------+----------+ +``` + +**3. 
策略配置**
+
+创建资源标签【对表的敏感字段添加资源标签\(需要拥有poladmin权限\)】
+
+```
+create resource label creditcard_label add column(person.creditcard);
+create resource label customer_label add column(orders.customername);
+create resource label email_label add column(orders.email);
+create resource label id_label add column(orders.id);
+create resource label order_no_label add column(orders.order_no);
+create resource label pid_label add column(orders.pid);
+```
+
+创建脱敏策略
+
+```
+-- 语法:
+CREATE MASKING POLICY policy_name masking_clause [, ... ] [ policy_filter_clause ] [ ENABLE | DISABLE ];
+where masking_clause can be:
+masking_function ON LABEL(label_name [, ... ],*)
+where masking_function can be:
+{ maskall | randommasking | creditcardmasking | basicemailmasking | fullemailmasking | shufflemasking | alldigitsmasking }
+where policy_filter_clause can be:
+FILTER ON { ( FILTER_TYPE ( filter_value [, ... ],* ) ) [, ... ],* }
+where FILTER_TYPE can be:
+{ APP | ROLES | IP }
+
+-- 创建策略一【脱敏方式:creditcardmasking】
+策略名:mask_card_pol
+针对用户:user1
+针对IP:192.168.0.99
+针对应用:gsql
+脱敏方式:creditcardmasking
+create masking policy mask_card_pol
+    creditcardmasking on label (creditcard_label)
+    filter on roles('user1') ,IP('192.168.0.99'),APP('gsql');
+-- 小缺陷:测试发现应用程序无法识别"Data Studio",这个APP列表待完善,或者使用方法待说明
+
+-- 创建策略二:【脱敏方式:maskall】
+create masking policy mask_name_pol maskall on label(customer_label);
+
+-- 创建策略三:【脱敏方式:randommasking】
+create masking policy mask_id_pol randommasking on label(id_label);
+
+-- 创建策略四:【脱敏方式:basicemailmasking】
+create masking policy mask_email_pol basicemailmasking on label(email_label);
+
+-- 创建策略五:【脱敏方式:alldigitsmasking 】
+create masking policy mask_order_no_pol alldigitsmasking on label(order_no_label);
+
+-- 创建策略六:【脱敏方式:shufflemasking 】
+create masking policy mask_pid_pol shufflemasking on label(pid_label);
+```
+
+脱敏效果测试
+
+```
+[omm@lab01 ~]$ gsql -d mydb -p 26000 -h 192.168.0.99 -U user1 -r
+mydb=> select * from person;
+ id | name | creditcard | address
+----+------+---------------------+------------------------------------------
+ 1 | 张三 | xxxx-xxxx-xxxx-0123 | huoyue Mansion, No. 
98, 1st Fuhua Street + 2 | 李四 | xxxx-xxxx-xxxx-4444 | Futian District, Shenzhen City + +mydb=# select * from orders; + id | pid | customername | order_no | email +----+-----+--------------+----------+-------------------- + 0 | 0 | xx | 0 | xxxxxx@qq.com + 0 | 0 | xx | 0 | xxxxxx@huawei.com + 0 | 0 | xxxxx | 0 | xxxxxxx@google.com +``` + +相关数据字典 + +``` +-- 查询脱敏策略 +mydb=# select * from gs_masking_policy; + polname | polcomments | modifydate | polenabled +-------------------+-------------+----------------------------+------------ + mask_card_pol | | 2021-04-06 11:38:24.746857 | t + mask_name_pol | | 2021-04-06 14:53:48.3176 | t + mask_id_pol | | 2021-04-06 14:53:52.079475 | t + mask_order_no_pol | | 2021-04-06 14:55:50.421073 | t + mask_pid_pol | | 2021-04-06 15:00:09.927095 | t + mask_email_pol | | 2021-04-06 15:02:26.486597 | t + +-- 查询策略label +mydb=# select * from gs_policy_label; + labelname | labeltype | fqdnnamespace | fqdnid | relcolumn | fqdntype +------------------+-----------+---------------+--------+--------------+---------- + creditcard_label | resource | 2200 | 16404 | creditcard | column + customer_label | resource | 2200 | 16438 | customername | column + email_label | resource | 2200 | 16438 | email | column + id_label | resource | 2200 | 16438 | id | column + order_no_label | resource | 2200 | 16438 | order_no | column + pid_label | resource | 2200 | 16438 | pid | column + +-- 查询策略Filter +mydb=# select * from gs_masking_policy_filters; + filtertype | filterlabelname | policyoid | modifydate | logicaloperator +--------------+-----------------+-----------+---------------------------+----------------------------------------- + logical_expr | logical_expr | 16420 | 2021-04-06 11:38:24.74733 | **roles[16399]ip[192.168.0.99]app[gsql] +``` + +## 总结 + +关于脱敏方式的实测效果如下: + +maskall --\> 实测效果:将所有数据内容设置为x; + +creditcardmasking --\> 实测效果:保留连接符号\(-\)和末尾4位数字,其余全部设为x; + +basicemailmasking --\> 实测效果:将@符号之前的所有数据内容设为x; + +fullemailmasking --\> 实测效果:仅保留@符号和邮箱dot结尾,其余全部设为x; + +randommasking || shufflemasking || alldigitsmasking --\> 实测效果:将所有数据内容设置为0,这里仅测试了数值类型,没有测试其他数据类型的脱敏效果。 + +openGauss 的动态脱敏功能完全可以满足绝大多数的脱敏场景应用需求,但是从结果上面看,对数字类型的脱敏显得有些简单粗暴 \[ 将测试的主键列ID全部置0 \] 。 + +如果个人测试方法没有问题,那么这就存在应用的使用问题,在实际的测试环境中,如果动态脱敏后的数据丧失了其主键唯一性,这样的数据还需要做二次处理,无法直接使用。如果脱敏算法能够使用具有唯一特性的随机数对指定的数据进行动态脱敏,那就更好了,这一点期待进一步完善。 + diff --git "a/content/zh/post/jiajunfeng/openGauss\347\232\204WDR\346\212\245\345\221\212\350\257\246\347\273\206\350\247\243\350\257\273.md" "b/content/zh/post/jiajunfeng/openGauss\347\232\204WDR\346\212\245\345\221\212\350\257\246\347\273\206\350\247\243\350\257\273.md" new file mode 100644 index 0000000000000000000000000000000000000000..9f255e641d2bf643ab3b50e504fd7acb7f4d83dd --- /dev/null +++ "b/content/zh/post/jiajunfeng/openGauss\347\232\204WDR\346\212\245\345\221\212\350\257\246\347\273\206\350\247\243\350\257\273.md" @@ -0,0 +1,1222 @@ ++++ + +title = "openGauss的WDR报告详细解读" + +date = "2021-05-10" + +tags = ["openGauss核心技术"] + +archives = "2021-05" + +author = "贾军锋" + +summary = "openGauss的WDR报告详细解读" + +img = "/zh/post/jiajunfeng/title/img4.png" + +times = "12:30" + ++++ + +# openGauss的WDR报告详细解读 + +        openGauss数据库自2020年6月30日开源至今已有10个月了,在这短短的10个月内,openGauss社区用户下载量已达13W+、issue合并2000+、发行商业版本6个。仅3月份就有11家企业完成CLA签署,包括虚谷伟业、云和恩墨、优炫软件、海量数据、柏睿数据、快立方,电信天翼云、东方通、宝兰德、新数科技、深信服,正式加入openGauss社区,越来越多的力量参与到社区建设中。4月24日,openGauss社区理事会筹备会议在深圳大学城召开,邀请到国内著名的数据库技术方向上的多个公司、组织和机构,包括华为、招商银行、中国电信、云和恩墨、海量数据、人大金仓、神舟通用、虚谷伟业、快立方、亚信、超图软件、深信服、哈工大等机构参与,为openGauss社区开放治理迈出了新的一步。 +        
openGauss数据库在经历1.0.0/1.0.1/1.1.0三个版本的迭代发布后,于2021年3月31日发布openGauss 2.0.0版本,这是openGauss社区发布的第一个Release版本,该版本深度融合华为在数据库领域多年的经验,结合企业级场景需求,持续构建竞争力特性。
+        作为一名从Oracle DBA转做openGauss相关工作的“IT攻城狮”,在遇到性能诊断问题时,念念不忘的还是以前经常使用的AWR报告:通过这份报告,DBA可以较为全面地分析出数据库性能问题的所在范围,为下一步的数据库性能优化和故障诊断提供有力支撑。很高兴在openGauss数据库中也看到了类似的功能,那就是openGauss的**WDR报告**。
+        本文针对openGauss 2.0.0的WDR报告进行详细解读,帮助大家梳理WDR报告的数据来源以及相关含义,以便在openGauss数据库的性能诊断工作中游刃有余。关于具体的性能调优操作方法,由于涉及内容较多,这里就不再展开,再写下去就该被老板炒鱿鱼了。openGauss数据库归根结底还是数据库软件,主流的数据库调优方法在openGauss中也基本适用,大家仁者见仁、智者见智,根据WDR报告并结合自己已有的数据库调优方法,完全可以胜任openGauss绝大多数的性能调优工作。
+
+ +## 干货内容如下: +### 1. 启用WDR报告的snapshot收集 +```sql +$ gs_guc reload -N all -I all -c "enable_wdr_snapshot=on" +postgres=# select name,setting from pg_settings where name like '%wdr%'; + name | setting +-----------------------------+--------- + enable_wdr_snapshot | on -- 开启数据库监控快照功能 + wdr_snapshot_interval | 60 -- 后台Snapshot线程执行监控快照的时间间隔 + wdr_snapshot_query_timeout | 100 -- 快照操作相关的sql语句的执行超时时间 + wdr_snapshot_retention_days | 8 -- 系统中数据库监控快照数据的保留天数 +``` +
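+
+上述快照参数支持在线调整。例如,希望缩短采集间隔、延长保留周期时,可参考如下方式修改(示意,具体取值请结合磁盘空间与采集开销自行权衡):
+```sql
+-- 将快照采集间隔调整为30分钟、快照保留天数调整为15天(示意)
+$ gs_guc reload -N all -I all -c "wdr_snapshot_interval=30"
+$ gs_guc reload -N all -I all -c "wdr_snapshot_retention_days=15"
+```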
+
+### 2. WDR信息表
+**1> snapshot.snapshot** 【记录当前系统中存储的WDR快照信息】
+```sql
+postgres=# \d snapshot.snapshot
+          Table "snapshot.snapshot"
+   Column    |           Type           | Modifiers
+-------------+--------------------------+-----------
+ snapshot_id | bigint                   | not null   -- WDR快照序列号
+ start_ts    | timestamp with time zone |            -- WDR快照的开始时间
+ end_ts      | timestamp with time zone |            -- WDR快照的结束时间
+```
+**2> snapshot.tables_snap_timestamp**【记录所有表的WDR快照信息】
+```sql
+postgres=# \d snapshot.tables_snap_timestamp
+    Table "snapshot.tables_snap_timestamp"
+   Column    |           Type           | Modifiers
+-------------+--------------------------+-----------
+ snapshot_id | bigint                   | not null   -- WDR快照序列号
+ db_name     | text                     |            -- WDR snapshot对应的database
+ tablename   | text                     |            -- WDR snapshot对应的table
+ start_ts    | timestamp with time zone |            -- WDR快照的开始时间
+ end_ts      | timestamp with time zone |            -- WDR快照的结束时间
+```
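+
+借助这两张信息表可以快速确认快照的采集区间,下面给出一个查询示例(示意,列名即上文表结构中所列字段):
+```sql
+-- 查看最近5次快照及其起止时间
+select snapshot_id, start_ts, end_ts from snapshot.snapshot order by snapshot_id desc limit 5;
+-- 查看某次快照中各表数据的采集区间(以snapshot_id=25为例)
+select db_name, tablename, start_ts, end_ts from snapshot.tables_snap_timestamp where snapshot_id = 25 limit 10;
+```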
+ +### 3. WDR数据表 +说明:WDR的数据表保存在snapshot这个schema下以snap_开头的表,其数据来源于dbe_perf这个schema内的视图 +```sql +postgres=# select relname from pg_class where relname like '%snap_%'; +---------------------------------------------------------------------------------------------------------------- +snapshot.tables_snap_timestamp -- 记录所有存储的WDR快照中数据库、表对象、数据采集的开始、结束时间 +snapshot.snapshot -- 记录当前系统中存储的WDR快照数据的索引信息、开始、结束时间 +snapshot.snapshot_pkey -- snapshot.snapshot表的primary key +snapshot.snap_seq -- 序列 +snapshot.snap_global_os_runtime -- 操作系统运行状态信息 +snapshot.snap_global_os_threads -- 线程状态信息 +snapshot.snap_global_instance_time -- 各种时间消耗信息(时间类型见instance_time视图) +snapshot.snap_summary_workload_sql_count -- 各数据库主节点的workload上的SQL数量分布 +snapshot.snap_summary_workload_sql_elapse_time -- 数据库主节点上workload(业务)负载的SQL耗时信息 +snapshot.snap_global_workload_transaction -- 各节点上的workload的负载信息 +snapshot.snap_summary_workload_transaction -- 汇总的负载事务信息 +snapshot.snap_global_thread_wait_status -- 工作线程以及辅助线程的阻塞等待情况 +snapshot.snap_global_memory_node_detail -- 节点的内存使用情况 +snapshot.snap_global_shared_memory_detail -- 共享内存上下文的使用情况 +snapshot.snap_global_stat_db_cu -- 数据库的CU命中情况,可以通过gs_stat_reset()进行清零 +snapshot.snap_global_stat_database -- 数据库的统计信息 +snapshot.snap_summary_stat_database -- 汇总的数据库统计信息 +snapshot.snap_global_stat_database_conflicts -- 数据库冲突状态的统计信息 +snapshot.snap_summary_stat_database_conflicts -- 汇总的数据库冲突状态的统计信息 +snapshot.snap_global_stat_bad_block -- 表、索引等文件的读取失败信息 +snapshot.snap_summary_stat_bad_block -- 汇总的表、索引等文件的读取失败信息 +snapshot.snap_global_file_redo_iostat -- Redo(WAL)相关统计信息 +snapshot.snap_summary_file_redo_iostat -- 汇总的Redo(WAL)相关统计信息 +snapshot.snap_global_rel_iostat -- 数据对象IO统计信息 +snapshot.snap_summary_rel_iostat -- 汇总的数据对象IO统计信息 +snapshot.snap_global_file_iostat -- 数据文件IO统计信息 +snapshot.snap_summary_file_iostat -- 汇总的数据文件IO统计信息 +snapshot.snap_global_replication_slots -- 复制节点的信息 +snapshot.snap_global_bgwriter_stat -- 后端写进程活动的统计信息 +snapshot.snap_global_replication_stat -- 日志同步状态信息 +snapshot.snap_global_transactions_running_xacts -- 各节点运行事务的信息 +snapshot.snap_summary_transactions_running_xacts -- 汇总各节点运行事务的信息 +snapshot.snap_global_transactions_prepared_xacts -- 当前准备好进行两阶段提交的事务的信息 +snapshot.snap_summary_transactions_prepared_xacts -- 汇总的当前准备好进行两阶段提交的事务的信息 +snapshot.snap_summary_statement -- SQL语句的全量信息 +snapshot.snap_global_statement_count -- 当前时刻执行的DML/DDL/DQL/DCL语句统计信息 +snapshot.snap_summary_statement_count -- 汇总的当前时刻执行的DML/DDL/DQL/DCL语句统计信息 +snapshot.snap_global_config_settings -- 数据库运行时参数信息 +snapshot.snap_global_wait_events -- event等待相关统计信息 +snapshot.snap_summary_user_login -- 用户登录和退出次数的相关信息 +snapshot.snap_global_ckpt_status -- 实例的检查点信息和各类日志刷页情况 +snapshot.snap_global_double_write_status -- 实例的双写文件的情况 +snapshot.snap_global_pagewriter_status -- 实例的刷页信息和检查点信息 +snapshot.snap_global_redo_status -- 实例的日志回放情况 +snapshot.snap_global_rto_status -- 极致RTO状态信息 +snapshot.snap_global_recovery_status -- 主机和备机的日志流控信息 +snapshot.snap_global_threadpool_status -- 节点上的线程池中工作线程及会话的状态信息 +snapshot.snap_statement_responsetime_percentile -- SQL响应时间P80、P95分布信息 +snapshot.snap_global_statio_all_indexes -- 数据库中的每个索引行、显示特定索引的I/O的统计 +snapshot.snap_summary_statio_all_indexes -- 汇总的数据库中的每个索引行、显示特定索引的I/O的统计 +snapshot.snap_global_statio_all_sequences -- 数据库中每个序列的每一行、显示特定序列关于I/O的统计 +snapshot.snap_summary_statio_all_sequences -- 汇总的数据库中每个序列的每一行、显示特定序列关于I/O的统计 +snapshot.snap_global_statio_all_tables -- 数据库中每个表(包括TOAST表)的I/O的统计 +snapshot.snap_summary_statio_all_tables -- 汇总的数据库中每个表(包括TOAST表)的I/O的统计 +snapshot.snap_global_stat_all_indexes -- 数据库中的每个索引行,显示访问特定索引的统计 
+snapshot.snap_summary_stat_all_indexes -- 汇总的数据库中的每个索引行,显示访问特定索引的统计 +snapshot.snap_summary_stat_user_functions -- 汇总的数据库节点用户自定义函数的相关统计信息 +snapshot.snap_global_stat_user_functions -- 用户所创建的函数的状态的统计信息 +snapshot.snap_global_stat_all_tables -- 每个表的一行(包括TOAST表)的统计信息 +snapshot.snap_summary_stat_all_tables -- 汇总的每个表的一行(包括TOAST表)的统计信息 +snapshot.snap_class_vital_info -- 校验相同的表或者索引的Oid是否一致 +snapshot.snap_global_record_reset_time -- 重置(重启,主备倒换,数据库删除)openGauss统计信息时间 +snapshot.snap_summary_statio_indexes_name -- 表snap_summary_statio_all_indexes的索引 +snapshot.snap_summary_statio_tables_name -- 表snap_summary_statio_all_tables的索引 +snapshot.snap_summary_stat_indexes_name -- 表snap_summary_stat_all_indexes的索引 +snapshot.snap_class_info_name -- 表snap_class_vital_info的索引 +(66 rows) +---------------------------------------------------------------------------------------------------------------- +``` +
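+
+由于snapshot模式下的数据表均来源于dbe_perf模式内的同名视图(去掉snap_前缀),排查单项指标时,可以先查视图获取当前值,再按snapshot_id查对应的snap_表获取历史值,示例如下(示意):
+```sql
+-- 当前值:直接查询dbe_perf视图
+select * from dbe_perf.global_os_runtime;
+-- 历史值:按snapshot_id查询对应的WDR数据表
+select * from snapshot.snap_global_os_runtime where snapshot_id = 25;
+```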
+
+### 4. WDR报告创建
+#### 4.1 创建snapshot
+```sql
+-- 当开启enable_wdr_snapshot参数时,数据库默认每小时自动执行一次snapshot操作。
+-- 特定情况下,也可以手动调用函数创建snapshot,如:select create_wdr_snapshot();
+postgres=# select * from snapshot.snapshot offset 20;
+ snapshot_id |           start_ts            |            end_ts
+-------------+-------------------------------+-------------------------------
+          21 | 2021-04-21 05:59:09.337877+08 | 2021-04-21 05:59:10.249162+08
+          22 | 2021-04-21 06:59:10.3209+08   | 2021-04-21 06:59:11.229808+08
+          23 | 2021-04-21 07:59:10.426882+08 | 2021-04-21 07:59:11.340277+08
+          24 | 2021-04-21 08:59:10.534251+08 | 2021-04-21 08:59:11.447762+08
+          25 | 2021-04-21 09:59:11.448225+08 | 2021-04-21 09:59:26.121124+08
+```
+#### 4.2 查询数据库节点信息
+```sql
+postgres=# select * from pg_node_env;
+ node_name |     host     | process | port  | installpath  |     datapath      |          log_directory
+-----------+--------------+---------+-------+--------------+-------------------+---------------------------------
+ dn_6001   | 192.168.0.99 | 9442    | 26000 | /gaussdb/app | /gaussdb/data/db1 | /gaussdb/log/omm/pg_log/dn_6001
+```
+#### 4.3 创建WDR Report[使用gsql客户端生成]
+```sql
+postgres=# \a \t \o WDR_20210421.html    -- 切换非对齐、仅元组输出模式,并将结果重定向到WDR_20210421.html
+postgres=# select generate_wdr_report(24,25,'all','node','dn_6001');    -- 生成WDR报告
+postgres=# \o \a \t    -- 恢复默认输出设置
+```
+**函数说明:generate_wdr_report()**
+```sql
+-- 语法
+select generate_wdr_report(begin_snap_id bigint, end_snap_id bigint, report_type cstring, report_scope cstring, node_name cstring);
+-- 选项:
+begin_snap_id:查询时间段开始的snapshot的id(表snapshot.snapshot中的snapshot_id)
+end_snap_id:  查询时间段结束的snapshot的id,要求end_snap_id大于begin_snap_id(表snapshot.snapshot中的snapshot_id)
+report_type:  指定生成report的类型。例如,summary/detail/all,其中:summary[汇总数据]/detail[明细数据]/all[包含summary和detail]
+report_scope: 指定生成report的范围,可以为cluster或者node,其中:cluster是数据库级别的信息,node是节点级别的信息。
+node_name:    当report_scope指定为node时,需要把该参数指定为对应节点的名称;当report_scope为cluster时,该值可以省略或者指定为空或NULL。node[节点名称]、cluster[省略/空/NULL]
+```
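+
+按照上述语法,若希望生成数据库(cluster)级别的汇总报告,可参考如下调用方式(示意):
+```sql
+postgres=# \a \t \o WDR_cluster_20210421.html
+postgres=# select generate_wdr_report(24, 25, 'summary', 'cluster', null);
+postgres=# \o \a \t
+```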
+ +### 5. WDR报告解读 +说明:为了使得WDR报告内容不空洞,本次在测试环境使用BenchmarkSQL对openGauss数据库进行压力测试。 本次解读的WDR报告样例来自于此时采集的snapshot数据。 +![](../figures/wdr1.png) +**解读:** +这一部分是WDR报告的概况信息,从这一部分我们能得到如下信息: + +
+|信息分类|信息描述|
+|-|-|
+|报告采集类型|Summary + Detail,即汇总数据+明细数据|
+|Snapshot信息|使用snapshot_id为24和25的快照,采集2021-04-21(08:59 ~ 09:59)期间的运行信息|
+|硬件配置|1*1c/4g|
+|节点名|dn_6001|
+|openGauss版本|openGauss 2.0.0|
+ +**相关代码:** +```sql +第一部分,Report Type/Report Scope/Report Node内容来源于执行generate_wdr_report函数时输入的参数,详见源码“GenReport::ShowReportType(report_params* params)” +第二部分查询SQL:(变量ld-->snapshot_id) +select snapshot_id as "Snapshot Id", + to_char(start_ts, 'YYYY-MM-DD HH24:MI:SS') as "Start Time", + to_char(end_ts, 'YYYY-MM-DD HH24:MI:SS') as "End Time" +from snapshot.snapshot +where snapshot_id = %ld or snapshot_id = %ld; + +第三部分查询SQL:(变量ld-->snapshot_id) +select 'CPUS', x.snap_value +from (select * from pg_node_env) t, + (select * from snapshot.snap_global_os_runtime) x + where x.snap_node_name = t.node_name + and x.snapshot_id = %ld + and (x.snap_name = 'NUM_CPUS'); + +select 'CPU Cores', x.snap_value +from (select * from pg_node_env) t, + (select * from snapshot.snap_global_os_runtime) x + where x.snap_node_name = t.node_name + and x.snapshot_id = %ld + and x.snap_name = 'NUM_CPU_CORES'; + +select 'CPU Sockets', x.snap_value +from (select * from pg_node_env) t, + (select * from snapshot.snap_global_os_runtime) x + where x.snap_node_name = t.node_name + and x.snapshot_id = %ld + and x.snap_name = 'NUM_CPU_SOCKETS'; + +select 'Physical Memory', pg_size_pretty(x.snap_value) +from (select * from pg_node_env) t, + (select * from snapshot.snap_global_os_runtime) x +where x.snap_node_name = t.node_name +and x.snapshot_id = %ld +and x.snap_name = 'PHYSICAL_MEMORY_BYTES'; + +select node_name as "Host Node Name" from pg_node_env; +select version() as "openGauss Version"; +``` +![](../figures/wdr2.png) +**解读:** +这一部分是实例的效率百分比,目标值是100%,即越接近100%,数据库运行越健康。 +**Buffer Hit:**             即数据库请求的数据在buffer中命中的比例,该指标越高代表openGauss在buffer中查询到目标数据的概率越高,数据读取性能越好。 +**Effective CPU:**       即有效的CPU使用比例,该指标偏小则说明CPU的有效使用偏低,处于等待状态的比例可能较高。 +**WalWrite NoWait:** 即WAL日志写入时不等待的比例,该指标接近100%,说明buffer容量充足,可以满足WAL写操作的需求,若指标值偏小则可能需要调大buffer容量。 +**Soft Parse:**             即SQL软解析的比例,该指标接近100%,说明当前执行的SQL基本都可以在Buffer中找到,若指标值偏小则说明存在大量硬解析,需要分析原因,对DML语句进行适度优化。 +**Non-Parse CPU:**    即非解析占用的CPU比例,该指标接近100%,说明SQL解析并没有占用较多的CPU时间。 +**相关代码:** + +```sql +-- 变量:ld指的是snapshot_id,手动执行以下SQL语句时请自行替换对应的snapshot_id +select +unnest(array['Buffer Hit %%', 'Effective CPU %%', 'WalWrite NoWait %%', 'Soft Parse %%', 'Non-Parse CPU %%']) as "Metric Name", +unnest(array[case when s1.all_reads = 0 then 1 else round(s1.blks_hit * 100 / s1.all_reads) end, s2.cpu_to_elapsd, s3.walwrite_nowait, s4.soft_parse, s5.non_parse]) as "Metric Value" +from +(select (snap_2.all_reads - coalesce(snap_1.all_reads, 0)) as all_reads, + (snap_2.blks_hit - coalesce(snap_1.blks_hit, 0)) as blks_hit + from + (select sum(coalesce(snap_blks_read, 0) + coalesce(snap_blks_hit, 0)) as all_reads, + coalesce(sum(snap_blks_hit), 0) as blks_hit + from snapshot.snap_summary_stat_database + where snapshot_id = %ld) snap_1, + (select sum(coalesce(snap_blks_read, 0) + coalesce(snap_blks_hit, 0)) as all_reads, + coalesce(sum(snap_blks_hit), 0) as blks_hit + from snapshot.snap_summary_stat_database + where snapshot_id = %ld) snap_2 + ) s1, + (select round(cpu_time.snap_value * 100 / greatest(db_time.snap_value, 1)) as cpu_to_elapsd + from + (select coalesce(snap_2.snap_value, 0) - coalesce(snap_1.snap_value, 0) as snap_value + from + (select snap_stat_name, snap_value from snapshot.snap_global_instance_time + where snapshot_id = %ld and snap_stat_name = 'CPU_TIME') snap_1, + (select snap_stat_name, snap_value from snapshot.snap_global_instance_time + where snapshot_id = %ld and snap_stat_name = 'CPU_TIME') snap_2) cpu_time, + (select coalesce(snap_2.snap_value, 0) - coalesce(snap_1.snap_value, 0) 
as snap_value + from + (select snap_stat_name, snap_value from snapshot.snap_global_instance_time + where snapshot_id = %ld and snap_stat_name = 'DB_TIME') snap_1, + (select snap_stat_name, snap_value from snapshot.snap_global_instance_time + where snapshot_id = %ld and snap_stat_name = 'DB_TIME') snap_2) db_time + ) s2, + (select (bufferAccess.snap_wait - bufferFull.snap_wait) * 100 / greatest(bufferAccess.snap_wait, 1) as walwrite_nowait + from + (select coalesce(snap_2.snap_wait) - coalesce(snap_1.snap_wait, 0) as snap_wait + from + (select snap_wait from snapshot.snap_global_wait_events + where snapshot_id = %ld and snap_event = 'WALBufferFull') snap_1, + (select snap_wait from snapshot.snap_global_wait_events + where snapshot_id = %ld and snap_event = 'WALBufferFull') snap_2) bufferFull, + (select coalesce(snap_2.snap_wait) - coalesce(snap_1.snap_wait, 0) as snap_wait + from + (select snap_wait from snapshot.snap_global_wait_events + where snapshot_id = %ld and snap_event = 'WALBufferAccess') snap_1, + (select snap_wait from snapshot.snap_global_wait_events + where snapshot_id = %ld and snap_event = 'WALBufferAccess') snap_2) bufferAccess + ) s3, + (select round((snap_2.soft_parse - snap_1.soft_parse) * 100 / greatest((snap_2.hard_parse + snap_2.soft_parse)-(snap_1.hard_parse + snap_1.soft_parse), 1)) as soft_parse + from + (select sum(snap_n_soft_parse) as soft_parse, sum(snap_n_hard_parse) as hard_parse from snapshot.snap_summary_statement + where snapshot_id = %ld ) snap_1, + (select sum(snap_n_soft_parse) as soft_parse, sum(snap_n_hard_parse) as hard_parse from snapshot.snap_summary_statement + where snapshot_id = %ld ) snap_2 + ) s4, + (select round((snap_2.elapse_time - snap_1.elapse_time) * 100 /greatest((snap_2.elapse_time + snap_2.parse_time)-(snap_1.elapse_time + snap_1.parse_time), 1)) as non_parse + from + (select sum(snap_total_elapse_time) as elapse_time, sum(snap_parse_time) as parse_time from snapshot.snap_summary_statement + where snapshot_id = %ld ) snap_1, + (select sum(snap_total_elapse_time) as elapse_time, sum(snap_parse_time) as parse_time from snapshot.snap_summary_statement + where snapshot_id = %ld ) snap_2 + ) s5; +``` +![](../figures/wdr3.png) +**解读:** +这一部分列出了数据库Top 10的等待事件、等待次数、总等待时间、平均等待时间、等待事件类型。 +等待事件主要分为等待状态、等待轻量级锁、等待IO、等待事务锁这4类,详见下表所示: + +- 等待状态列表 +

+|wait_status值|含义|
+|-|-|
+|none|没在等任意事件。|
+|acquire lock|等待加锁,要么加锁成功,要么加锁等待超时。|
+|acquire lwlock|等待获取轻量级锁。|
+|wait io|等待IO完成。|
+|wait cmd|等待完成读取网络通信包。|
+|wait pooler get conn|等待pooler完成获取连接。|
+|wait pooler abort conn|等待pooler完成终止连接。|
+|wait pooler clean conn|等待pooler完成清理连接。|
+|pooler create conn: [\nodename], total N|等待pooler建立连接,当前正在与nodename指定节点建立连接,且仍有N个连接等待建立。|
+|get conn|获取到其他节点的连接。|
+|set cmd: [\nodename]|在连接上执行SET/RESET/TRANSACTION BLOCK LEVEL PARA SET/SESSION LEVEL PARA SET,当前正在nodename指定节点上执行。|
+|cancel query|取消某连接上正在执行的SQL语句。|
+|stop query|停止某连接上正在执行的查询。|
+|wait node: [\nodename](plevel), total N, [phase]|等待接收与某节点的连接上的数据,当前正在等待nodename节点plevel线程的数据,且仍有N个连接的数据待返回。如果状态包含phase信息,则可能的阶段状态有:begin(处于事务开始阶段)、commit(处于事务提交阶段)、rollback(处于事务回滚阶段)。|
+|wait transaction sync: xid|等待xid指定事务同步。|
+|wait wal sync|等待特定LSN的wal log完成到备机的同步。|
+|wait data sync|等待完成数据页到备机的同步。|
+|wait data sync queue|等待把行存的数据页或列存的CU放入同步队列。|
+|flush data: [\nodename](plevel), [phase]|等待向网络中nodename指定节点的plevel对应线程发送数据。如果状态包含phase信息,则可能的阶段状态为wait quota,即当前通信流正在等待quota值。|
+|stream get conn: [\nodename], total N|初始化stream flow时,等待与nodename节点的consumer对象建立连接,且当前有N个待建连对象。|
+|wait producer ready: [\nodename](plevel), total N|初始化stream flow时,等待每个producer都准备好,当前正在等待nodename节点plevel对应线程的producer对象准备好,且仍有N个producer对象处于等待状态。|
+|synchronize quit|stream plan结束时,等待stream线程组内的线程统一退出。|
+|wait stream nodegroup destroy|stream plan结束时,等待销毁stream node group。|
+|wait active statement|等待作业执行,正在资源负载管控中。|
+|analyze: [relname], [phase]|当前正在对表relname执行analyze。如果状态包含phase信息,则为autovacuum,表示是数据库自动开启AutoVacuum线程执行的analyze分析操作。|
+|vacuum: [relname], [phase]|当前正在对表relname执行vacuum。如果状态包含phase信息,则为autovacuum,表示是数据库自动开启AutoVacuum线程执行的vacuum清理操作。|
+|vacuum full: [relname]|当前正在对表relname执行vacuum full清理。|
+|create index|当前正在创建索引。|
+|HashJoin - [ build hash \| write file ]|当前是HashJoin算子,主要关注耗时的执行阶段:build hash表示正在建立哈希表,write file表示正在将数据写入磁盘。|
+|HashAgg - [ build hash \| write file ]|当前是HashAgg算子,主要关注耗时的执行阶段:build hash表示正在建立哈希表,write file表示正在将数据写入磁盘。|
+|HashSetop - [ build hash \| write file ]|当前是HashSetop算子,主要关注耗时的执行阶段:build hash表示正在建立哈希表,write file表示正在将数据写入磁盘。|
+|Sort \| Sort - write file|当前是Sort算子做排序,write file表示Sort算子正在将数据写入磁盘。|
+|Material \| Material - write file|当前是Material算子,write file表示Material算子正在将数据写入磁盘。|
+|NestLoop|当前是NestLoop算子。|
+|wait memory|等待内存获取。|
+|wait sync consumer next step|Stream算子等待消费者执行。|
+|wait sync producer next step|Stream算子等待生产者执行。|

+ + 当wait_status为acquire lwlock、acquire lock或者wait io时,表示有等待事件。 + 正在等待获取wait_event列对应类型的轻量级锁、事务锁,或者正在进行IO。 + +- 轻量级锁等待事件列表 +当wait_status值为acquire lwlock(轻量级锁)时对应的wait_event等待事件类型即为轻量级锁等待。 +wait_event为extension时,表示此时的轻量级锁是动态分配的锁,未被监控。 +

+|wait_event类型|类型描述|
+|-|-|
+|ShmemIndexLock|用于保护共享内存中的主索引哈希表。|
+|OidGenLock|用于避免不同线程产生相同的OID。|
+|XidGenLock|用于避免两个事务获得相同的xid。|
+|ProcArrayLock|用于避免并发访问或修改ProcArray共享数组。|
+|SInvalReadLock|用于避免与清理失效消息并发执行。|
+|SInvalWriteLock|用于避免与其它写失效消息、清理失效消息并发执行。|
+|WALInsertLock|用于避免与其它WAL插入操作并发执行。|
+|WALWriteLock|用于避免并发WAL写盘。|
+|ControlFileLock|用于避免pg_control文件的读写并发、写写并发。|
+|CheckpointLock|用于避免多个checkpoint并发执行。|
+|CLogControlLock|用于避免并发访问或者修改Clog控制数据结构。|
+|SubtransControlLock|用于避免并发访问或者修改子事务控制数据结构。|
+|MultiXactGenLock|用于串行分配唯一MultiXact id。|
+|MultiXactOffsetControlLock|用于避免对pg_multixact/offset的写写并发和读写并发。|
+|MultiXactMemberControlLock|用于避免对pg_multixact/members的写写并发和读写并发。|
+|RelCacheInitLock|用于失效消息场景对init文件进行操作时加锁。|
+|CheckpointerCommLock|用于向checkpointer发起文件刷盘请求场景,需要串行地向请求队列插入请求结构。|
+|TwoPhaseStateLock|用于避免并发访问或者修改两阶段信息共享数组。|
+|TablespaceCreateLock|用于确定tablespace是否已经存在。|
+|BtreeVacuumLock|用于防止vacuum清理B-tree中还在使用的页面。|
+|AutovacuumLock|用于串行化访问autovacuum worker数组。|
+|AutovacuumScheduleLock|用于串行化分配需要vacuum的table。|
+|AutoanalyzeLock|用于获取和释放允许执行Autoanalyze的任务资源。|
+|SyncScanLock|用于确定heap扫描时某个relfilenode的起始位置。|
+|NodeTableLock|用于保护存放数据库节点信息的共享结构。|
+|PoolerLock|用于保证两个线程不会同时从连接池里取到相同的连接。|
+|RelationMappingLock|用于等待更新系统表到存储位置之间映射的文件。|
+|AsyncCtlLock|用于避免并发访问或者修改共享通知状态。|
+|AsyncQueueLock|用于避免并发访问或者修改共享通知信息队列。|
+|SerializableXactHashLock|用于避免对于可串行事务共享结构的写写并发和读写并发。|
+|SerializableFinishedListLock|用于避免对于已完成可串行事务共享链表的写写并发和读写并发。|
+|SerializablePredicateLockListLock|用于保护对于可串行事务持有的锁链表。|
+|OldSerXidLock|用于保护记录冲突可串行事务的结构。|
+|FileStatLock|用于保护存储统计文件信息的数据结构。|
+|SyncRepLock|用于在主备复制时保护xlog同步信息。|
+|DataSyncRepLock|用于在主备复制时保护数据页同步信息。|
+|CStoreColspaceCacheLock|用于保护列存表的CU空间分配。|
+|CStoreCUCacheSweepLock|用于列存CU Cache循环淘汰。|
+|MetaCacheSweepLock|用于元数据循环淘汰。|
+|ExtensionConnectorLibLock|用于初始化ODBC连接场景,在加载与卸载特定动态库时进行加锁。|
+|SearchServerLibLock|用于GPU加速场景初始化加载特定动态库时,对读文件操作进行加锁。|
+|LsnXlogChkFileLock|用于串行更新特定结构中记录的主备机的xlog flush位置点。|
+|ReplicationSlotAllocationLock|用于主备复制时保护主机端的流复制槽的分配。|
+|ReplicationSlotControlLock|用于主备复制时避免并发更新流复制槽状态。|
+|ResourcePoolHashLock|用于避免并发访问或者修改资源池哈希表。|
+|WorkloadStatHashLock|用于避免并发访问或者修改包含数据库主节点的SQL请求构成的哈希表。|
+|WorkloadIoStatHashLock|用于避免并发访问或者修改用于统计当前数据库节点的IO信息的哈希表。|
+|WorkloadCGroupHashLock|用于避免并发访问或者修改Cgroup信息构成的哈希表。|
+|OBSGetPathLock|用于避免对obs路径的写写并发和读写并发。|
+|WorkloadUserInfoLock|用于避免并发访问或修改负载管理的用户信息哈希表。|
+|WorkloadRecordLock|用于避免并发访问或修改在内存自适应管理时对数据库主节点收到请求构成的哈希表。|
+|WorkloadIOUtilLock|用于保护记录iostat、CPU等负载信息的结构。|
+|WorkloadNodeGroupLock|用于避免并发访问或者修改内存中的nodegroup信息构成的哈希表。|
+|JobShmemLock|用于定时任务功能中保护定时读取的全局变量。|
+|OBSRuntimeLock|用于获取环境变量,如GAUSSHOME。|
+|LLVMDumpIRLock|用于导出动态生成函数所对应的汇编语言。|
+|LLVMParseIRLock|用于在查询开始处从IR文件中编译并解析已写好的IR函数。|
+|CriticalCacheBuildLock|用于从共享或者本地缓存初始化文件中加载cache的场景。|
+|WaitCountHashLock|用于保护用户语句计数功能场景中的共享结构。|
+|BufMappingLock|用于保护对共享缓冲映射表的操作。|
+|LockMgrLock|用于保护常规锁结构信息。|
+|PredicateLockMgrLock|用于保护可串行事务锁结构信息。|
+|OperatorRealTLock|用于避免并发访问或者修改记录算子级实时数据的全局结构。|
+|OperatorHistLock|用于避免并发访问或者修改记录算子级历史数据的全局结构。|
+|SessionRealTLock|用于避免并发访问或者修改记录query级实时数据的全局结构。|
+|SessionHistLock|用于避免并发访问或者修改记录query级历史数据的全局结构。|
+|CacheSlotMappingLock|用于保护CU Cache全局信息。|
+|BarrierLock|用于保证当前只有一个线程在创建Barrier。|
+|dummyServerInfoCacheLock|用于保护缓存加速openGauss连接信息的全局哈希表。|
+|RPNumberLock|用于加速openGauss的数据库节点对正在执行计划的任务线程的计数。|
+|ClusterRPLock|用于加速openGauss的CCN中维护的openGauss负载数据的并发存取控制。|
+|CBMParseXlogLock|CBM解析xlog时的保护锁。|
+|RelfilenodeReuseLock|避免错误地取消已重用的列属性文件的链接。|
+|RcvWriteLock|防止并发调用WalDataRcvWrite。|
+|PercentileLock|用于保护全局PercentileBuffer。|
+|CSNBufMappingLock|保护CSN页面。|
+|UniqueSQLMappingLock|用于保护uniquesql hash table。|
+|DelayDDLLock|防止并发DDL。|
+|CLOG Ctl|用于避免并发访问或者修改Clog控制数据结构。|
+|Async Ctl|保护Async buffer。|
+|MultiXactOffset Ctl|保护MultiXact offset的slru buffer。|
+|MultiXactMember Ctl|保护MultiXact member的slru buffer。|
+|OldSerXid SLRU Ctl|保护old xids的slru buffer。|
+|ReplicationSlotLock|用于保护ReplicationSlot。|
+|PGPROCLock|用于保护pgproc。|
+|MetaCacheLock|用于保护MetaCache。|
+|DataCacheLock|用于保护datacache。|
+|InstrUserLock|用于保护InstrUserHTAB。|
+|BadBlockStatHashLock|用于保护global_bad_block_stat hash表。|
+|BufFreelistLock|用于保证共享缓冲区空闲列表操作的原子性。|
+|CUSlotListLock|用于控制列存缓冲区槽位的并发操作。|
+|AddinShmemInitLock|保护共享内存对象的初始化。|
+|AlterPortLock|保护协调节点更改注册端口号的操作。|
+|FdwPartitionCaheLock|HDFS分区表缓冲区的管理锁。|
+|DfsConnectorCacheLock|DFSConnector缓冲区的管理锁。|
+|DfsSpaceCacheLock|HDFS表空间管理缓冲区的管理锁。|
+|FullBuildXlogCopyStartPtrLock|用于保护全量Build中Xlog拷贝的操作。|
+|DfsUserLoginLock|用于HDFS用户登录以及认证。|
+|LogicalReplicationSlotPersistentDataLock|用于保护逻辑复制过程中复制槽位的数据。|
+|WorkloadSessionInfoLock|保护负载管理session info内存hash表访问。|
+|InstrWorkloadLock|保护负载管理统计信息的内存hash表访问。|
+|PgfdwLock|用于管理实例向Foreign server建立连接。|
+|InstanceTimeLock|用于获取实例中会话的时间信息。|
+|XlogRemoveSegLock|保护Xlog段文件的回收操作。|
+|DnUsedSpaceHashLock|用于更新会话对应的空间使用信息。|
+|CsnMinLock|用于计算CSNmin。|
+|GPCCommitLock|用于保护全局Plan Cache hash表的添加操作。|
+|GPCClearLock|用于保护全局Plan Cache hash表的清除操作。|
+|GPCTimelineLock|用于保护全局Plan Cache hash表检查Timeline的操作。|
+|TsTagsCacheLock|用于时序tag缓存管理。|
+|InstanceRealTLock|用于保护共享实例统计信息hash表的更新操作。|
+|CLogBufMappingLock|用于提交日志缓存管理。|
+|GPCMappingLock|用于全局Plan Cache缓存管理。|
+|GPCPrepareMappingLock|用于全局Plan Cache缓存管理。|
+|BufferIOLock|保护共享缓冲区页面的IO操作。|
+|BufferContentLock|保护共享缓冲区页面内容的读取、修改。|
+|CSNLOG Ctl|用于CSN日志管理。|
+|DoubleWriteLock|用于双写的管理操作。|
+|RowPageReplicationLock|用于管理行存储的数据页复制。|
+|extension|其他轻量锁。|

+ +- IO等待事件列表 +当wait_status值为wait io时对应的wait_event等待事件类型即为IO等待事件。 +

+|wait_event类型|类型描述|
+|-|-|
+|BufFileRead|从临时文件中读取数据到指定buffer。|
+|BufFileWrite|向临时文件中写入指定buffer中的内容。|
+|ControlFileRead|读取pg_control文件。主要在数据库启动、执行checkpoint和主备校验过程中发生。|
+|ControlFileSync|将pg_control文件持久化到磁盘。数据库初始化时发生。|
+|ControlFileSyncUpdate|将pg_control文件持久化到磁盘。主要在数据库启动、执行checkpoint和主备校验过程中发生。|
+|ControlFileWrite|写入pg_control文件。数据库初始化时发生。|
+|ControlFileWriteUpdate|更新pg_control文件。主要在数据库启动、执行checkpoint和主备校验过程中发生。|
+|CopyFileRead|copy文件时读取文件内容。|
+|CopyFileWrite|copy文件时写入文件内容。|
+|DataFileExtend|扩展文件时向文件写入内容。|
+|DataFileFlush|将表数据文件持久化到磁盘。|
+|DataFileImmediateSync|将表数据文件立即持久化到磁盘。|
+|DataFilePrefetch|异步读取表数据文件。|
+|DataFileRead|同步读取表数据文件。|
+|DataFileSync|将表数据文件的修改持久化到磁盘。|
+|DataFileTruncate|表数据文件truncate。|
+|DataFileWrite|向表数据文件写入内容。|
+|LockFileAddToDataDirRead|读取"postmaster.pid"文件。|
+|LockFileAddToDataDirSync|将"postmaster.pid"内容持久化到磁盘。|
+|LockFileAddToDataDirWrite|将pid信息写到"postmaster.pid"文件。|
+|LockFileCreateRead|读取LockFile文件"%s.lock"。|
+|LockFileCreateSync|将LockFile文件"%s.lock"内容持久化到磁盘。|
+|LockFileCreateWRITE|将pid信息写到LockFile文件"%s.lock"。|
+|RelationMapRead|读取系统表到存储位置之间的映射文件。|
+|RelationMapSync|将系统表到存储位置之间的映射文件持久化到磁盘。|
+|RelationMapWrite|写入系统表到存储位置之间的映射文件。|
+|ReplicationSlotRead|读取流复制槽文件。重新启动时发生。|
+|ReplicationSlotRestoreSync|将流复制槽文件持久化到磁盘。重新启动时发生。|
+|ReplicationSlotSync|checkpoint时将流复制槽临时文件持久化到磁盘。|
+|ReplicationSlotWrite|checkpoint时写流复制槽临时文件。|
+|SLRUFlushSync|将pg_clog、pg_subtrans和pg_multixact文件持久化到磁盘。主要在执行checkpoint和数据库停机时发生。|
+|SLRURead|读取pg_clog、pg_subtrans和pg_multixact文件。|
+|SLRUSync|将脏页写入文件pg_clog、pg_subtrans和pg_multixact并持久化到磁盘。主要在执行checkpoint和数据库停机时发生。|
+|SLRUWrite|写入pg_clog、pg_subtrans和pg_multixact文件。|
+|TimelineHistoryRead|读取timeline history文件。在数据库启动时发生。|
+|TimelineHistorySync|将timeline history文件持久化到磁盘。在数据库启动时发生。|
+|TimelineHistoryWrite|写入timeline history文件。在数据库启动时发生。|
+|TwophaseFileRead|读取pg_twophase文件。在两阶段事务提交、两阶段事务恢复时发生。|
+|TwophaseFileSync|将pg_twophase文件持久化到磁盘。在两阶段事务提交、两阶段事务恢复时发生。|
+|TwophaseFileWrite|写入pg_twophase文件。在两阶段事务提交、两阶段事务恢复时发生。|
+|WALBootstrapSync|将初始化的WAL文件持久化到磁盘。在数据库初始化时发生。|
+|WALBootstrapWrite|写入初始化的WAL文件。在数据库初始化时发生。|
+|WALCopyRead|读取已存在的WAL文件并进行复制时产生的读操作。在执行归档恢复完成后发生。|
+|WALCopySync|将复制的WAL文件持久化到磁盘。在执行归档恢复完成后发生。|
+|WALCopyWrite|读取已存在WAL文件并进行复制时产生的写操作。在执行归档恢复完成后发生。|
+|WALInitSync|将新初始化的WAL文件持久化到磁盘。在日志回收或写日志时发生。|
+|WALInitWrite|将新创建的WAL文件初始化为0。在日志回收或写日志时发生。|
+|WALRead|从xlog日志读取数据。两阶段文件redo相关的操作产生。|
+|WALSyncMethodAssign|将当前打开的所有WAL文件持久化到磁盘。|
+|WALWrite|写入WAL文件。|
+|WALBufferAccess|WAL Buffer访问(出于性能考虑,内核代码里只统计访问次数,未统计其访问耗时)。|
+|WALBufferFull|WAL Buffer满时,写wal文件相关的处理。|
+|DoubleWriteFileRead|双写文件读取。|
+|DoubleWriteFileSync|双写文件强制刷盘。|
+|DoubleWriteFileWrite|双写文件写入。|
+|PredoProcessPending|并行日志回放中,当前记录回放等待其它记录回放完成。|
+|PredoApply|并行日志回放中,当前工作线程等待其他线程回放至本线程LSN。|
+|DisableConnectFileRead|HA锁分片逻辑文件读取。|
+|DisableConnectFileSync|HA锁分片逻辑文件强制刷盘。|
+|DisableConnectFileWrite|HA锁分片逻辑文件写入。|

+ +- 事务锁等待事件列表 +当wait_status值为acquire lock(事务锁)时对应的wait_event等待事件类型为事务锁等待事件。 + +|wait_event类型 |类型描述| +|-|-| +|relation|对表加锁| +|extend|对表扩展空间时加锁| +|partition|对分区表加锁| +|partition_seq|对分区表的分区加锁| +|page|对表页面加锁| +|tuple|对页面上的tuple加锁| +|transactionid|对事务ID加锁| +|virtualxid|对虚拟事务ID加锁| +|object|加对象锁| +|cstore_freespace|对列存空闲空间加锁| +|userlock|加用户锁| +|advisory|加advisory锁| + +**相关代码:** +```sql +-- 说明:变量ld-->snapshot_id, 变量s-->node_name +select snap_event as "Event", + snap_wait as "Waits", + snap_total_wait_time as "Total Wait Times(us)", + round(snap_total_wait_time/snap_wait) as "Wait Avg(us)", + snap_type as "Wait Class" +from ( + select snap_2.snap_event as snap_event, + snap_2.snap_type as snap_type, + snap_2.snap_wait - snap_1.snap_wait as snap_wait, + snap_2.total_time - snap_1.total_time as snap_total_wait_time + from + (select snap_event, + snap_wait, + snap_total_wait_time as total_time, + snap_type + from snapshot.snap_global_wait_events + where snapshot_id = %ld + and snap_event != 'none' + and snap_event != 'wait cmd' + and snap_event != 'unknown_lwlock_event' + and snap_nodename = '%s') snap_1, + (select snap_event, + snap_wait, + snap_total_wait_time as total_time, + snap_type + from snapshot.snap_global_wait_events + where snapshot_id = %ld + and snap_event != 'none' + and snap_event != 'wait cmd' + and snap_event != 'unknown_lwlock_event' + and snap_nodename = '%s') snap_2 + where snap_2.snap_event = snap_1.snap_event + order by snap_total_wait_time desc limit 10) +where snap_wait != 0; +``` +![](../figures/wdr4.png) +**解读:** +这一部分按照等待类型(STATUS、IO_EVENT、LWLOCK_EVENT、LOCK_EVENT),分类统计等待次数、总等待时间、平均等待时间。 +**相关代码:** + +```sql +-- 说明: 变量ld-->snapshot_id, 变量s-->node_name +select + snap_2.type as "Wait Class", + (snap_2.wait - snap_1.wait) as "Waits", + (snap_2.total_wait_time - snap_1.total_wait_time) as "Total Wait Time(us)", + round((snap_2.total_wait_time - snap_1.total_wait_time) / greatest((snap_2.wait - snap_1.wait), 1)) as "Wait Avg(us)" +from + (select + snap_type as type, + sum(snap_total_wait_time) as total_wait_time, + sum(snap_wait) as wait from snapshot.snap_global_wait_events + where snapshot_id = %ld + and snap_nodename = '%s' + and snap_event != 'unknown_lwlock_event' + and snap_event != 'none' + group by snap_type) snap_2 + left join + (select + snap_type as type, + sum(snap_total_wait_time) as total_wait_time, + sum(snap_wait) as wait + from snapshot.snap_global_wait_events + where snapshot_id = %ld + and snap_nodename = '%s' + and snap_event != 'unknown_lwlock_event' and snap_event != 'none' + group by snap_type) snap_1 + on snap_2.type = snap_1.type + order by "Total Wait Time(us)" desc; +``` +![](../figures/wdr5.png) +**解读:** +这一部分主机CPU的负载情况:CPU的平均负载、用户使用占比、系统使用占比、IO等待占比、空闲占比。 +**相关SQL:** + +```sql +select + snap_2.cpus as "Cpus", + snap_2.cores as "Cores", + snap_2.sockets as "Sockets", + snap_1.load as "Load Average Begin", + snap_2.load as "Load Average End", + round(coalesce((snap_2.user_time - snap_1.user_time), 0) / greatest(coalesce((snap_2.total_time - snap_1.total_time), 0), 1) * 100, 2) as "%User", + round(coalesce((snap_2.sys_time - snap_1.sys_time), 0) / greatest(coalesce((snap_2.total_time - snap_1.total_time), 0), 1) * 100, 2) as "%System", + round(coalesce((snap_2.iowait_time - snap_1.iowait_time), 0) / greatest(coalesce((snap_2.total_time - snap_1.total_time), 0), 1) * 100, 2) as "%WIO", + round(coalesce((snap_2.idle_time - snap_1.idle_time), 0) / greatest(coalesce((snap_2.total_time - snap_1.total_time), 0), 1) * 100, 2) as "%Idle" +from + (select H.cpus, 
H.cores, H.sockets, H.idle_time, H.user_time, H.sys_time, H.iowait_time, + (H.idle_time + H.user_time + H.sys_time + H.iowait_time) AS total_time, H.load from + (select C.cpus, E.cores, T.sockets, I.idle_time, U.user_time, S.sys_time, W.iowait_time, L.load from + (select snap_value as cpus from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'NUM_CPUS')) AS C, + (select snap_value as cores from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'NUM_CPU_CORES')) AS E, + (select snap_value as sockets from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'NUM_CPU_SOCKETS')) AS T, + (select snap_value as idle_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'IDLE_TIME')) AS I, + (select snap_value as user_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'USER_TIME')) AS U, + (select snap_value as sys_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'SYS_TIME')) AS S, + (select snap_value as iowait_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'IOWAIT_TIME')) AS W, + (select snap_value as load from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'LOAD')) AS L ) as H ) as snap_2, + + (select H.cpus, H.cores, H.sockets, H.idle_time, H.user_time, H.sys_time, H.iowait_time, + (H.idle_time + H.user_time + H.sys_time + H.iowait_time) AS total_time, H.load from + (select C.cpus, E.cores, T.sockets, I.idle_time, U.user_time, S.sys_time, W.iowait_time, L.load from + (select snap_value as cpus from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'NUM_CPUS')) AS C, + (select snap_value as cores from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'NUM_CPU_CORES')) AS E, + (select snap_value as sockets from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'NUM_CPU_SOCKETS')) AS T, + (select snap_value as idle_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'IDLE_TIME')) AS I, + (select snap_value as user_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'USER_TIME')) AS U, + (select snap_value as sys_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'SYS_TIME')) AS S, + (select snap_value as iowait_time from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'IOWAIT_TIME')) AS W, + (select snap_value as load from snapshot.snap_global_os_runtime + where (snapshot_id = %ld and snap_node_name = '%s' and snap_name = 'LOAD')) AS L ) as H ) as snap_1 ; +``` +![](../figures/wdr6.png) +**解读:** +这一部分描述了openGauss在快照期间的IO负载情况。 +**Database requests:** 即每秒IO请求次数,包括请求次数总和、读请求次数、写请求次数. +**Database(blocks):** 即每秒block请求数量,包含请求的block总和数量、读block的数量和写block的数量. 
+**Database(MB):** 即将block换算成容量(MB)[如:blocks * 8/1024],增加数据的可读性。 +**Redo requests和Redo(MB)** 分别表示每秒redo的写请求次数和redo写的数据量。 +相关代码: + +```sql +-- 由于源码中相关SQL融合了C++程序语法,像我这种不做开发的DBA读起来有些难以理解【如:(phyblkwrt * %d) >> 20 这个段没有很好理解】。 +-- 但是依旧不影响我们对这些数据采集方法的理解,相关SQL如下: +-- 两个snapshot_id(24和25)期间,数据块的IO统计信息(数值除以3600即换算成以秒为单位的WDR数据) +postgres=# select + (snap_2.phytotal - snap_1.phytotal) as phytotal, + (snap_2.phyblktotal - snap_1.phyblktotal) as phyblktotal, + (snap_2.phyrds - snap_1.phyrds) as phyrds, + (snap_2.phyblkrd - snap_1.phyblkrd) as phyblkrd, + (snap_2.phywrts - snap_1.phywrts) as phywrts, + (snap_2.phyblkwrt - snap_1.phyblkwrt) as phyblkwrt + from + (select (snap_phyrds + snap_phywrts) as phytotal, + (snap_phyblkwrt + snap_phyblkrd) as phyblktotal, + snap_phyrds as phyrds, snap_phyblkrd as phyblkrd, + snap_phywrts as phywrts, snap_phyblkwrt as phyblkwrt + from snapshot.snap_global_rel_iostat + where snapshot_id = 24 and snap_node_name = 'dn_6001') snap_1, + (select (snap_phyrds + snap_phywrts) as phytotal, + (snap_phyblkwrt + snap_phyblkrd) as phyblktotal, + snap_phyrds as phyrds, snap_phyblkrd as phyblkrd, + snap_phywrts as phywrts, snap_phyblkwrt as phyblkwrt + from snapshot.snap_global_rel_iostat + where snapshot_id = 25 and snap_node_name = 'dn_6001') snap_2; + phytotal | phyblktotal | phyrds | phyblkrd | phywrts | phyblkwrt +----------+-------------+---------+----------+---------+----------- + 4626892 | 4626892 | 2955639 | 2955639 | 1671253 | 1671253 + +-- 两个snapshot_id(24和25)期间,redo的统计信息(数值除以3600即换算成以秒为单位的WDR数据) +postgres=# select + (snap_2.phywrts - snap_1.phywrts) as phywrts, + (snap_2.phyblkwrt - snap_1.phyblkwrt) as phyblkwrt + from + (select sum(snap_phywrts) as phywrts, sum(snap_phyblkwrt) as phyblkwrt + from snapshot.snap_global_file_redo_iostat + where snapshot_id = 24 and snap_node_name = 'dn_6001') snap_1, + (select sum(snap_phywrts) as phywrts, sum(snap_phyblkwrt) as phyblkwrt + from snapshot.snap_global_file_redo_iostat + where snapshot_id = 25 and snap_node_name = 'dn_6001') snap_2; + phywrts | phyblkwrt +---------+----------- + 132721 | 509414 +``` +![](../figures/wdr7.png) +**解读:** +    这一部分描述了节点内存的变化信息,通过这些变化信息,我们可以了解到在两次快照期间,数据库的内存变化情况,作为数据库性能分析或异常分析的参考。数据来源于snapshot.snap_global_memory_node_detail。 +**这部分分别描述了:** 内存的类型 以及 对应的起始大小和终止大小。 +**这里没有采集到数据的原因:** 测试环境内存太小,导致启动时将memory protect关闭了,从而导致无法查询dbe_perf.global_memory_node_detail视图。 而WDR的内存统计数据(snapshot.snap_global_memory_node_detail)则来源于该视图。 +另外,请确保disable_memory_protect=off。 +关于这部分Memory Type常见类型如下: + +|Memory 类型|说明| +|-|-| +|max_process_memory |openGauss实例所占用的内存大小| +|process_used_memory|进程所使用的内存大小| +|max_dynamic_memory |最大动态内存| +|dynamic_used_memory|已使用的动态内存| +|dynamic_peak_memory|内存的动态峰值| +|dynamic_used_shrctx|最大动态共享内存上下文| +|dynamic_peak_shrctx|共享内存上下文的动态峰值| +|max_shared_memory |最大共享内存| +|shared_used_memory |已使用的共享内存| +|max_cstore_memory |列存所允许使用的最大内存| +|cstore_used_memory |列存已使用的内存大小| +|max_sctpcomm_memory|sctp通信所允许使用的最大内存| +|sctpcomm_used_memory|sctp通信已使用的内存大小| +|sctpcomm_peak_memory|sctp通信的内存峰值| +|other_used_memory |其他已使用的内存大小| +|gpu_max_dynamic_memory |GPU最大动态内存| +|gpu_dynamic_used_memory|GPU已使用的动态内存| +|gpu_dynamic_peak_memory|GPU内存的动态峰值| +|pooler_conn_memory |链接池申请内存计数| +|pooler_freeconn_memory |链接池空闲连接的内存计数| +|storage_compress_memory|存储模块压缩使用的内存大小| +|udf_reserved_memory |UDF预留的内存大小| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +select + snap_2.snap_memorytype as "Memory Type", + snap_1.snap_memorymbytes as "Begin(MB)", + snap_2.snap_memorymbytes as "End(MB)" +from + (select snap_memorytype, 
snap_memorymbytes + from + snapshot.snap_global_memory_node_detail + where (snapshot_id = %ld and snap_nodename = '%s') + and + (snap_memorytype = 'max_process_memory' + or snap_memorytype = 'process_used_memory' + or snap_memorytype = 'max_shared_memory' + or snap_memorytype = 'shared_used_memory')) + as snap_2 +left join + (select snap_memorytype, snap_memorymbytes + from + snapshot.snap_global_memory_node_detail + where (snapshot_id = %ld and snap_nodename = '%s') + and (snap_memorytype = 'max_process_memory' + or snap_memorytype = 'process_used_memory' + or snap_memorytype = 'max_shared_memory' + or snap_memorytype = 'shared_used_memory')) + as snap_1 +on snap_2.snap_memorytype = snap_1.snap_memorytype; +``` +![](../figures/wdr8.png) +**解读:** +这一部分描述了数据库各种状态所消耗的时间,关于Stat Name的解释如下: + +|Stat Name|说明| +|-|-| +|DB_TIME| 作业在多核下的有效时间花销| +|CPU_TIME|CPU的时间花销| +|EXECUTION_TIME|执行器内的时间花销| +|PARSE_TIME| SQL解析的时间花销| +|PLAN_TIME| 生成Plan的时间花销| +|REWRITE_TIME| SQL重写的时间花销| +|PL_EXECUTION_TIME | plpgsql(存储过程)执行的时间花销| +|PL_COMPILATION_TIME|plpgsql(存储过程)编译的时间花销| +|NET_SEND_TIME|网络上的时间花销| +|DATA_IO_TIME| IO上的时间花销| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +select t2.snap_node_name as "Node Name", + t2.snap_stat_name as "Stat Name", + (t2.snap_value - coalesce(t1.snap_value, 0)) as "Value(us)" +from + (select * from snapshot.snap_global_instance_time + where snapshot_id = %ld + and snap_node_name = '%s') t1 +right join + (select * from snapshot.snap_global_instance_time + where snapshot_id = %ld + and snap_node_name = '%s') t2 +on t1.snap_stat_name = t2.snap_stat_name +order by "Value(us)" +desc limit 200; +``` +![](../figures/wdr9.png) +![](../figures/wdr10.png) +![](../figures/wdr11.png) +**解读:** +这一部分分别从SQL执行时间、SQL消耗CPU的时间、SQL返回的行数、SQL扫描的行数、SQL执行的次数、SQL物理读的次数、SQL逻辑读的次数等多维度对两次快照期间的SQL执行情况进行统计。 +关于表中列的含义,如下所示: + +|列名称| 备注| +|-|-| +|Unique SQL Id| 归一化的SQL ID, 即SQL唯一标识| +|User Name| 执行SQL的用户| +|Logical Read| 逻辑读,即Buffer的块访问次数| +|Calls| SQL的调用次数| +|Min Elapse Time(us)| SQL在内核中的最小运行时间(单位:微秒)| +|Max Elapse Time(us)| SQL在内核中的最大运行时间(单位:微秒)| +|Total Elapse Time(us)| SQL在内核中的总运行时间 (单位:微秒)| +|Avg Elapse Time(us)| SQL在内核中的平均运行时间(单位:微秒)| +|Returned Rows| SELECT返回的结果集行数| +|Tuples Read| SQL扫描的总行数(包括顺序扫描和随机扫描)| +|Tuples Affected| SQL删除的行数| +|Physical Read| 物理读,即下盘读取block进入buffer的次数| +|CPU Time(us)| SQL消耗的CPU时间(单位:微秒)| +|Data IO Time(us)| IO上的时间花费(单位:微秒)| +|Sort Count| SQL排序执行的次数| +|Sort Time(us)| SQL排序执行的时间(单位:微秒)| +|Sort Mem Used(KB)| 排序过程中使用的work memory大小(单位:KB)| +|Sort Spill Count| 排序过程中发生落盘写文件的次数| +|Sort Spill Size(KB)| 排序过程中发生落盘写文件的大小(单位:KB)| +|Hash Count| hash执行的次数| +|Hash Time(us)| hash执行的时间(单位:微秒)| +|Hash Mem Used(KB)| hash过程中使用的work memory大小(单位:KB)| +|Hash Spill Count| hash过程中发生落盘写文件的次数| +|Hash Spill Size(KB)| hash过程中发生落盘写文件的大小(单位:KB)| +|SQL Text| SQL语句内容| +Tips:Top200显得有些冗余,多余的SQL信息并没有太大用处,反而降低了可读性,希望将来能优化到Top20,。 +**相关代码:** +```sql +-- 由于多个SQL统计信息的SQL语句类似,这里仅列举SQL执行时间的统计SQL,其他的类似。 +-- 说明:%s代表node_name,%ld代表snapshot_id +select t2.snap_unique_sql_id as "Unique SQL Id", + t2.snap_user_name as "User Name", + (t2.snap_total_elapse_time - coalesce(t1.snap_total_elapse_time, 0)) as "Total Elapse Time(us)", + (t2.snap_n_calls - coalesce(t1.snap_n_calls, 0)) as "Calls", + round("Total Elapse Time(us)"/greatest("Calls", 1), 0) as "Avg Elapse Time(us)", + t2.snap_min_elapse_time as "Min Elapse Time(us)", + t2.snap_max_elapse_time as "Max Elapse Time(us)", + (t2.snap_n_returned_rows - coalesce(t1.snap_n_returned_rows, 0)) as "Returned Rows", + ((t2.snap_n_tuples_fetched - 
coalesce(t1.snap_n_tuples_fetched, 0)) + + (t2.snap_n_tuples_returned - coalesce(t1.snap_n_tuples_returned, 0))) as "Tuples Read", + ((t2.snap_n_tuples_inserted - coalesce(t1.snap_n_tuples_inserted, 0)) + + (t2.snap_n_tuples_updated - coalesce(t1.snap_n_tuples_updated, 0)) + + (t2.snap_n_tuples_deleted - coalesce(t1.snap_n_tuples_deleted, 0))) as "Tuples Affected", + (t2.snap_n_blocks_fetched - coalesce(t1.snap_n_blocks_fetched, 0)) as "Logical Read", + ((t2.snap_n_blocks_fetched - coalesce(t1.snap_n_blocks_fetched, 0)) - + (t2.snap_n_blocks_hit - coalesce(t1.snap_n_blocks_hit, 0))) as "Physical Read", + (t2.snap_cpu_time - coalesce(t1.snap_cpu_time, 0)) as "CPU Time(us)", + (t2.snap_data_io_time - coalesce(t1.snap_data_io_time, 0)) as "Data IO Time(us)", + (t2.snap_sort_count - coalesce(t1.snap_sort_count, 0)) as "Sort Count", + (t2.snap_sort_time - coalesce(t1.snap_sort_time, 0)) as "Sort Time(us)", + (t2.snap_sort_mem_used - coalesce(t1.snap_sort_mem_used, 0)) as "Sort Mem Used(KB)", + (t2.snap_sort_spill_count - coalesce(t1.snap_sort_spill_count, 0)) as "Sort Spill Count", + (t2.snap_sort_spill_size - coalesce(t1.snap_sort_spill_size, 0)) as "Sort Spill Size(KB)", + (t2.snap_hash_count - coalesce(t1.snap_hash_count, 0)) as "Hash Count", + (t2.snap_hash_time - coalesce(t1.snap_hash_time, 0)) as "Hash Time(us)", + (t2.snap_hash_mem_used - coalesce(t1.snap_hash_mem_used, 0)) as "Hash Mem Used(KB)", + (t2.snap_hash_spill_count - coalesce(t1.snap_hash_spill_count, 0)) as "Hash Spill Count", + (t2.snap_hash_spill_size - coalesce(t1.snap_hash_spill_size, 0)) as "Hash Spill Size(KB)", + LEFT(t2.snap_query, 25) as "SQL Text" +from + (select * from snapshot.snap_summary_statement where snapshot_id = %ld and snap_node_name = '%s') t1 + right join + (select * from snapshot.snap_summary_statement where snapshot_id = %ld and snap_node_name = '%s') t2 + on t1.snap_unique_sql_id = t2.snap_unique_sql_id + and t1.snap_user_id = t2.snap_user_id + order by "Total Elapse Time(us)" + desc limit 200; +``` +![](../figures/wdr12.png) +**解读:** +这一部分分别从等待时长、等待次数这两个维度对等待事件进行统计。 +表格中列的含义即就是列的英文翻译,这里就不再复述了。 +具体的等待事件的介绍详见前文:“Top 10 Events by Total Wait Time”部分的内容,这里也不再复述。 +**相关代码:** + +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id,无论从哪个维度统计,基本的SQL语句差异不大,这里仅列举"by wait time"的SQL示例 +select t2.snap_type as "Type", + t2.snap_event as "Event", + (t2.snap_total_wait_time - coalesce(t1.snap_total_wait_time, 0)) as "Total Wait Time (us)", + (t2.snap_wait - coalesce(t1.snap_wait, 0)) as "Waits", + (t2.snap_failed_wait - coalesce(t1.snap_failed_wait, 0)) as "Failed Waits", + (case "Waits" + when 0 then 0 + else round("Total Wait Time (us)" / "Waits", 2) + end) as "Avg Wait Time (us)", + t2.snap_max_wait_time as "Max Wait Time (us)" +from + (select * from snapshot.snap_global_wait_events + where snapshot_id = %ld + and snap_nodename = '%s' + and snap_event != 'unknown_lwlock_event' + and snap_event != 'none') t1 +right join + (select * from snapshot.snap_global_wait_events + where snapshot_id = %ld + and snap_nodename = '%s' + and snap_event != 'unknown_lwlock_event' + and snap_event != 'none') t2 +on t1.snap_event = t2.snap_event +order by "Total Wait Time (us)" +desc limit 200; +``` +![](../figures/wdr13.png) +**解读:** +这一部分根据Heap block的命中率排序统计用户表的IO活动状态。 +数据来源于snapshot.snap_global_statio_all_indexes表和snapshot.snap_global_statio_all_tables表。 +该表相关列的介绍如下: + +|列名|描述| +|-|-| +|DB Name| 数据库名| +|Schema Name| 模式名| +|Table Name| 表名| +|%Heap Blks Hit Ratio| 数据块读取缓存命中率=heap_blks_hit/(heap_blks_read+heap_blks_hit)*100| +|Heap Blks Read| 
从该表中读取的磁盘块数| +|Heap Blks Hit| 此表缓存命中数| +|Idx Blks Read| 从表中所有索引读取的磁盘块数| +|Idx Blks Hit| 表中所有索引命中缓存数| +|Toast Blks Read| 此表的TOAST表读取的磁盘块数(如果存在)| +|Toast Blks Hit| 此表的TOAST表命中缓冲区数(如果存在)| +|Tidx Blks Read| 此表的TOAST表索引读取的磁盘块数(如果存在)| +|Tidx Blks Hit| 此表的TOAST表索引命中缓冲区数(如果存在)| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +SELECT table_io.db_name as "DB Name", + table_io.snap_schemaname as "Schema Name", + table_io.snap_relname as "Table Name", + table_io.heap_blks_hit_ratio as "%Heap Blks Hit Ratio", + table_io.heap_blks_read as "Heap Blks Read", + table_io.heap_blks_hit as "Heap Blks Hit", + idx_io.idx_blks_read as "Idx Blks Read", + idx_io.idx_blks_hit as "Idx Blks Hit", + table_io.toast_blks_read as "Toast Blks Read", + table_io.toast_blks_hit as "Toast Blks Hit", + table_io.tidx_blks_read as "Tidx Blks Read", + table_io.tidx_blks_hit as "Tidx Blks Hit" +FROM + (select t2.db_name, t2.snap_schemaname , t2.snap_relname , + (case + when ((t2.snap_heap_blks_read - coalesce(t1.snap_heap_blks_read, 0)) + (t2.snap_heap_blks_hit - coalesce(t1.snap_heap_blks_hit, 0))) = 0 + then 0 + else round((t2.snap_heap_blks_hit - coalesce(t1.snap_heap_blks_hit, 0))/ + ((t2.snap_heap_blks_read - coalesce(t1.snap_heap_blks_read, 0)) + (t2.snap_heap_blks_hit - coalesce(t1.snap_heap_blks_hit, 0))) * 100, 2) + end ) as heap_blks_hit_ratio, + (t2.snap_heap_blks_read - coalesce(t1.snap_heap_blks_read, 0)) as heap_blks_read, + (t2.snap_heap_blks_hit - coalesce(t1.snap_heap_blks_hit, 0)) as heap_blks_hit, + (t2.snap_idx_blks_read - coalesce(t1.snap_idx_blks_read, 0)) as idx_blks_read, + (t2.snap_idx_blks_hit - coalesce(t1.snap_idx_blks_hit, 0)) as idx_blks_hit, + (t2.snap_toast_blks_read - coalesce(t1.snap_toast_blks_read, 0)) as toast_blks_read, + (t2.snap_toast_blks_hit - coalesce(t1.snap_toast_blks_hit, 0)) as toast_blks_hit, + (t2.snap_tidx_blks_read - coalesce(t1.snap_tidx_blks_read, 0)) as tidx_blks_read, + (t2.snap_tidx_blks_hit - coalesce(t1.snap_tidx_blks_hit, 0)) as tidx_blks_hit from + (select * from snapshot.snap_global_statio_all_tables + where snapshot_id = %ld and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') t1 + right join + (select * from snapshot.snap_global_statio_all_tables + where snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') t2 + on t1.snap_relid = t2.snap_relid + and t2.db_name = t1.db_name + and t2.snap_schemaname = t1.snap_schemaname ) as table_io +LEFT JOIN + (select t2.db_name , t2.snap_schemaname , t2.snap_relname, + (t2.snap_idx_blks_read - coalesce(t1.snap_idx_blks_read, 0)) as idx_blks_read, + (t2.snap_idx_blks_hit - coalesce(t1.snap_idx_blks_hit, 0)) as idx_blks_hit + from + (select * from snapshot.snap_global_statio_all_indexes + where snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') t1 + right join + (select * from snapshot.snap_global_statio_all_indexes + where snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + AND snap_schemaname !~ '^pg_toast') t2 + on t1.snap_relid = t2.snap_relid + and t2.snap_indexrelid = t1.snap_indexrelid + and t2.db_name = t1.db_name and t2.snap_schemaname = t1.snap_schemaname) as idx_io +on table_io.db_name = idx_io.db_name +and table_io.snap_schemaname = 
idx_io.snap_schemaname +and table_io.snap_relname = idx_io.snap_relname +order by "%%Heap Blks Hit Ratio" +asc limit 200; +``` +![](../figures/wdr14.png) +**解读:** +这一部分根据索引缓存命中率,统计用户索引IO活动信息。 +数据来源于snapshot.snap_global_statio_all_indexes表。 +相关列信息如下: + +|列名| 介绍| +|-|-| +|DB Name| 数据库名| +|Schema Name| 模式名| +|Table Name| 表名| +|Index Name| 索引名| +|%Idx Blks Hit Ratio| 索引缓冲命中率="Idx Blks Hit"/("Idx Blks Hit"+"Idx Blks Read")*100| +|Idx Blks Read| 从索引中读取的磁盘块数| +|Idx Blks Hit| 索引命中缓存数| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +select t2.db_name as "DB Name", + t2.snap_schemaname as "Schema Name", + t2.snap_relname as "Table Name", + t2.snap_indexrelname as "Index Name", + (case + when ((t2.snap_idx_blks_read - coalesce(t1.snap_idx_blks_read, 0)) + (t2.snap_idx_blks_hit - coalesce(t1.snap_idx_blks_hit, 0))) = 0 then 0 + else + round((t2.snap_idx_blks_hit - coalesce(t1.snap_idx_blks_hit, 0))/((t2.snap_idx_blks_hit - coalesce(t1.snap_idx_blks_hit, 0)) + + (t2.snap_idx_blks_read - coalesce(t1.snap_idx_blks_read, 0))) * 100, 2) + end) as "%Idx Blks Hit Ratio", + (t2.snap_idx_blks_read - coalesce(t1.snap_idx_blks_read, 0)) as "Idx Blks Read", + (t2.snap_idx_blks_hit - coalesce(t1.snap_idx_blks_hit, 0)) as "Idx Blks Hit" +from + (select * from snapshot.snap_global_statio_all_indexes + where snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') t1 +right join + (select * from snapshot.snap_global_statio_all_indexes + where snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') t2 +on t1.snap_relid = t2.snap_relid +and t2.snap_indexrelid = t1.snap_indexrelid +and t2.db_name = t1.db_name +and t2.snap_schemaname = t1.snap_schemaname +order by "%Idx Blks Hit Ratio" +asc limit 200; +``` +![](../figures/wdr15.png) +**解读:** +**此处存在缺陷,统计的数据有问题,个人认为SQL语句需要修改,详见“相关代码”。** +这一部分描述的是后台写操作的统计信息,数据来源于snapshot.snap_global_bgwriter_stat表。 +具体内容如下: + +|列名| 数据获取相关函数| 说明| +|-|-|-| +|Checkpoints Timed| pg_stat_get_bgwriter_timed_checkpoints()| 执行的定期检查点数| +|Checkpoints Require| pg_stat_get_bgwriter_requested_checkpoints()| 执行的需求检查点数| +|Checkpoint Write Time(ms)| pg_stat_get_checkpoint_write_time()| 检查点操作中,文件写入到磁盘消耗的时间(单位:毫秒)| +|Checkpoint Sync Time(ms)| pg_stat_get_checkpoint_sync_time()| 检查点操作中,文件同步到磁盘消耗的时间(单位:毫秒)| +|Buffers Checkpoint| pg_stat_get_bgwriter_buf_written_checkpoints()| 检查点写缓冲区数量| +|Buffers Clean| pg_stat_get_bgwriter_buf_written_clean()| 后端写线程写缓冲区数量| +|Maxwritten Clean| pg_stat_get_bgwriter_maxwritten_clean()| 后端写线程停止清理Buffer的次数| +|Buffers Backend| pg_stat_get_buf_written_backend()| 通过后端直接写缓冲区数| +|Buffers Backend| Fsync pg_stat_get_buf_fsync_backend()| 后端不得不执行自己的fsync调用的时间数(通常后端写进程处理这些即使后端确实自己写)| +|Buffers Alloc| pg_stat_get_buf_alloc() 分配的缓冲区数量| +|Stats Reset| pg_stat_get_bgwriter_stat_reset_time()| 这些统计被重置的时间| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +select (snap_2.snap_checkpoints_timed - coalesce(snap_1.snap_checkpoints_timed, 0)) AS "Checkpoints Timed", + (snap_2.snap_checkpoints_req - coalesce(snap_1.snap_checkpoints_req, 0)) AS "Checkpoints Require", + (snap_2.snap_checkpoint_write_time - coalesce(snap_1.snap_checkpoint_write_time, 0)) AS "Checkpoint Write Time(ms)", + (snap_2.snap_checkpoint_sync_time - coalesce(snap_1.snap_checkpoint_sync_time, 0)) AS "Checkpoint Sync Time(ms)", + (snap_2.snap_buffers_checkpoint - coalesce(snap_1.snap_buffers_checkpoint, 0)) 
AS "Buffers Checkpoint", + (snap_2.snap_buffers_clean - coalesce(snap_1.snap_buffers_clean, 0)) AS "Buffers Clean", + (snap_2.snap_maxwritten_clean - coalesce(snap_1.snap_maxwritten_clean, 0)) AS "Maxwritten Clean", + (snap_2.snap_buffers_backend - coalesce(snap_1.snap_buffers_backend, 0)) AS "Buffers Backend", + (snap_2.snap_buffers_backend_fsync - coalesce(snap_1.snap_buffers_backend_fsync, 0)) AS "Buffers Backend Fsync", + (snap_2.snap_buffers_alloc - coalesce(snap_1.snap_buffers_alloc, 0)) AS "Buffers Alloc", + to_char(snap_2.snap_stats_reset, 'YYYY-MM-DD HH24:MI:SS') AS "Stats Reset" +from + (select * from snapshot.snap_global_bgwriter_stat + where snapshot_id = %ld + and snap_node_name = '%s') snap_2 +LEFT JOIN + (select * from snapshot.snap_global_bgwriter_stat + where snapshot_id = %ld + and snap_node_name = '%s') snap_1 +on snap_2.snapshot_id = snap_1.snapshot_id --错误点:snap_2.snapshot_id = snap_1.snapshot_id ? 这其实还是同一个snapshot +and snap_2.snap_node_name = snap_1.snap_node_name +and snap_2.snap_stats_reset = snap_1.snap_stats_reset +limit 200; + +-- 统计信息应该是2次snapshot之间的数据,而以上SQL并不能正确输出相关数据。个人觉得可以删除LEFT JOIN连接。 +-- 建议修改如下: +select (snap_2.snap_checkpoints_timed - coalesce(snap_1.snap_checkpoints_timed, 0)) AS "Checkpoints Timed", + (snap_2.snap_checkpoints_req - coalesce(snap_1.snap_checkpoints_req, 0)) AS "Checkpoints Require", + (snap_2.snap_checkpoint_write_time - coalesce(snap_1.snap_checkpoint_write_time, 0)) AS "Checkpoint Write Time(ms)", + (snap_2.snap_checkpoint_sync_time - coalesce(snap_1.snap_checkpoint_sync_time, 0)) AS "Checkpoint Sync Time(ms)", + (snap_2.snap_buffers_checkpoint - coalesce(snap_1.snap_buffers_checkpoint, 0)) AS "Buffers Checkpoint", + (snap_2.snap_buffers_clean - coalesce(snap_1.snap_buffers_clean, 0)) AS "Buffers Clean", + (snap_2.snap_maxwritten_clean - coalesce(snap_1.snap_maxwritten_clean, 0)) AS "Maxwritten Clean", + (snap_2.snap_buffers_backend - coalesce(snap_1.snap_buffers_backend, 0)) AS "Buffers Backend", + (snap_2.snap_buffers_backend_fsync - coalesce(snap_1.snap_buffers_backend_fsync, 0)) AS "Buffers Backend Fsync", + (snap_2.snap_buffers_alloc - coalesce(snap_1.snap_buffers_alloc, 0)) AS "Buffers Alloc", + to_char(snap_2.snap_stats_reset, 'YYYY-MM-DD HH24:MI:SS') AS "Stats Reset" +from + (select * from snapshot.snap_global_bgwriter_stat + where snapshot_id = %ld + and snap_node_name = '%s') snap_2, + (select * from snapshot.snap_global_bgwriter_stat + where snapshot_id = %ld + and snap_node_name = '%s') snap_1 +limit 200; +``` +![](../figures/wdr16.png) +**解读:**[本次实验环境是单机,没有复制槽数据] +这一部分描述的是复制槽的相关信息。数据来源于:snapshot.snap_global_replication_slots表。 +信息内容如下所示: + +|列名| 描述| +|-|-| +|Slot Name| 复制槽名称| +|Slot Type| 复制槽类型| +|DB Name| 数据库名称| +|Active| 是否为激活状态| +|Xmin| 事务标识,最早的事务ID(txid)| +|Restart Lsn| 事务槽开始复制时的LSN信息,用来判断哪些事务需要被复制| +|Dummy Standby| 是否为假备| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +SELECT snap_slot_name as "Slot Name", + snap_slot_type as "Slot Type", + snap_database as "DB Name", + snap_active as "Active", + snap_x_min as "Xmin", + snap_restart_lsn as "Restart Lsn", + snap_dummy_standby as "Dummy Standby" +FROM snapshot.snap_global_replication_slots +WHERE snapshot_id = %ld +and snap_node_name = '%s' +limit 200; +``` +![](../figures/wdr17.png) +**解读:**[本次实验环境是单机,没有复制槽数据] +这一部分描述事务槽详细的状态信息,数据源于snapshot.snap_global_replication_stat表。 +信息内容如下所示: + +|列名| 描述| +|-|-| +|Thread Id| 线程ID| +|Usesys Id| 用户OID| +|Usename| 用户名| +|Application Name| 应用程序名称| +|Client Addr| 客户端地址| +|Client Hostname| 客户端主机名| +|Client Port| 客户端端口| 
+|Backend Start| 程序启动时间| +|State| 日志复制状态【追赶状态/一直的流状态】| +|Sender Sent Location| 日志发送的位置| +|Receiver Write Location| 日志接收端write的位置| +|Receiver Flush Location| 日志接收端flush的位置| +|Receiver Replay Location| 日志接收端replay的位置| +|Sync Priority| 同步复制的优先级(0表示异步复制)| +|Sync State| 同步状态【异步复制/同步复制/潜在同步复制】| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +SELECT snap_pid as "Thread Id", + snap_usesysid as "Usesys Id", + snap_usename as "Usename", + snap_application_name as "Application Name", + snap_client_addr as "Client Addr", + snap_client_hostname as "Client Hostname", + snap_client_port as "Client Port", + snap_backend_start as "Backend Start", + snap_state as "State", + snap_sender_sent_location as "Sender Sent Location", + snap_receiver_write_location as "Receiver Write Location", + snap_receiver_flush_location as "Receiver Flush Location", + snap_receiver_replay_location as "Receiver Replay Location", + snap_sync_priority as "Sync Priority", + snap_sync_state as "Sync State" +FROM snapshot.snap_global_replication_stat +WHERE snapshot_id = %ld +and snap_node_name = '%s' limit 200; +``` +![](../figures/wdr18.png) +**解读:** +这一部分描述用户表状态的统计信息,数据源于snapshot.snap_global_stat_all_tables表。 +信息内容如下所示: + +|列名| 描述| +|-|-| +|DB Name| 数据库名称| +|Schema| 模式名称| +|Relname| 表名称| +|Seq Scan| 顺序扫描的次数| +|Seq Tup Read| 顺序扫描获取的活跃行数| +|Index Scan| 索引扫描次数| +|Index Tup Fetch| 索引扫描获取的活跃行数| +|Tuple Insert| 插入的行数| +|Tuple Update| 更新的行数| +|Tuple Delete| 删除的行数| +|Tuple Hot Update| HOT(Heap Only Tuple)更新行数备注:HOT更新指,如果更新后的新行和旧行位于同一个数据块内,则旧行会有一个指针指向新行,这样就不用更新索引了,通过索引访问到旧行数据,进而访问到新行数据。| +|Live Tuple| 活跃行数(估值)| +|Dead Tuple|死行数(估值)| +|Last Vacuum| 最后一次手动Vacuum的时间(不包含VACUUM FULL)| +|Last Autovacuum| 最后一次autovacuum的时间| +|Last Analyze| 最后一次手动Analyze表的时间| +|Last Autoanalyze| 最后一次autovacuum线程Analyze表的时间| +|Vacuum Count| 手动vacuum的次数(不包含VACUUM FULL)| +|Autovacuum Count| autovacuum的次数| +|Analyze Count| 手动Analyze的次数| +|Autoanalyze Count| autovacuum线程Analyze表的次数| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +SELECT snap_2.db_name as "DB Name", + snap_2.snap_schemaname as "Schema", + snap_2.snap_relname as "Relname", + (snap_2.snap_seq_scan - coalesce(snap_1.snap_seq_scan, 0)) as "Seq Scan", + (snap_2.snap_seq_tup_read - coalesce(snap_1.snap_seq_tup_read, 0)) as "Seq Tup Read", + (snap_2.snap_idx_scan - coalesce(snap_1.snap_idx_scan, 0)) as "Index Scan", + (snap_2.snap_idx_tup_fetch - coalesce(snap_1.snap_idx_tup_fetch, 0)) as "Index Tup Fetch", + (snap_2.snap_n_tup_ins - coalesce(snap_1.snap_n_tup_ins, 0)) as "Tuple Insert", + (snap_2.snap_n_tup_upd - coalesce(snap_1.snap_n_tup_upd, 0)) as "Tuple Update", + (snap_2.snap_n_tup_del - coalesce(snap_1.snap_n_tup_del, 0)) as "Tuple Delete", + (snap_2.snap_n_tup_hot_upd - coalesce(snap_1.snap_n_tup_hot_upd, 0)) as "Tuple Hot Update", + snap_2.snap_n_live_tup as "Live Tuple", + snap_2.snap_n_dead_tup as "Dead Tuple", + to_char(snap_2.snap_last_vacuum, 'YYYY-MM-DD HH24:MI:SS') as "Last Vacuum", + to_char(snap_2.snap_last_autovacuum, 'YYYY-MM-DD HH24:MI:SS') as "Last Autovacuum", + to_char(snap_2.snap_last_analyze, 'YYYY-MM-DD HH24:MI:SS') as "Last Analyze", + to_char(snap_2.snap_last_autoanalyze, 'YYYY-MM-DD HH24:MI:SS') as "Last Autoanalyze", + (snap_2.snap_vacuum_count - coalesce(snap_1.snap_vacuum_count, 0)) as "Vacuum Count", + (snap_2.snap_autovacuum_count - coalesce(snap_1.snap_autovacuum_count, 0)) as "Autovacuum Count", + (snap_2.snap_analyze_count - coalesce(snap_1.snap_analyze_count, 0)) as "Analyze Count", + (snap_2.snap_autoanalyze_count - 
coalesce(snap_1.snap_autoanalyze_count, 0)) as "Autoanalyze Count" +FROM + (SELECT * FROM snapshot.snap_global_stat_all_tables + WHERE snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + AND snap_schemaname !~ '^pg_toast') snap_2 +LEFT JOIN + (SELECT * FROM snapshot.snap_global_stat_all_tables + WHERE snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + AND snap_schemaname !~ '^pg_toast') snap_1 +ON snap_2.snap_relid = snap_1.snap_relid +AND snap_2.snap_schemaname = snap_1.snap_schemaname +AND snap_2.snap_relname = snap_1.snap_relname +AND snap_2.db_name = snap_1.db_name +order by snap_2.db_name, snap_2.snap_schemaname +limit 200; +``` +![](../figures/wdr19.png) +**解读:** +这一部分描述用户索引状态的统计信息,数据源于snapshot.snap_global_stat_all_indexes表。 +信息内容如下所示: + +|列名| 描述| +|-|-| +|DB Name| 数据库名称| +|Schema| 模式名称| +|Relname| 表名称| +|Index Relname| 索引名称| +|Index Scan| 索引扫描次数| +|Index Tuple Read| 索引扫描返回的索引条目数| +|Index Tuple Fetch| 索引扫描获取的活跃行数| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +SELECT snap_2.db_name as "DB Name", + snap_2.snap_schemaname as "Schema", + snap_2.snap_relname as "Relname", + snap_2.snap_indexrelname as "Index Relname", + (snap_2.snap_idx_scan - coalesce(snap_1.snap_idx_scan, 0)) as "Index Scan", + (snap_2.snap_idx_tup_read - coalesce(snap_1.snap_idx_tup_read, 0)) as "Index Tuple Read", + (snap_2.snap_idx_tup_fetch - coalesce(snap_1.snap_idx_tup_fetch, 0)) as "Index Tuple Fetch" +FROM + (SELECT * FROM snapshot.snap_global_stat_all_indexes + WHERE snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') snap_2 +LEFT JOIN + (SELECT * FROM snapshot.snap_global_stat_all_indexes + WHERE snapshot_id = %ld + and snap_node_name = '%s' + and snap_schemaname NOT IN ('pg_catalog', 'information_schema', 'snapshot') + and snap_schemaname !~ '^pg_toast') snap_1 +ON snap_2.snap_relid = snap_1.snap_relid +and snap_2.snap_indexrelid = snap_1.snap_indexrelid +and snap_2.snap_schemaname = snap_1.snap_schemaname +and snap_2.snap_relname = snap_1.snap_relname +and snap_2.snap_indexrelname = snap_1.snap_indexrelname +and snap_2.db_name = snap_1.db_name +order by snap_2.db_name, snap_2.snap_schemaname +limit 200; +``` +![](../figures/wdr20.png) +**解读:** +这一部分描述坏块的统计信息,数据源于snapshot.snap_global_stat_bad_block表。 +信息内容如下所示: + +|列名| 描述| +|-|-| +|DB Id| 数据库OID| +|Tablespace Id| 表空间OID| +|Relfilenode| relation的filenode号| +|Fork Number| fork编号| +|Error Count| error数量| +|First Time| 坏块第一次出现的时间| +|Last Time| 坏块最近一次出现的时间| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id + SELECT snap_2.snap_databaseid AS "DB Id", + snap_2.snap_tablespaceid AS "Tablespace Id", + snap_2.snap_relfilenode AS "Relfilenode", + snap_2.snap_forknum AS "Fork Number", + (snap_2.snap_error_count - coalesce(snap_1.snap_error_count, 0)) AS "Error Count", + snap_2.snap_first_time AS "First Time", + snap_2.snap_last_time AS "Last Time" +FROM + (SELECT * FROM snapshot.snap_global_stat_bad_block + WHERE snapshot_id = %ld + and snap_node_name = '%s') snap_2 +LEFT JOIN + (SELECT * FROM snapshot.snap_global_stat_bad_block + WHERE snapshot_id = %ld + and snap_node_name = '%s') snap_1 +ON snap_2.snap_databaseid = snap_1.snap_databaseid +and snap_2.snap_tablespaceid = snap_1.snap_tablespaceid +and snap_2.snap_relfilenode = snap_1.snap_relfilenode +limit 200; +``` +![](../figures/wdr21.png) 
+**解读:** +这一部分描述的是数据库参数配置信息,数据源于snapshot.snap_global_config_settings表。 +信息内容如下所示: + +|列名| 描述| +|-|-| +|Name| 参数名称| +|Abstract| 参数的简单描述| +|Type| 参数类型(bool/enum/integer/real/string)| +|Curent Value| 参数当前值| +|Min Value| 最小参数值| +|Max Value| 最大参数值| +|Category| 参数逻辑组| +|Enum Values| 参数枚举值| +|Default Value| 启动时的默认参数值| +|Reset Value| 重置时的默认参数值| +**相关代码:** +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +select snap_name as "Name", + snap_short_desc as "Abstract", + snap_vartype as "Type", + snap_setting as "Curent Value", + snap_min_val as "Min Value", + snap_max_val as "Max Value", + snap_category as "Category", + snap_enumvals as "Enum Values", + snap_boot_val as "Default Value", + snap_reset_val as "Reset Value" +FROM + snapshot.snap_global_config_settings + WHERE snapshot_id = %ld + and snap_node_name = '%s'; +``` +![](../figures/wdr22.png) +**解读:** +这一部分描述的是SQL语句的详细信息,数据来源于snapshot.snap_summary_statement表。 +**Unique SQL Id:** 即SQL的唯一识别ID +**SQL Text:** 即SQL的具体内容。 +**相关代码:** + +```sql +-- 说明:%s代表node_name,%ld代表snapshot_id +select (t2.snap_unique_sql_id) as "Unique SQL Id", + (t2.snap_query) as "SQL Text" +from + snapshot.snap_summary_statement t2 + where snapshot_id = %ld + and snap_node_name = '%s'; +``` +### 小结: +        关于openGauss WDR报告的梳理和解读就到这里,这些内容对于刚刚接触openGauss的老DBA已经足够了,只要读懂WDR报告的数据计算方法和含义即意味着看懂了这份WDR报告内容,剩下的数据库优化工作完全可以参照已有的数据库优化经验操作。当然了,“小白”DBA也完全可以参照网络上大佬们分享的数据库优化案例以及类似Oracle等主流数据库的AWR报告解读,学习openGauss的WDR报告,从而执行openGauss数据库相关的优化工作。 +        **以上所有观点仅代表个人,如有不正确之处欢迎大佬们指正。** + + diff --git "a/content/zh/post/jiajunfeng/openGauss\351\200\273\350\276\221\350\247\243\347\240\201.md" "b/content/zh/post/jiajunfeng/openGauss\351\200\273\350\276\221\350\247\243\347\240\201.md" new file mode 100644 index 0000000000000000000000000000000000000000..52b0c8aaad7f896dac7a064b73b8892c427aaf53 --- /dev/null +++ "b/content/zh/post/jiajunfeng/openGauss\351\200\273\350\276\221\350\247\243\347\240\201.md" @@ -0,0 +1,116 @@ ++++ + +title = "openGauss逻辑解码" + +date = "2021-06-01" + +tags = ["openGauss核心技术"] + +archives = "2021-06" + +author = "贾军锋" + +summary = "openGauss逻辑解码" + +img = "/zh/post/jiajunfeng/title/img4.png" + +times = "12:30" + ++++ + +# openGauss逻辑解码 + +逻辑复制由两部分组成:逻辑解码和数据复制。逻辑解码会输出以事务为单位组织的逻辑日志。业务或数据库中间件将会对逻辑日志进行解析并最终实现数据复制。 + +openGauss当前只提供逻辑解码功能,因此文只对逻辑解码进行简单说明和测试。 + +逻辑解码为逻辑复制提供事务解码的基础能力,openGauss使用SQL函数接口进行逻辑解码。此方法调用方便,不需使用工具,对接外部工具接口也比较清晰,不需要额外适配。 + +由于逻辑日志是以事务为单位的,在事务提交后才能输出,且逻辑解码是由用户驱动的;因此为了防止事务开始时的xlog被系统回收,或所需的事务信息被VACUUM回收,openGauss新增了逻辑复制槽,用于阻塞xlog的回收。 + +## 注意事项 + +- 不支持DDL语句解码。 +- 不支持列存、数据页复制的解码。 +- 不支持备机与级联备机进行逻辑解码。 +- 当执行DDL语句(如alter table)后,该DDL语句前尚未解码的物理日志可能会丢失。 +- 使用逻辑解码功能时,禁止进行数据库在线扩容。 +- 使用逻辑解码功能时,禁止执行VACUUM FULL。 +- 单条元组大小不超过1GB,考虑解码结果可能大于插入数据,因此建议单条元组大小不超过500MB。 +- openGauss支持解码的数据类型为:INTEGER、BIGINT、SMALLINT、TINYINT、SERIAL、SMALLSERIAL、BIGSERIAL、FLOAT、DOUBLE PRECISION、DATE、TIME\[WITHOUT TIME ZONE\]、TIMESTAMP\[WITHOUT TIME ZONE\]、CHAR\(n\)、VARCHAR\(n\)、TEXT。 +- 目前默认不支持ssl连接,如果需要ssl连接需要设置guc参数ssl=on。 +- 如果使用JDBC创建逻辑复制槽,则逻辑复制槽名称必须小于64个字符,且只包含字母、数字或者下划线中的一种或几种。 +- 当前逻辑复制不支持MOT特性。 +- 当逻辑复制槽所在数据库被删除后,这些复制槽变为不可用状态,需要用户手动删除。 +- 仅支持utf-8字符集。 +- 对多库的解码需要分别在库内创建流复制槽并开始解码,每个库的解码都需要单独扫一遍日志。 +- 不支持强起,强起后需要重新全量导出数据。 + +## 准备工作 + +``` +$ gs_guc reload -N all -I all -c "ssl=on" +$ gs_guc reload -N all -I all -c "wal_level=logical" +$ gs_guc reload -N all -I all -c "max_replication_slots=10" ## max_replication_slots>每个节点所需的(物理流复制槽数+逻辑复制槽数) +$ gs_om -t stop && gs_om -t start +``` + +Tips: + +- 物理流复制槽用于支撑主备HA。数据库所需要的物理流复制槽数为:备节点\(包括从备\)与主节点之间的比例\(假设数据库的高可用方案为1主3备,则所需物理流复制槽数为3\)。 +- 
一个逻辑复制槽只能解码一个Database的修改,如果需要解码多个Database,则需要创建多个逻辑复制槽。 +- 如果需要多路逻辑复制同步给多个目标数据库,在源端数据库需要创建多个逻辑复制槽,每个逻辑复制槽对应一条逻辑复制链路。 +- 仅限数据库管理员和拥有REPLICATION权限的用户进行操作。 + +## 创建逻辑复制槽 + +``` +$ gsql -d postgres -p 26000 -r -q +-- 创建一个名为slot1的逻辑复制槽,plugin_name当前仅支持mppdb_decoding +postgres=# SELECT * FROM pg_create_logical_replication_slot('slot1', 'mppdb_decoding'); + slotname | xlog_position +----------+--------------- + slot1 | 0/8948D100 -- 逻辑复制槽解码的起始LSN位置 +``` + +## 创建测试数据 + +``` +postgres=# create table logic_test(id int,date1 date); +postgres=# insert into logic_test values(1,now()); +postgres=# select * from logic_test ; + id | date1 +----+--------------------- + 1 | 2021-05-31 10:02:59 +``` + +## 读取复制槽逻辑解码结果 + +``` +postgres=# select * from pg_replication_slots; + slot_name | plugin | slot_type | datoid | database | active | xmin | catalog_xmin | restart_lsn | dummy_standby +-----------+----------------+-----------+--------+----------+--------+------+--------------+-------------+--------------- + slot1 | mppdb_decoding | logical | 15103 | postgres | f | | 13620 | 0/8948D080 | f +postgres=# select * from pg_logical_slot_peek_changes('slot1',null,4096); -- (slot_name,LSN,upto_nchanges) + location | xid | data +------------+-------+------------------------------------------------------------------------------------------------------------------ + 0/8948D278 | 13620 | BEGIN 13620 + 0/8948F088 | 13620 | COMMIT 13620 CSN 2277 + 0/8948F1F8 | 13621 | BEGIN 13621 + 0/8948F1F8 | 13621 | {"table_name":"public.logic_test","op_type":"INSERT","columns_name":["id","date1"],"columns_type":["integer","tim +estamp without time zone"],"columns_val":["1","'2021-05-31 10:02:59'"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + 0/8948F300 | 13621 | COMMIT 13621 CSN 2278 +``` + +Tips: + +- slot\_name: 流复制槽名称 +- LSN: 日志的LSN,表示只解码小于等于此LSN的日志, NULL表示不对解码截止的日志位置做限制 +- upto\_nchanges: 解码条数。假设一共有3条事务,分别包含3、5、7条记录,如果upto\_nchanges为4,那么会解码出前两个事务共8条记录。 + +## 删除逻辑复制槽slot1 + +``` +postgres=# select * from pg_drop_replication_slot('slot1'); +``` + diff --git "a/content/zh/post/jiajunfeng/\344\270\200\351\224\256\351\203\250\347\275\262openGauss2-0-0.md" "b/content/zh/post/jiajunfeng/\344\270\200\351\224\256\351\203\250\347\275\262openGauss2-0-0.md" new file mode 100644 index 0000000000000000000000000000000000000000..cd0393a6ff7bf6b9bfccf744d6dfb059a96a9da9 --- /dev/null +++ "b/content/zh/post/jiajunfeng/\344\270\200\351\224\256\351\203\250\347\275\262openGauss2-0-0.md" @@ -0,0 +1,264 @@ ++++ + +title = "一键部署openGauss2.0.0" + +date = "2021-04-19" + +tags = ["openGauss安装部署"] + +archives = "2021-04" + +author = "贾军锋" + +summary = "一键部署openGauss2.0.0" + +img = "/zh/post/jiajunfeng/title/img33.png" + +times = "15:30" + ++++ + +# 一键部署openGauss2.0.0 + +openGauss 从发布至今,安装部署碰到的问题比较多,也是大家学习openGauss数据库的第一道坎。 + +为了提高大家部署openGauss数据库的效率,个人将安装步骤写入shell脚本,在openEuler操作系统可以连接外网的情况下,实现一键式配置、下载、安装,希望对大家有所帮助。 + +``` +vi /root/auto_install.sh +----------------------------------------------------------------------------------------------------- +#!/bin/bash + +## Author: 贾军锋 +## Date: 2021-04-15 +## OS: openEuler20.03LTS [最小硬件配置:2c/4G] +## Database:openGauss 2.0.0 +## Description:一键式实现操作系统环境配置、openGauss软件下载、openGauss软件安装等步骤,帮助大家提升安装openGauss数据库效率 +## Tips: 请确保操作系统可以连接外网 + +## 0.关闭virbr0网卡 [本地虚拟机标准化安装openEuler系统会默认存在virbr0网卡,删除该网卡以避免干扰数据库的安装] +## virsh net-destroy default +## virsh net-list +## echo "Net device virbr0 is disabled." 
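+
+## 使用方法(示例,假设脚本保存为/root/auto_install.sh,以root用户执行):
+##   sh /root/auto_install.sh 2>&1 | tee /root/auto_install.log
+## 注意:执行前请先按实际环境修改下方"定义主机信息"处的变量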
+
+
+## 定义主机信息[请根据实际情况修改]
+export MY_HOSTNAME=node1  ## 主机名
+export MY_HOSTIP=192.168.8.133  ## IP地址
+export MY_SOFTWARE_DIRECTORY=/soft/openGauss  ## 软件包所在目录
+export MY_XML=/soft/openGauss/clusterconfig.xml  ## 集群配置文件XML
+export openGauss_Download_url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86_openEuler/openGauss-2.0.0-openEuler-64bit-all.tar.gz  ## openGauss软件包下载地址
+
+## 1. 设置主机名并配置hosts文件
+hostnamectl set-hostname $MY_HOSTNAME
+sed -i "/$MY_HOSTIP/d" /etc/hosts
+echo "$MY_HOSTIP  $MY_HOSTNAME  #Gauss OM IP Hosts Mapping" >> /etc/hosts
+cat /etc/hosts
+echo "1.Configure /etc/hosts completed."
+echo -e "\n"
+
+## 2. 关闭防火墙
+systemctl disable firewalld.service
+systemctl stop firewalld.service
+echo "Firewalld " `systemctl status firewalld|grep Active`
+echo "2.Disable firewalld service completed."
+echo -e "\n"
+
+## 3. 关闭SELinux
+sed -i '/^SELINUX=/d' /etc/selinux/config
+echo "SELINUX=disabled" >> /etc/selinux/config
+cat /etc/selinux/config|grep "SELINUX=disabled"
+echo "3.Disable SELINUX completed."
+echo -e "\n"
+
+
+## 4. 设置操作系统字符集编码
+echo "LANG=en_US.UTF-8" >> /etc/profile
+source /etc/profile
+echo $LANG
+echo "4.Configure encoding completed."
+echo -e "\n"
+
+## 5. 设置操作系统时区
+rm -fr /etc/localtime
+ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+date -R
+hwclock
+echo "5.Configure Timezone completed."
+echo -e "\n"
+
+## 6. 关闭SWAP分区 [对于2G内存的设备,建议待安装完毕后再打开SWAP以间接 “扩容内存容量”]
+sed -i '/swap/s/^/#/' /etc/fstab
+swapoff -a
+free -m
+echo "6.Close swap partition completed."
+echo -e "\n"
+
+
+## 7. 配置SSH服务,关闭Banner,允许root远程登录
+sed -i '/Banner/s/^/#/' /etc/ssh/sshd_config
+sed -i '/PermitRootLogin/s/^/#/' /etc/ssh/sshd_config
+echo -e "\n" >> /etc/ssh/sshd_config
+echo "Banner none " >> /etc/ssh/sshd_config
+echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
+cat /etc/ssh/sshd_config |grep -v ^#|grep -E 'PermitRoot|Banner'
+echo "7.Configure SSH Service completed."
+echo -e "\n"
+
+## 8. 配置YUM源、安装依赖包、修改默认Python3版本
+mkdir /etc/yum.repos.d/bak
+mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/
+wget -O /etc/yum.repos.d/openEulerOS.repo https://repo.huaweicloud.com/repository/conf/openeuler_x86_64.repo
+yum clean all
+yum install -y bzip2 python3
+yum install -y libaio-devel libnsl flex bison ncurses-devel glibc-devel patch readline-devel net-tools tar
+mv /usr/bin/python /usr/bin/python2_bak
+ln -s /usr/bin/python3 /usr/bin/python
+python -V
+echo "8.Configure Install Packages and change default Python version completed."
+echo -e "\n"
+
+
+## 9. 配置 sysctl.conf 和 performance.sh
+cat >> /etc/sysctl.conf << EOF
+net.ipv4.tcp_retries1 = 5
+net.ipv4.tcp_syn_retries = 5
+net.sctp.path_max_retrans = 10
+net.sctp.max_init_retransmits = 10
+EOF
+sysctl -p
+
+sed -i '/vm.min_free_kbytes/s/^/#/' /etc/profile.d/performance.sh  ## Only for openEuler
+cat /etc/profile.d/performance.sh|grep vm.min_free_kbytes
+
+echo "9.Configure sysctl.conf and performance.sh completed."
+echo -e "\n"
+
+
+## 10. 配置资源限制
+echo "* soft stack 3072" >> /etc/security/limits.conf
+echo "* hard stack 3072" >> /etc/security/limits.conf
+echo "* soft nofile 1000000" >> /etc/security/limits.conf
+echo "* hard nofile 1000000" >> /etc/security/limits.conf
+echo "* soft nproc unlimited" >> /etc/security/limits.d/90-nproc.conf
+tail -n 4 /etc/security/limits.conf
+tail -n 1 /etc/security/limits.d/90-nproc.conf
+echo "10.Configure resource limits completed."
+echo -e "\n"
+
+## 11. 
关闭透明大页[Only for CentOS]
+cat >>/etc/rc.d/rc.local<<EOF
+if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
+  echo never > /sys/kernel/mm/transparent_hugepage/enabled
+fi
+if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
+  echo never > /sys/kernel/mm/transparent_hugepage/defrag
+fi
+EOF
+chmod +x /etc/rc.d/rc.local
+/usr/bin/sh /etc/rc.d/rc.local
+cat /sys/kernel/mm/transparent_hugepage/enabled
+cat /sys/kernel/mm/transparent_hugepage/defrag
+echo "11.Close transparent_hugepage completed."
+echo -e "\n"
+
+
+## 12. 禁用RemoveIPC[Only for openEuler]
+sed -i '/^RemoveIPC/d' /etc/systemd/logind.conf
+sed -i '/^RemoveIPC/d' /usr/lib/systemd/system/systemd-logind.service
+echo "RemoveIPC=no" >> /etc/systemd/logind.conf
+echo "RemoveIPC=no" >> /usr/lib/systemd/system/systemd-logind.service
+systemctl daemon-reload
+systemctl restart systemd-logind
+loginctl show-session | grep RemoveIPC
+systemctl show systemd-logind | grep RemoveIPC
+echo "12.Disable RemoveIPC completed."
+echo -e "\n"
+
+
+## 13. 下载openGauss软件包
+mkdir -p $MY_SOFTWARE_DIRECTORY
+cd $MY_SOFTWARE_DIRECTORY
+wget $openGauss_Download_url
+echo "13.openGauss software download completed."
+echo -e "\n"
+
+## 14. 配置XML文件
+## 以下为单机(1主0备)XML配置示例,集群名、安装目录、数据目录与端口请按实际环境调整
+rm -fr $MY_XML
+cat >> $MY_XML <<EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<ROOT>
+  <CLUSTER>
+    <PARAM name="clusterName" value="dbCluster" />
+    <PARAM name="nodeNames" value="$MY_HOSTNAME" />
+    <PARAM name="gaussdbAppPath" value="/gauss/app" />
+    <PARAM name="gaussdbLogPath" value="/gauss/log" />
+    <PARAM name="gaussdbToolPath" value="/gauss/om" />
+    <PARAM name="corePath" value="/gauss/corefile" />
+    <PARAM name="backIp1s" value="$MY_HOSTIP" />
+  </CLUSTER>
+  <DEVICELIST>
+    <DEVICE sn="1000001">
+      <PARAM name="name" value="$MY_HOSTNAME" />
+      <PARAM name="azName" value="AZ1" />
+      <PARAM name="azPriority" value="1" />
+      <PARAM name="backIp1" value="$MY_HOSTIP" />
+      <PARAM name="sshIp1" value="$MY_HOSTIP" />
+      <PARAM name="dataNum" value="1" />
+      <PARAM name="dataPortBase" value="26000" />
+      <PARAM name="dataNode1" value="/gauss/data/db1" />
+    </DEVICE>
+  </DEVICELIST>
+</ROOT>
+EOF
+cat $MY_XML
+echo "14.Configure XML file completed."
+echo -e "\n"
+
+
+## 15. 解压安装包并修改目录权限
+echo "Begin to Uncompress openGauss Package and Modify directory permissions:"
+cd $MY_SOFTWARE_DIRECTORY
+tar -zxvf *all.tar.gz
+tar -zxvf *om.tar.gz
+ls -l
+chmod -R 775 $MY_SOFTWARE_DIRECTORY
+echo "15.Uncompress openGauss Package completed."
+echo -e "\n"
+
+## 16. 执行 gs_preinstall
+echo "Begin to execute openGauss preinstall:"
+python $MY_SOFTWARE_DIRECTORY/script/gs_preinstall -U omm -G dbgrp -X $MY_XML
+echo "16.openGauss preinstall completed."
+echo -e "\n"
+
+
+## 17. 检查预安装环境
+echo "Begin to Check OS environment:"
+$MY_SOFTWARE_DIRECTORY/script/gs_checkos -i A -h $MY_HOSTNAME --detail
+
+## 18. 执行 gs_install
+## 以omm用户执行gs_install完成安装,安装过程中需按提示输入数据库密码
+echo "Begin to execute openGauss install:"
+touch /home/omm/install_db
+cat >> /home/omm/install_db <<EOF
+gs_install -X $MY_XML
+EOF
+chown omm:dbgrp /home/omm/install_db
+su - omm -c "sh /home/omm/install_db"
+echo "18.openGauss install completed."
+echo -e "\n"
+```

diff --git "a/content/zh/post/jiajunfeng/\345\210\235\347\252\245openGauss-\344\271\213\347\264\242\345\274\225\346\216\250\350\215\220Index-advisor.md" "b/content/zh/post/jiajunfeng/\345\210\235\347\252\245openGauss-\344\271\213\347\264\242\345\274\225\346\216\250\350\215\220Index-advisor.md"
new file mode 100644
--- /dev/null
+++ "b/content/zh/post/jiajunfeng/\345\210\235\347\252\245openGauss-\344\271\213\347\264\242\345\274\225\346\216\250\350\215\220Index-advisor.md"
+# **初窥openGauss 之索引推荐\(Index-advisor\)**
+
+TPC-H 是一个面向分析型业务\(AP\)的基准测试,它由一系列热点查询组成,这些热点查询都是高度复杂的,因此执行时间往往都比较长。
+
+在本次实验测试中,将手动向数据库加载TPC-H数据,并保存在名为 tpch 的数据库中。默认TPC-H数据库的表缺少索引,数据库的参数并没有做任何优化,因此执行效率会比较差。
+
+本实验内容比较浅显,使用openGauss的索引推荐\(Index-advisor\)功能,对数据库进行性能优化,同时也让大家对Index-advisor功能有一个初步的了解。
+
+## 环境信息
+
+OS: CentOS Linux release 7.6.1810
+
+openGauss:2.0.0
+
+CPU:1core
+
+Memory:4GB
+
+测试数据脚本清单如下:
+
+```
+[omm@lab01 ~]$ ls -l ~/tpch-kit-back/
+total 1076780
+-rw------- 1 omm dbgrp 24196144 Apr 24 15:39 customer.tbl
+-rw------- 1 omm dbgrp 3814 Apr 24 15:39 dss.ddl
+-rw------- 1 omm dbgrp 753862072 Apr 24 15:39 lineitem.tbl
+-rw------- 1 omm dbgrp 287 May 25 10:52 load.sh
+-rw------- 1 omm dbgrp 2199 Apr 24 15:16 nation.tbl
+-rw------- 1 omm dbgrp 170452161 Apr 24 15:16 orders.tbl
+-rw------- 1 omm dbgrp 10553197 Apr 24 15:11 out0
+-rw------- 1 omm dbgrp 118184616 Apr 24 15:10 partsupp.tbl
+-rw------- 1 omm dbgrp 23935125 Apr 24 15:11 part.tbl
+drwx------ 3 omm dbgrp 4096 Apr 24 15:39 queries
+-rw------- 1 omm dbgrp 384 Apr 24 15:07 region.tbl
+-rw------- 1 omm dbgrp 1399184 Apr 24 15:07 supplier.tbl
+```
+
+## 1. 
创建数据库并导入数据 + +``` +-- 创建数据库tpch +[omm@lab01 ~]$ gsql -d postgres -p 26000 -c "create database tpch with encoding='UTF-8';" +-- 创建测试表 +[omm@lab01 ~]$ gsql -d tpch -p 26000 -f ~/tpch-kit-back/dss.ddl +-- 加载测试数据并统计分析 +[omm@lab01 ~]$ vi load.sh +--------------------------------------- +for i in `ls *.tbl`; do + table=${i/.tbl/} + echo "Loading $table..." + sed 's/|$//' $i > /tmp/$i + gsql -d tpch -p 26000 -c "TRUNCATE $table" + gsql -d tpch -p 26000 -c "\\copy $table FROM '/home/omm/tpch-kit-back/$i' CSV DELIMITER '|'" + gsql -d tpch -p 26000 -c "ANALYZE $table" +done +--------------------------------------- +sh load.sh +``` + +## 2. 执行第一次查询测试\(耗时:106s\) + +``` +[omm@lab01 ~]$ time gsql -d tpch -p 26000 -f /home/omm/tpch-kit-back/queries/queries.sql -o out0 +total time: 105949 ms +real 1m46.063s +user 0m0.707s +sys 0m0.026s +``` + +## 3. 索引信息查询\(当前没有任何索引\) + +``` +[omm@lab01 ~]$ gsql -d tpch -p 26000 -r +tpch=# \d + List of relations + Schema | Name | Type | Owner | Storage +--------+----------+-------+-------+---------------------------------- + public | customer | table | omm | {orientation=row,compression=no} + public | lineitem | table | omm | {orientation=row,compression=no} + public | nation | table | omm | {orientation=row,compression=no} + public | orders | table | omm | {orientation=row,compression=no} + public | part | table | omm | {orientation=row,compression=no} + public | partsupp | table | omm | {orientation=row,compression=no} + public | region | table | omm | {orientation=row,compression=no} + public | supplier | table | omm | {orientation=row,compression=no} +(8 rows) +tpch=# \di +No relations found. +tpch=# select * from pg_indexes where schemaname='public'; + schemaname | tablename | indexname | tablespace | indexdef +------------+-----------+-----------+------------+---------- +(0 rows) +``` + +## 4. 单条SQL查询索引推荐 + +``` +-- 未添加索引的查询效率(约4.9s) +[omm@lab01 ~]$ time gsql -d tpch -p 26000 -c "select * from lineitem where l_orderkey < 100 and l_suppkey > 50;" +real 0m4.916s +user 0m0.014s +sys 0m0.001s + +-- 使用索引推荐函数(gs_index_advise)获取优化建议 +tpch=# select *from gs_index_advise('select * from lineitem where l_orderkey < 100 and l_suppkey > 50;'); + table | column +----------+-------------- + lineitem | (l_orderkey) + +-- 创建索引 +tpch=# create index idx1 on lineitem(l_orderkey); + +-- 查看优化结果(约2.3s) +[omm@lab01 ~]$ time gsql -d tpch -p 26000 -c "select * from lineitem where l_orderkey < 100 and l_suppkey > 50;" +real 0m2.337s +user 0m0.009s +sys 0m0.007s +``` + +## 5. 
Workload级别索引推荐\(针对一批SQL语句的索引推荐\) + +``` +-- 获取推荐索引 +[omm@lab01 ~]$ cd /gauss/app/bin/dbmind/index_advisor/ +[omm@lab01 index_advisor]$ python3 ./index_advisor_workload.py 26000 tpch ~/queries/queries.sql -- 端口:26000 数据库:tpch +####################################### Generate candidate indexes ####################################### +table: lineitem columns: l_returnflag,l_linestatus +table: part columns: p_partkey,p_size +table: supplier columns: s_suppkey,s_nationkey +table: partsupp columns: ps_partkey,ps_suppkey +table: nation columns: n_nationkey,n_regionkey +table: orders columns: o_orderkey,o_custkey +table: customer columns: c_custkey,c_nationkey +table: orders columns: o_custkey,o_orderkey +table: lineitem columns: l_orderkey,l_suppkey +table: customer columns: c_custkey +table: part columns: p_partkey,p_type +table: supplier columns: s_suppkey +table: lineitem columns: l_suppkey,l_partkey,l_orderkey +table: part columns: p_partkey +table: lineitem columns: l_orderkey,l_partkey,l_suppkey +table: orders columns: o_orderkey +table: partsupp columns: ps_suppkey +table: lineitem columns: l_shipdate,l_receiptdate,l_commitdate,l_orderkey +table: lineitem columns: l_partkey +######################################## Determine optimal indexes ######################################## +create index ind0 on lineitem(l_shipdate,l_receiptdate,l_commitdate,l_orderkey); +create index ind1 on lineitem(l_returnflag,l_linestatus); +create index ind2 on lineitem(l_suppkey,l_partkey,l_orderkey); +create index ind3 on orders(o_orderkey,o_custkey); +create index ind4 on partsupp(ps_partkey,ps_suppkey); +create index ind5 on part(p_partkey,p_size); +create index ind6 on part(p_partkey,p_type); +create index ind7 on customer(c_custkey,c_nationkey); +create index ind8 on supplier(s_suppkey,s_nationkey); +create index ind9 on nation(n_nationkey,n_regionkey); + +-- 创建推荐的索引 +[omm@lab01 ~]$ gsql -d tpch -p 26000 -r +tpch=# create index ind0 on lineitem(l_shipdate,l_receiptdate,l_commitdate,l_orderkey); +tpch=# create index ind1 on lineitem(l_returnflag,l_linestatus); +tpch=# create index ind2 on lineitem(l_suppkey,l_partkey,l_orderkey); +tpch=# create index ind3 on orders(o_orderkey,o_custkey); +tpch=# create index ind4 on partsupp(ps_partkey,ps_suppkey); +tpch=# create index ind5 on part(p_partkey,p_size); +tpch=# create index ind6 on part(p_partkey,p_type); +tpch=# create index ind7 on customer(c_custkey,c_nationkey); +tpch=# create index ind8 on supplier(s_suppkey,s_nationkey); +tpch=# create index ind9 on nation(n_nationkey,n_regionkey); + +-- 测试查询脚本时间(耗时:77s,SQL查询相比之前快了29s) +[omm@lab01 ~]$ time gsql -d tpch -p 26000 -f /home/omm/tpch-kit-back/queries/queries.sql -o out0 +total time: 77200 ms +real 1m17.233s +user 0m0.665s +sys 0m0.020s +``` + diff --git "a/content/zh/post/jiajunfeng/\345\210\235\347\252\245openGauss-\344\271\213\345\217\202\346\225\260\350\207\252\350\260\203\344\274\230X-Tuner.md" "b/content/zh/post/jiajunfeng/\345\210\235\347\252\245openGauss-\344\271\213\345\217\202\346\225\260\350\207\252\350\260\203\344\274\230X-Tuner.md" new file mode 100644 index 0000000000000000000000000000000000000000..18e1f5f55b91bf737c673cec37b06ceb9af1f729 --- /dev/null +++ "b/content/zh/post/jiajunfeng/\345\210\235\347\252\245openGauss-\344\271\213\345\217\202\346\225\260\350\207\252\350\260\203\344\274\230X-Tuner.md" @@ -0,0 +1,311 @@ ++++ + +date = "2021-06-04" + +tags = ["openGauss核心技术"] + +archives = "2021-06" + +author = "贾军锋" + +summary = "初窥openGauss 之参数自调优X-Tuner)" + +img = 
"/zh/post/jiajunfeng/title/img33.png" + +times = "12:30" + ++++ + + + +# **初窥openGauss 之参数自调优X-Tuner\** + +TPC-H 是一个面向分析型业务\(AP\)的基准测试,它由一系列热点查询组成,这些热点查询都是高度复杂的,因此执行时间往往都比较长。 在本次实验测试中,将手动向数据库加载TPC-H数据,并保存在名为 tpch 的数据库中。默认TPC-H数据库的表缺少索引,数据库的参数并没有做任何优化,因此执行效率会比较差。 本实验比较浅显,使用openGauss的参数自调优\(X-Tuner:gs\_xtuner\)功能,对数据库进行参数优化,以提升数据库运行性能,让大家对X-Tuner参数自调优有一个初步的了解。 + +## **环境信息** + +OS:CentOS Linux release 7.6.1810 + +openGauss:2.0.0 + +CPU:1core + +Memory:4GB + +测试数据脚本清单如下: + +``` +[omm@lab01 ~]$ ls -l ~/tpch-kit-back/ +total 1076780 +-rw------- 1 omm dbgrp 24196144 Apr 24 15:39 customer.tbl +-rw------- 1 omm dbgrp 3814 Apr 24 15:39 dss.ddl +-rw------- 1 omm dbgrp 753862072 Apr 24 15:39 lineitem.tbl +-rw------- 1 omm dbgrp 287 May 25 10:52 load.sh +-rw------- 1 omm dbgrp 2199 Apr 24 15:16 nation.tbl +-rw------- 1 omm dbgrp 170452161 Apr 24 15:16 orders.tbl +-rw------- 1 omm dbgrp 10553197 Apr 24 15:11 out0 +-rw------- 1 omm dbgrp 118184616 Apr 24 15:10 partsupp.tbl +-rw------- 1 omm dbgrp 23935125 Apr 24 15:11 part.tbl +drwx------ 3 omm dbgrp 4096 Apr 24 15:39 queries +-rw------- 1 omm dbgrp 384 Apr 24 15:07 region.tbl +-rw------- 1 omm dbgrp 1399184 Apr 24 15:07 supplier.tbl +``` + +## 1. 配置pip,并安装setuptools-rust模块 + +``` +[root@lab01 ~]# wget https://bootstrap.pypa.io/get-pip.py +[root@lab01 ~]# python3 get-pip.py +[root@lab01 ~]# pip -V pip 21.1.2 from /usr/local/lib/python3.6/site-packages/ +pip (python 3.6) +[root@lab01 ~]# pip install setuptools-rust +``` + +## 2. 安装依赖包 + +``` +[omm@lab01 xtuner]$ pip install joblib +[omm@lab01 xtuner]$ pip install threadpoolctl +``` + +## 3. 创建数据库并导入数据 + +``` +-- 创建数据库tpch +[omm@lab01 ~]$ gsql -d postgres -p 26000 -c "create database tpch with encoding='UTF-8';" +-- 创建测试表 + [omm@lab01 ~]$ gsql -d tpch -p 26000 -f ~/tpch-kit-back/dss.ddl + -- 加载测试数据并统计分析 +[omm@lab01 ~]$ vi load.sh +--------------------------------------- +for i in `ls *.tbl`; do +table=${i/.tbl/} + echo "Loading $table..." + sed 's/|$//' $i > /tmp/$i + gsql -d tpch -p 26000 -c "TRUNCATE $table" + gsql -d tpch -p 26000 -c "\\copy $table FROM '/home/omm/tpch-kit-back/$i' CSV DELIMITER '|'" + gsql -d tpch -p 26000 -c "ANALYZE $table" +done + --------------------------------------- +sh load.sh +``` + +## 4. 编辑requirements.txt文件 + +``` +[omm@lab01 ~]$ cd /gauss/app/bin/dbmind/xtuner/ +[omm@lab01 xtuner]$ vi requirements.txt +--------------------------------------- +删除: + tensorflow>=2.2.0 + keras-rl2 + --------------------------------------- +``` + +## 5. 生成gs\_xtuner参数调优工具\(需要连接外网\) + +``` +[omm@lab01 ~]$ cd /gauss/app/bin/dbmind/xtuner +[omm@lab01 xtuner]$ python3 setup.py install --user +``` + +## 6. 执行快速推荐命令(基于已经作业执行的信息进行推荐,信息来源pg\_stat\_database等) + +``` +[omm@lab01 xtuner]$ gs_xtuner recommend --db-name tpch --db-user omm --host 192.168.0.99 --host-user omm --port 26000 +Please input the password of database: +Please input the password of host: +Start to recommend knobs. Just a moment, please. 
+************************************* Knob Recommendation Report **************************************** +INFO: ++---------------------------------------+----------------------+ +| Metric | Value | + +---------------------------------------+----------------------+ +| workload_type | ap | +| dirty_background_bytes | 0 | +| current_locks_count | 0.0 | +| current_prepared_xacts_count | 0.0 | +| rollback_commit_ratio | 0.0 | +| average_connection_age | 0.004575 | +| checkpoint_proactive_triggering_ratio | 0.00863557858376511 | +| fetched_returned_ratio | 0.055316264644388206 | +| cache_hit_rate | 0.5028061903026831 | +| os_cpu_count | 1 | +| current_connections | 1.0 | +| checkpoint_avg_sync_time | 1.07037996545769 | +| write_tup_speed | 101.161719229361 | +| used_mem | 131846656.0 | +| all_database_size | 2292057.41015625 | +| shared_buffer_heap_hit_rate | 25.917067253117217 | +| current_free_mem | 3270760 | +| temp_file_size | 3573.07285767967 | +| uptime | 38.3688171772222 | +| os_mem_total | 3879956 | +| checkpoint_dirty_writing_time_window | 450.0 | +| read_write_ratio | 47.82294541597867 | +| read_tup_speed | 4837.86775193848 | +| max_processes | 503 | +| track_activity_size | 503.0 | +| search_modify_ratio | 658741.9884425476 | +| ap_index | 7.5 | +| shared_buffer_toast_hit_rate | 76.6304347826087 | +| block_size | 8.0 | +| shared_buffer_tidx_hit_rate | 82.7893175074184 | +| shared_buffer_idx_hit_rate | 97.6601060219748 | +| enable_autovacuum | True | +| is_64bit | True | +| is_hdd | True | +| load_average | [1.19, 0.82, 0.8] | ++---------------------------------------+----------------------+ +p.s: The unit of storage is kB. +WARN: +[0]. + The number of CPU cores is a little small. Please do not run too high concurrency. + You are recommended to set max_connections based on the number of CPU cores. + If your job does not consume much CPU, you can also increase it. +[1]. + The value of wal_buffers is a bit high. Generally, an excessively large value does not bring better performance. + You can also set this parameter to -1. + The database automatically performs adaptation. +*********************************** Recommended Knob Settings ********************************************** ++---------------------------+-----------+--------+---------+---------+ +| name | recommend | min | max | restart | ++---------------------------+-----------+--------+---------+---------+ +| shared_buffers | 121256 | 72752 | 139448 | True | +| max_connections | 134 | 15 | 269 | True | +| effective_cache_size | 2909967 | 121256 | 2909967 | False | +| wal_buffers | 3789 | 2048 | 3789 | True | +| random_page_cost | 3.0 | 2.0 | 3.0 | False | +| default_statistics_target | 1000 | 100 | 1000 | False | ++---------------------------+-----------+--------+---------+---------+ +注意:修改该推荐值之前,请确保硬件条件满足,否则可能会造成数据库无法启动的问题。 +``` + +## 7. \[可选\]迭代推荐命令(全局搜索算法,迭代式执行,每轮执行约2分钟) + +- 1\> 修改配置文件 + +``` +vi /home/omm/.local/lib/python3.6/site-packages/openGauss_xtuner-2.0.0-py3.6.egg/tuner/xtuner.conf + ------------------------------------------------- +修改如下行: + max_iterations = 3 (从100轮改为3) + benchmark_path = /home/omm/queries +------------------------------------------------ +``` + +- 2\> 执行命令,观察Reward数值变化,粉色输出的轮次为当前较优数值 + +``` +[omm@lab01 ~]$ time gs_xtuner tune --db-name tpch --db-user omm --host localhost --host-user omm --port 26000 +Please input the password of database: +Please input the password of host: +Start to recommend knobs. Just a moment, please. 
+WARN: The database may restart several times during tuning, continue or not [yes|no]:yes +2021-05-26 11:09:12,710: Recorder is starting. +| iter | target | random... | +------------------------------------- +2021-05-26 11:10:58,017: [0] Current reward is -102.935543, knobs: {'random_page_cost': '2.64'}. +2021-05-26 11:10:58,018: [0] Best reward is -102.935543, knobs: {'random_page_cost': '2.64'}. +2021-05-26 11:10:58,018: [1] Database metrics: [0.6400000000000001, 0.6007798155874045, 0.65]. +2021-05-26 11:10:58,018: [1] Benchmark score: -102.899098, used mem: 36444544 kB, reward: -102.935543. +| 1 | -102.9 | 0.6426 | +2021-05-26 11:12:30,939: [1] Current reward is -91.541441, knobs: {'random_page_cost': '2'}. +2021-05-26 11:12:30,941: [1] Best reward is -91.541441, knobs: {'random_page_cost': '2'}. +2021-05-26 11:12:30,941: [2] Database metrics: [0.0, 0.6107552017890537, 2.6]. +2021-05-26 11:12:30,942: [2] Benchmark score: -91.504996, used mem: 36444544 kB, reward: -91.541441. +| 2 | -91.54 | 0.003251 | +2021-05-26 11:13:38,617: [2] Current reward is -66.684871, knobs: {'random_page_cost': '2.46'}. +2021-05-26 11:13:38,618: [2] Best reward is -66.684871, knobs: {'random_page_cost': '2.46'}. +2021-05-26 11:13:38,618: [3] Database metrics: [0.45999999999999996, 0.621014394376401, 3.47]. +2021-05-26 11:13:38,618: [3] Benchmark score: -66.648426, used mem: 36444544 kB, reward: -66.684871. +| 3 | -66.68 | 0.4565 | +2021-05-26 11:14:53,250: [3] Current reward is -73.748742, knobs: {'random_page_cost': '2.9'}. +2021-05-26 11:14:53,252: [3] Best reward is -66.684871, knobs: {'random_page_cost': '2.46'}. +2021-05-26 11:14:53,252: [4] Database metrics: [0.8999999999999999, 0.6286889335789447, 3.65]. +2021-05-26 11:14:53,252: [4] Benchmark score: -73.712297, used mem: 36444544 kB, reward: -73.748742. +| 4 | -73.75 | 0.9016 | +2021-05-26 11:15:58,798: [4] Current reward is -64.467620, knobs: {'random_page_cost': '2.45'}. +2021-05-26 11:15:58,799: [4] Best reward is -64.467620, knobs: {'random_page_cost': '2.45'}. +2021-05-26 11:15:58,799: [5] Database metrics: [0.4500000000000002, 0.633784310797396, 3.45]. +2021-05-26 11:15:58,799: [5] Benchmark score: -64.431175, used mem: 36444544 kB, reward: -64.467620. +| 5 | -64.47 | 0.4544 | +2021-05-26 11:16:59,097: [5] Current reward is -59.161970, knobs: {'random_page_cost': '2.43'}. +2021-05-26 11:16:59,099: [5] Best reward is -59.161970, knobs: {'random_page_cost': '2.43'}. +2021-05-26 11:16:59,099: [6] Database metrics: [0.43000000000000016, 0.6393591990442545, 3.91]. +2021-05-26 11:16:59,099: [6] Benchmark score: -59.125525, used mem: 36444544 kB, reward: -59.161970. +| 6 | -59.16 | 0.4304 | +2021-05-26 11:18:08,157: [6] Current reward is -67.964937, knobs: {'random_page_cost': '2.39'}. +2021-05-26 11:18:08,158: [6] Best reward is -59.161970, knobs: {'random_page_cost': '2.43'}. +2021-05-26 11:18:08,158: [7] Database metrics: [0.3900000000000001, 0.6445245622485726, 4.05]. +2021-05-26 11:18:08,158: [7] Benchmark score: -67.928493, used mem: 36444544 kB, reward: -67.964937. +| 7 | -67.96 | 0.3854 | +2021-05-26 11:19:11,917: [7] Current reward is -62.842104, knobs: {'random_page_cost': '2.43'}. +2021-05-26 11:19:11,918: [7] Best reward is -59.161970, knobs: {'random_page_cost': '2.43'}. +2021-05-26 11:19:11,918: [8] Database metrics: [0.43000000000000016, 0.6489102035318035, 3.5]. +2021-05-26 11:19:11,918: [8] Benchmark score: -62.805659, used mem: 36444544 kB, reward: -62.842104. 
+ | 8 | -62.84 | 0.4301 | +===================================== +2021-05-26 11:19:11,926: The tuning process is complete. The best reward is -59.161970, best knobs are: +{'random_page_cost': '2.43'}. + ****************************************** Knob Recommendation Report ************************************* +INFO: ++---------------------------------------+-----------------------+ +| Metric | Value | ++---------------------------------------+-----------------------+ +| workload_type | ap | +| dirty_background_bytes | 0 | +| current_locks_count | 0.0 | +| current_prepared_xacts_count | 0.0 | +| rollback_commit_ratio | 0.0002477694554770677 | +| average_connection_age | 0.004734 | +| checkpoint_proactive_triggering_ratio | 0.00938967136150235 | +| fetched_returned_ratio | 0.09276922373936373 | +| uptime | 0.224322521666667 | +| cache_hit_rate | 0.6006356117493342 | +| os_cpu_count | 1 | +| current_connections | 1.0 | +| checkpoint_avg_sync_time | 1.06359368331199 | +| search_modify_ratio | 1007080.6984163317 | +| max_processes | 137 | +| track_activity_size | 137.0 | +| all_database_size | 2292057.41015625 | +| temp_file_size | 2694.18229367111 | +| current_free_mem | 3298680 | +| shared_buffer_heap_hit_rate | 36.42339765350299 | +| used_mem | 36444544.0 | +| os_mem_total | 3879956 | +| checkpoint_dirty_writing_time_window | 450.0 | +| ap_index | 7.5 | +| shared_buffer_toast_hit_rate | 74.11273486430062 | +| read_tup_speed | 7942.47638202933 | +| block_size | 8.0 | +| read_write_ratio | 80.26596656844558 | +| shared_buffer_tidx_hit_rate | 84.41330998248687 | +| shared_buffer_idx_hit_rate | 96.54182833084825 | +| write_tup_speed | 98.9516516216125 | +| enable_autovacuum | True | +| is_64bit | True | +| is_hdd | True | +| load_average | [0.62, 1.08, 1.13] | + +---------------------------------------+-----------------------+ +p.s: The unit of storage is kB. +WARN: +[0]. The number of CPU cores is a little small. Please do not run too high concurrency. You are recommended to set max_connections based on the number of CPU cores. If your job does not consume much CPU, you can also increase it. +BAD: +[0]. The value of wal_buffers is too high. Generally, a large value does not bring better performance. 
********************************************* Recommended Knob Settings ******************************************** ++---------------------------+-----------+--------+---------+---------+ +| name | recommend | min | max | restart | ++---------------------------+-----------+--------+---------+---------+ +| random_page_cost | 2.43 | 2.0 | 3.0 | False | +| shared_buffers | 121256 | 72752 | 139448 | True | +| max_connections | 134 | 15 | 269 | True | +| effective_cache_size | 2909967 | 121256 | 2909967 | False | +| wal_buffers | 3789 | 1894 | 3789 | True | +| default_statistics_target | 1000 | 100 | 1000 | False | ++---------------------------+-----------+--------+---------+---------+ +real 10m12.961s user 0m6.827s sys 0m1.076s +``` + +![](figures/20210526-2330b2f9-8cb1-4739-8c62-ee6ed6621ba4.png) + diff --git "a/content/zh/post/jiajunfeng/\346\265\205\350\201\212openGauss\344\275\223\347\263\273\346\236\266\346\236\204.md" "b/content/zh/post/jiajunfeng/\346\265\205\350\201\212openGauss\344\275\223\347\263\273\346\236\266\346\236\204.md" new file mode 100644 index 0000000000000000000000000000000000000000..18f22040cad523e6bd5167fe501cc8cae5758837 --- /dev/null +++ "b/content/zh/post/jiajunfeng/\346\265\205\350\201\212openGauss\344\275\223\347\263\273\346\236\266\346\236\204.md" @@ -0,0 +1,292 @@ ++++ + +title = "浅聊openGauss体系架构" + +date = "2021-04-19" + +tags = ["openGauss安装部署"] + +archives = "2021-06" + +author = "贾军锋" + +summary = "浅聊openGauss体系架构" + +img = "/zh/post/jiajunfeng/title/img33.png" + +times = "15:30" + ++++ + +# **浅聊openGauss体系架构** + +2020年7月openGauss刚刚开源,我便开始对openGauss数据库的学习。根据以往学习数据库的经验,最先想了解的是openGauss数据库的架构,希望对即将使用的数据库各个模块有所了解。但鉴于时间有限,仅有的资料图是源码doc目录内的“openGauss逻辑结构图”,便针对该图做了简单介绍,并形成文档[《浅聊openGauss逻辑架构》](https://www.modb.pro/db/41842),感兴趣的小伙伴可以参考。 + +虽然已发表关于openGauss逻辑架构介绍的文章供大家参考,但总感觉缺少点什么\(想念学习Oracle时的那张体系架构图\)。今年初准备培训资料时参考相关资料绘制了一份简易的openGauss体系架构图,后来因为忙于其他工作,把这个事情忘记了。借着本次墨天轮举办的“我的国产数据库之路”,使我重新想起了这件事情,希望将这张图和相关介绍分享出来供大家参考。 + +**说明:** 本文内容仅代表个人观点。 + +![](figures/20210616-ec5e1c95-e663-4973-9626-e1d4316db95b.png) + +## **一、首先了解一下架构图中的Instance部分** + +学习过Oracle等主流数据库的小伙伴都清楚,Instance部分其实主要指的是数据库运行时的内存部分。 openGauss属于单进程多线程模型的数据库,客户端可以使用JDBC/ODBC/Libpq/Psycopg等驱动程序,向openGauss的后端管理线程GaussMaster发起连接请求。 + +**补充知识点:** + +- **JDBC** + + JDBC\(Java Database Connectivity,Java数据库连接\)是一种用于执行SQL语句的Java API,可以为多种关系数据库提供统一访问接口,应用程序可基于它操作数据。openGauss库提供了对JDBC 4.0特性的支持,需要使用JDK1.8版本编译程序代码,不支持JDBC桥接ODBC方式。 + +- **ODBC** + + ODBC\(Open Database Connectivity,开放数据库互连\)是由Microsoft公司基于X/OPEN CLI提出的用于访问数据库的应用程序编程接口。应用程序通过ODBC提供的API与数据库进行交互,增强了应用程序的可移植性、扩展性和可维护性。openGauss目前提供对ODBC 3.5的支持。但需要注意的是,当前数据库ODBC驱动基于开源版本,对于tinyint、smalldatetime、nvarchar2类型,在获取数据类型的时候,可能会出现不兼容。 + +- **Libpq** + + Libpq是openGauss的C语言程序接口。 客户端应用程序可以通过Libpq向openGauss后端服务进程发送查询请求并且获得返回的结果。需要注意的是,在官方文档中提到,openGauss没有对这个接口在应用程序开发场景下的使用做验证,不推荐用户使用这个接口做应用程序开发,建议用户使用ODBC或JDBC接口来替代。 + +- **Psycopg** + + Psycopg可以为openGauss数据库提供统一的Python访问接口,用于执行SQL语句。openGauss数据库支持Psycopg2特性,Psycopg2是对libpq的封装,主要使用C语言实现,既高效又安全。它具有客户端游标和服务器端游标、异步通信和通知、支持“COPY TO/COPY FROM”功能。支持多种类型Python开箱即用,适配PostgreSQL数据类型;通过灵活的对象适配系统,可以扩展和定制适配。Psycopg2兼容Unicode和Python 3。 + + +当 **GaussMaster** 线程接收到客户端程序发送过来的服务请求后,会根据收到的信息会立即fork\(\)一个子线程,这个子线程对请求进行身份验证成功后成为对应的后端业务处理子线程\( **gaussdb** \)。之后该客户端发送的请求将由此业务处理子线程\(gaussdb\)负责处理。当业务处理子线程\(gaussdb\)接收到客户端发送过来的查询\(SQL\)后,会调用openGauss的SQL引擎对SQL语句进行词法解析、语法解析、语义解析、查询重写等处理操作,然后使用查询优化器生成最小代价的查询路径计划。之后,SQL执行器会按照已制定的最优执行计划对SQL语句进行执行,并将执行结果反馈给客户端。 + +在SQL执行器的执行过程中通常会先访问内存的共享缓冲区\(如:shared buffer、cstore 
buffer、MOT等\),内存共享缓冲区缓存数据库常被访问的索引、表数据、执行计划等内容, 共享缓冲区的高速RAM硬件,为SQL的执行提供了高效的运行环境,大幅减少了磁盘IO,极大地提升了数据库性能,是数据库非常重要的组件之一。 + +**如图所示:** + +- **shared buffer** 是行存引擎默认使用的缓冲区,openGauss的行存引擎是将表按行存储到硬盘分区上,采用MVCC多版本并发控制,事务之间读写互不冲突,有着很好的并发性能,适合于OLTP场景。 +- **cstore buffers** 是列存引擎默认使用的缓冲区,列存引擎将整个表按照不同列划分为若干个CU\(Compression Unit,压缩单元\),以CU为单位进行管理,适合于OLAP场景。 +- **MOT** 是内存引擎默认使用的缓冲区,openGauss的MOT内存引擎的索引结构以及整体的数据组织都是基于Masstree模型实现的,其乐观并发控制和高效的缓存块利用率使得openGauss可以充分发挥内存的性能,同时,在确保高性能的前提下,内存引擎有着与openGauss原有机制相兼容的并行持久化和检查点能力\(CALC逻辑一致性异步检查点\),确保数据的永久存储,适合于高吞吐低时延的业务处理场景。 + +SQL执行器在共享缓冲区中对数据页的操作会被记录到 **WAL buffer** 中,当客户端发起事务的commit请求时,WAL buffer的内容将被WalWriter线程刷新到磁盘并保存在WAL日志文件中,确保那些已提交的事务都被永久记录,不会丢失。 但需要注意的是,当walwriter的写操作跟不上时数据库实际的需求时,常规后端线程仍然有权进行WAL日志的刷盘动作。这意味着WALWriter不是一个必要的进程,可以在请求时快速关闭。 + +- **maintenance\_work\_mem** 一般是在openGauss执行维护性操作时使用,如:VACUUM、CREATE INDEX、ALTER TABLE ADD FOREIGN KEY等操作,maintenance\_work\_mem内存区域的大小决定了维护操作的执行效率。 +- **temp\_buffer** 是每个数据库会话使用的LOCAL临时缓冲区,主要缓存会话所访问的临时表数据。需要注意的是,openGauss支持全局临时表和会话级临时表,全局临时表的表定义是全局的,而临时表的数据是各个会话私有的。 +- **work\_mem** 是事务执行内部排序或Hash表写入临时文件之前使用的内存缓冲区。 + +## **二、接下来我们再了解一下openGauss的后台辅助线程** + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
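+在对照下表之前,也可以先直观地看一下这些线程。下面是一组示例命令(假设以omm用户执行、数据库端口为26000;pg_thread_wait_status是openGauss提供的线程等待状态视图):
+
+```
+# 操作系统层面:gaussdb为单进程多线程模型,可直接列出该进程下的线程
+ps -T -p $(pgrep -x gaussdb | head -1) | head -20
+
+# 数据库层面:查看当前各线程的名称(可与下表对照)
+gsql -d postgres -p 26000 -c "select distinct thread_name from pg_thread_wait_status order by 1;"
+```
+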
+|线程名称| 描述|
+|-|-|
+|jemalloc_bg_thd| 管理并实现内存的动态分配|
+|StatCollector| 负责统计openGauss数据库的信息,包括:物理硬件资源使用信息、对象属性及使用信息、SQL运行信息、会话信息、锁信息、线程信息等,并且将这些收集到的统计信息保存在pgstat.stat文件中|
+|Auditor| 使用重定向的方式从管理线程、后台线程以及其他子线程获取审计数据,并保存在审计文件中|
+|LWLockMonitor| 负责检测轻量级锁(LWLock)产生的死锁,轻量级锁主要提供对共享内存的互斥访问控制,比如Clog buffer(事务提交状态缓存)、Shared buffers(数据页缓存)、Substran buffer(子事务缓存)等|
+|sysLogger| 使用重定向的方式捕获管理线程、后台线程以及其他子线程的stderr输出,并写入日志文件中|
+|Jobworker| JOB线程分为调度线程和工作线程。调度线程(JobScheduler)会根据pg_job表里面定义的JOB周期,对已经过期的JOB进行调用,由工作线程(Jobworker)执行实际的JOB任务|
+|percentworker| 根据percentile参数设置的值计算sql响应时间的百分比信息,目前percentile参数仅支持80和95|
+|snapshotworker| 收集snapshot信息,openGauss数据库的WDR报告依赖于snapshot|
+|ashworker| 统计历史活动会话相关信息|
+|alarm| openGauss的告警检测线程|
+|清理线程(AutoVacLauncher+AutoVacWorker)| AutoVacLauncher线程由Postmaster线程启动,它不断地将数据库需要做vacuum的对象信息保存在共享内存中,当表上被删除或更新的记录数超过设定的阈值时,会调用AutoVacWorker线程对表的存储空间执行回收清理工作|
+|WalSender| 运行在openGauss主备环境中主节点,发送预写日志给备节点|
+|WalReceiver| 运行在openGauss主备环境中备节点,接收预写日志记录|
+|pagewriter| 负责将脏页数据拷贝至双写(double-writer)区域并落盘,然后将脏页转发给bgwriter子线程进行数据下盘操作,如果发生数据页"折断"的问题,就会从双写空间里找到完整的数据页进行恢复|
+|bgwriter| 负责对共享缓冲区的脏页数据持续的进行刷盘操作,目的是让数据库线程在进行用户查询时可以很少或者几乎不等待写动作的发生,这样的机制同样也减少了检查点造成的性能下降|
+|Checkpointer| 周期性的发起数据库检查点,在这个检查点时刻,所有的数据文件都被更新,脏数据页也被刷新到磁盘,此刻数据库是一致的。openGauss支持全量检查点和增量检查点,增量检查点打开后会小批量的分阶段的滚筒式的去进行脏页刷盘|
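+
+上表中Checkpointer、bgwriter等线程的工作量,可以通过pg_stat_bgwriter视图直接观察,它与前文WDR报告中Checkpoints/Buffers相关指标的数据同源(示例,假设端口为26000):
+
+```
+# 查看检查点次数与各类刷脏页的累计统计
+gsql -d postgres -p 26000 -c "select checkpoints_timed, checkpoints_req, buffers_checkpoint, buffers_clean, buffers_backend from pg_stat_bgwriter;"
+```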
+
+关于其他后台辅助线程的介绍,可以参考文章[《浅聊openGauss逻辑架构》](https://www.modb.pro/db/41842)。
+
+## **三、Database相关文件**
+
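+下表各目录均位于数据库实例的数据目录下,可对照实际环境查看(示例,数据目录以/gauss/data/db1为假设,请按实际路径调整):
+
+```
+# 列出数据目录下的子目录,并查看几个主要目录的大小
+ls -l /gauss/data/db1
+du -sh /gauss/data/db1/base /gauss/data/db1/global /gauss/data/db1/pg_xlog 2>/dev/null
+```
+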
+|目录名称| 描述|
+|-|-|
+|base| openGauss数据库对象默认存储在该目录,如默认的数据库postgres、用户创建的数据库及关联的表等对象|
+|global| 存储openGauss共享的系统表或者说是共享的数据字典表|
+|pg_tblspc| 即是openGauss的表空间目录,里面存储openGauss定义的表空间的目录软链接,这些软链接指向openGauss数据库表空间文件的实际存储目录|
+|pg_xlog| 存储openGauss数据库的WAL日志文件|
+|pg_clog| 存储openGauss数据库事务提交状态信息|
+|pg_csnlog| 存储openGauss数据库的快照信息,openGauss事务启动时会创建一个CSN快照,在MVCC机制下,CSN作为openGauss的逻辑时间戳,模拟数据库内部的时序,用来判断其他事务对于当前事务是否可见|
+|pg_twophase| 存储两阶段事务提交信息,用来确保数据一致性|
+|pg_serial| 存储已提交的可序列化事务信息|
+|pg_multixact| 存储多事务状态信息,一般用于共享行级锁(shared row locks)|
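+
+其中base目录下的子目录以数据库OID命名,可以通过pg_database把OID对应回具体的数据库(示例,假设端口为26000):
+
+```
+# 将base目录下以OID命名的子目录与数据库名对应起来
+gsql -d postgres -p 26000 -c "select oid, datname from pg_database order by oid;"
+```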
+
+## **四、openGauss配置相关文件**
+
+|文件名称| 描述|
+|-|-|
+|postgresql.conf| openGauss的配置文件,在gaussmaster线程启动时会读取该文件,获取监听地址、服务端口、内存分配、功能设置等配置信息,并且根据该文件,在openGauss启动时创建共享内存和信号量池等|
+|pg_hba.conf| 基于主机的接入认证配置文件,主要保存鉴权信息(如:允许访问的数据库、用户、IP段、加密方式等)|
+|pg_ident.conf| 客户端认证的配置文件,主要保存用户映射信息,将主机操作系统的用户与openGauss数据库用户做映射|
+|gaussdb.state| 主要保存数据库当前的状态信息(如:主备HA的角色、rebuild进度及原因、sync状态、LSN信息等)|
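+
+除直接查看postgresql.conf外,也可以通过pg_settings视图在线确认参数的生效值与来源(示例,假设端口为26000):
+
+```
+# 在线查看参数当前值及其来源(默认值/配置文件/会话级设置等)
+gsql -d postgres -p 26000 -c "select name, setting, source from pg_settings where name in ('listen_addresses','port','shared_buffers');"
+```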
+
+## **五、openGauss其他重要文件**
+
+|目录名称| 描述|
+|-|-|
+|Archived WAL| openGauss数据库WAL日志的归档目录,保存openGauss的历史WAL日志|
+|pg_audit| 存储openGauss数据库的审计日志文件|
+|pg_replslot| 存储openGauss数据库的复制事务槽数据|
+|pg_llog| 保存逻辑复制时的状态数据|
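+
+其中pg_replslot对应的复制槽与pg_audit中的审计内容,也都有相应的SQL查询入口(示例,假设端口为26000;pg_query_audit为openGauss提供的审计查询函数,时间范围请按实际情况填写):
+
+```
+# 查看当前的复制槽(对应pg_replslot目录)
+gsql -d postgres -p 26000 -c "select slot_name, slot_type, active from pg_replication_slots;"
+
+# 按时间段检索审计记录(对应pg_audit目录)
+gsql -d postgres -p 26000 -c "select time, type, username, detail_info from pg_query_audit('2021-06-01 00:00:00', '2021-06-30 23:59:59') limit 10;"
+```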
+ +关于openGauss体系架构就为大家介绍到这里,鉴于笔者并非openGauss内核开发人员,这里仅从DBA的角度粗浅的对openGauss数据库体系架构为大家做一个简单介绍,若文章有描述错误之处,欢迎指正。将及时修正以免误导大家。 + +## **文末寄语:** + +openGauss开源至今已将近1年,合作伙伴和广大数据库爱好者的努力使得openGauss数据库得以快速健康地向前发展,openGauss目前已发布至 2.0.0 版本,按照发布计划应该会在6月31日发布新的版本,欢迎各位小伙伴关注。 在个人的工作中,偶尔有小伙伴反馈openGauss有些不足之处使其对国产数据库失去了信心,为国产数据库的发展前景画了一个问号。这里我想阐述一下个人观点: Oracle起源于1977年,MySQL起源于1979年,SQL Server1987年,那么我们国产数据库呢? 根据墨天轮发布的2021年6月份[《国产数据库流行度排行榜》](https://www.modb.pro/dbRank),前三甲国产数据库中,2016年12月TiDB RC1发布,2011年OceanBase 0.1发布,2017年PolarDB发布。而国产数据库真正的发力时间点个人认为应该是在2020年,从2020年至今,国产数据库可谓是百家争鸣,无论是开源建设、技术发展还是生态建设都处于突飞猛进的状态。从时间上看,国产数据库真正的发展时间比国外主流数据库晚将近40载,在一个良好的数据库技术生态环境下,经历40载的光阴打磨,形成一款优秀的数据库软件是理所当然。所以,综合起来看,以Oracle为代表的数据库无论是生态建设还是技术成熟度都比国产数据库有一定的先天优势。 但当今的国产数据库发展现状已经不可同日而语,IT从业人员基本也都意识到核心IT技术国产化的重要性。无论是传统数据库厂商、互联网厂商还是其他IT厂商等等诸多厂商都在做属于自己的数据库产品,仅墨天轮《国产数据库流行度排行榜》所统计的数据库就多达130+种,而且这些数据库的技术起点已经处于一个较高且成熟的水平。在互联网、金融、政府、能源、电信等诸多核心行业中都在不断地尝试国产数据库在核心业务场景下的使用。 为了打造一个更好的国产数据库技术生态,openGauss、TiDB、OceanBase等优秀的数据库源代码已对外开放,合作伙伴可以根据源代码,发行属于自己的商业版本数据库,促进国产数据库的生态建设。 同时,我国的各大高等院校也增加了大量国产数据库的课程内容,为国产数据库后续的人才建设提供了有力保障,这些是国产数据库的未来。 所以,理性且客观的看,国产数据库和全球主流数据库存在些许差距,但国产数据库的发展环境已经不可同日而语,生态建设、人才发展、技术演进正在紧锣密鼓的向前“奔跑”,对国产数据库未来的发展应该有绝对的信心和自信,国产数据库与全球主流数据库的差距正在迅速缩小,至于什么时候与全球主流数据库比肩甚至超越,我相信只是时间问题,但不会太久。 + +**关于国产数据库,您怎么看? 欢迎留言讨论。** + diff --git a/content/zh/post/jingjingwu/01.getting-started-with-python.md b/content/zh/post/jingjingwu/01.getting-started-with-python.md new file mode 100644 index 0000000000000000000000000000000000000000..940e69b1329e1d62225bcffcc92f561070463e4b --- /dev/null +++ b/content/zh/post/jingjingwu/01.getting-started-with-python.md @@ -0,0 +1,242 @@ ++++ + +title = "OpenGauss数据库之Python驱动快速入门" + +date = "2021-04-04" + +tags = ["openGauss step by step系列"] + +archives = "2021-04" + +author = "吴京京" + +summary = "step by step系列之:openGauss1.0.1 Docker版本单机安装指南" + +img = "/zh/post/jiangdianbin/title/img38.png" + +times = "22:59" + ++++ + +# OpenGauss数据库之Python驱动 + +openGauss是一款开源关系型数据库管理系统,采用木兰宽松许可证v2发行。openGauss内核源自PostgreSQL,深度融合华为在数据库领域多年的经验,结合企业级场景需求,持续构建竞争力特性。 + +可是目前针对于OpenGauss数据库的Python应用程序的开发少之又少,这其中的一个原因在于不知道用什么驱动来连接该数据库,特别是Python应用程序,在此我将给大家介绍如何使用Python驱动连接OpenGauss数据库,同时翻译了psycopg2的文档:[psycopg2中文文档](https://wjmcat.gitee.io/py-opengauss/),目前仍在维护和调整中,如果有任何建议,欢迎提PR来进行修正。 + +## 一、数据库部署 + +教程的第一步当然是引导大家部署数据库了,由于OpenGauss数据库与操作系统有一定的依赖关系,为了屏蔽掉不同操作系统之间部署的区别,我推荐使用[Docker](https://hub.docker.com/r/enmotech/opengauss)来进行部署,部署的脚本如下所示: + +```shell +# 1.拉取镜像 +docker pull enmotech/opengauss + +# 2.开启opengauss数据库服务 +docker run --name opengauss \ + --privileged=true -d \ + -e GS_USERNAME=gaussdb \ + -e GS_PASSWORD=Secretpassword@123 \ + -p 5432:5432 \ + enmotech/opengauss:latest +``` + +在以上代码中,默认数据库用户名为`gaussdb`,数据库密码为`Secretpassword@123`,开启服务的端口为`5432`,相信熟悉Docker的同学一眼就能看明白代码的含义。 + +可是在此部署的镜像当中只存在一个默认数据库:`gaussdb`,如果要添加新的数据库节点的话可以使用以下代码: + +```shell +# 1. 进入运行的Docker容器 +docker exec -it opengauss /bin/bash + +# 2. 设置环境变量 +export GAUSSHOME=/usr/local/opengauss +export PATH=$GAUSSHOME/bin:$GAUSSHOME:$GAUSSHOME/lib:$PATH +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH +export DATADIR='/var/lib/opengauss/data' + +# 3. 使用gsql登陆opengauss数据库 +gsql -U gaussdb -W Secretpassword@123 -d postgres -p 5432 + +# 4. 创建test_db数据库 +CREATE DATABASE test_db WITH ENCODING 'UTF8' template = template0; + +# 5. 
重新加载OpenGauss数据库 +gs_ctl reload -D $DATADIR +``` + +以上命令执行完毕之后即可创建对应的数据库。 + +## 安装教程 + +要想使用Python驱动在OpenGauss数据库上开发应用,非常简单,只需要安装以下包即可: + +```shell +pip install psycopg2-binary +``` + +安装步骤只需要一步即可,接下来就可以开始应用程序的开发。 + +## Simple Operations with OpenGauss + +为了演示与数据库的基本操作,我将从创建会话连接、创建表、插入数据、修改数据、删除数据以及查询数据等六个方面来演示。 + +任何数据库的操作都是需要先创建连接来管理对应的事务,OpenGauss也不例外: + +### 创建会话连接 + +```python +from psycopg2 import connect + + +def create_conn(): + """get connection from envrionment variable by the conn factory + + Returns: + [type]: the psycopg2's connection object + """ + env = os.environ + params = { + 'database': env.get('OG_DATABASE', 'opengauss'), + 'user': env.get('OG_USER', 'gaussdb'), + 'password': env.get('OG_PASSWORD', 'Secretpassword@123'), + 'host': env.get('OG_HOST', '127.0.0.1'), + 'port': env.get('OG_PORT', 5432) + } + conn: connection = connect(**params) + return conn +``` + +以上代码中从环境变量中获取对应配置,从而创建与数据库的会话连接。 + +### 创建表 + +所有的数据操作都是在表上的操作,所以接下来就是需要创建对应的表: + +```python +def create_table(conn): + """check and create table by example + + Args: + table_name (str): the name of the table + corsor (type): the corsor type to get into operation with db + """ + sql = f"""SELECT EXISTS + ( + SELECT 1 + FROM pg_tables + WHERE tablename = '{table_name}' + );""" + with conn: + with conn.cursor() as cursor: + cursor.execute(sql) + result = cursor.fetchone() + if not result[0]: + logger.info(f'creating table<{table_name}>') + sql = f"""CREATE TABLE {table_name} (id serial PRIMARY KEY, name varchar, course varchar, grade integer);""" + result = cursor.execute(sql) + conn.commit() +``` + +以上代码中,首先是检测斗对应的表是否存在,如果不存在的话,便创建对应的表。 + + +### 插入数据 + +```python +def insert_data(conn) -> int: + """insert faker data + + Args: + cnn ([type]): the connection object to the databse + """ + faker = Faker(locale=['zh-cn']) + sql = f"insert into {table_name} (name, course, grade) values (%s,%s,%s) RETURNING *;" + with conn: + with conn.cursor() as cursor: + age = random.randint(20, 80) + result = cursor.execute(sql, (faker.name(), faker.name(), age)) + result = cursor.fetchone() + logger.info(f'add data<{result}> to the databse') + conn.commit() + return result[0] if result else None +``` + +使用SQL语句来插入数据,语法与Mysql等数据库有些不一样,可是大同小异,都是能够看懂。在语句的后面返回当前操作的结果,也就是能够获取插入数据的ID。 + +### 修改数据 + +```python +def update_data(conn, student): + """insert faker data + + Args: + cnn ([type]): the connection object to the databse + """ + faker = Faker(locale=['zh-cn']) + sql = f"update {table_name} name=%s, course=%s, grade=%s where id={student.id};" + with conn: + with conn.cursor() as cursor: + age = random.randint(20, 80) + result = cursor.execute(sql, (faker.name(), faker.name(), age)) + result = cursor.fetchone() + logger.info(f'update data<{result}> to the databse') + conn.commit() +``` + +修改数据只需要使用以上代码的SQL语句即可,相信熟悉SQL的同学一眼就能看懂。 + +接下来就是删除数据了: + +### 删除数据 + +```python +def delete_data_by_id(conn, id: int): + """delete data by primary key + + Args: + conn ([type]): the connection object + id (int): the primary key of the table + """ + sql = f"delete from {table_name} where id = %s;" + with conn: + with conn.cursor() as cursor: + cursor.execute(sql, (id,)) + logger.info(f'delete data from databse by id<{id}>') +``` + +### 获取数据 + +```python +def get_data_by_id(conn, id: int): + """fetch data by id + + Args: + conn ([type]): the connection object + id (int): the primary key of the table + + Returns: + [type]: the tuple data of the table + """ + sql = f"select * from {table_name} where id = %s;" + with conn: + with conn.cursor() as cursor: + 
cursor.execute(sql, (id,)) + result = cursor.fetchone() + logger.info(f'select data<{result}> from databse') + return result +``` + +在以上代码中,通过SELECT语句筛选出对应的数据,这个接口是非常简单且通用的。 + +在以上代码中有一个规律,所有的操作都是需要在Cursor上进行操作,这是因为每一个原子事务的控制都是基于cursor对象,这样通过细粒度的控制能够很好的调度应用程序中所有的数据库交互操作。 + +在以上代码中,展示了最简单的数据连接与数据库查询,使用方法非常简单,并符合[DB API v2](https://www.python.org/dev/peps/pep-0249/)的规范,从而让很多上有工具原生支持opengauss的操作,比如可直接在Sqlalchemy ORM工具中连接Opengauss数据库,这是一个非常重要的特性。 + +此处只是一个非常简单的数据库连接示例,后续我将持续发布一些深入的使用Opengauss Python数据库驱动的案例,录制了一个线上视频提供参考: + +[BiliBili-opengauss使用之python驱动](https://www.bilibili.com/video/BV1G54y1b7g4/) + +## 总结 + +Opengauss数据库是一个可处理高并发的高性能数据库,基于PostgreSql生态可轻松实现Python驱动应用程序的开发。 diff --git "a/content/zh/post/jinlixin/openGauss\347\232\204\346\211\251\345\256\271\347\274\251\345\256\271\345\222\214\351\227\256\351\242\230\345\244\204\347\220\206.md" "b/content/zh/post/jinlixin/openGauss\347\232\204\346\211\251\345\256\271\347\274\251\345\256\271\345\222\214\351\227\256\351\242\230\345\244\204\347\220\206.md" new file mode 100644 index 0000000000000000000000000000000000000000..0019f089d8e613ce721532b6932747a40326e017 --- /dev/null +++ "b/content/zh/post/jinlixin/openGauss\347\232\204\346\211\251\345\256\271\347\274\251\345\256\271\345\222\214\351\227\256\351\242\230\345\244\204\347\220\206.md" @@ -0,0 +1,197 @@ ++++ + +title = "openGauss的扩容缩容和问题处理" + +date = "2021-03-29" + +tags = ["openGauss故障处理"] + +archives = "2021-03" + +author = "金立新" + +summary = "openGauss的扩容缩容和问题处理" + +img = "/zh/post/jinlixin/title/img5.png" + +times = "12:30" + ++++ + +# openGauss的扩容缩容和问题处理 + +openGauss提供了优秀的集群管理工具gs\_om,集群管理信息写在二进制文件中,从而牺牲了增加节点和摘除节点的便利性(相对PG而言)。好在openGauss-1.1.0提供了节点扩容和缩容的工具,gs\_dropnode和gs\_expansion。 + +生产主库服务器出现硬件故障,无法启动,所以需要摘除故障老主节点和新增一台备机,以恢复集群架构。 + +集群状态:主库无法访问,gs\_om显示主库unknow,备机显示连接中。 + +操作:先将主节点切换至同步备机,使集群恢复正常。 + +在需要切换为新主库的节点执行命令: + +``` +gs_ctl failover -D /home/omm/dn1/ +``` + +其中:/home/omm/dn1/为新主库的数据目录。 + +切换完成之后查看数据库状态是否正常。 + +``` +gs_om -t status --detail +``` + +后续处理: + +执行以下下命令清除坏死节点,避免对依赖gs\_om的工具产生影响。 + +``` +gs_dropnode -U omm -G dbgrp -h 192.168.1.1
 +``` + +说明: + +- -U 指定集群的安装的用户 +- -G 指定集群的安装用户的用户组 +- -h 需要摘除的节点的IP + +摘除节点过程中产生的问题处理:目前社区官方1.1.0版本中的gs\_dropnode执行时需要gs\_om在每台机器上查询状态,服务器无法连接时会等待机器响应,导致超时,造成摘除节点卡死现象。与开发者沟通后,开发者会缩短等待无响应机器的时间为10s,超时后不再等待坏死节点响应,直接更新正常节点的集群管理信息存储文件。需要在gitee里下载新的gs\_dropnode脚本替换原脚本再执行。 + +新版本链接地址:https://gitee.com/struggle\_hw/openGauss-OM/blob/c0212048050453c57955b342dada5b6de6803622/script/gs\_dropnode + +``` +gs_dropnode -U omm -G dbgrp -h 192.168.1.1 +``` + +如下图所示,摘除节点日志如下,成功。 + +``` +[omm@kvm-yl1 ~]$ gs_dropnode -U omm -G dbgrp -h 192.168.122.92 +The target node to be dropped is (['kvm-yl2']) +Do you want to continue to drop the target node (yes/no)? yes +The cluster will have only one standalone node left after the operation! +Do you want to continue to drop the target node (yes/no)? yes +[gs_dropnode]Start to drop nodes of the cluster. +[gs_dropnode]Start to stop the target node kvm-yl2. +[gs_dropnode]End of stop the target node kvm-yl2. +[gs_dropnode]Start to backup parameter config file on kvm-yl1. +[gs_dropnode]End to backup parameter config file on kvm-yl1. +[gs_dropnode]The backup file of kvm-yl1 is /tmp/gs_dropnode_backup20210223085218/parameter_kvm-yl1.tar +[gs_dropnode]Start to parse parameter config file on kvm-yl1. +[gs_dropnode]End to parse parameter config file on kvm-yl1. +[gs_dropnode]Start to parse backup parameter config file on kvm-yl1. +[gs_dropnode]End to parse backup parameter config file kvm-yl1. +[gs_dropnode]Start to set postgresql config file on kvm-yl1. +[gs_dropnode]End of set postgresql config file on kvm-yl1. +[gs_dropnode]Start to get repl slot on primary node. +[gs_dropnode]Start to set repl slot on primary node. +[gs_dropnode]End of set repl slot on primary node. +[gs_dropnode]Start of set pg_hba config file on kvm-yl1. +[gs_dropnode]End of set pg_hba config file on kvm-yl1. +[gs_dropnode]Start to modify the cluster static conf. +[gs_dropnode]End of modify the cluster static conf. +[gs_dropnode]Remove the dynamic conf. +Only one primary node is left.It is recommended to restart the node. +Do you want to restart the primary node now (yes/no)? yes +[gs_dropnode]Start to stop the target node kvm-yl1. +[gs_dropnode]End of stop the target node kvm-yl1. +[gs_dropnode]Start to start the target node. +[gs_dropnode]End of start the target node. +[gs_dropnode]Success to drop the target nodes. +``` + +tips:节点摘除之后,为了保证数据安全,被摘除节点的数据不会被清理,如果确定不需要,可以手动清理节点,执行清理本地数据命令并清理环境变量。 + +``` +gs_uninstall --delete-data -L +``` + +扩容一台机器,以便于恢复集群原有架构。 + +1. 新节点创建omm用户和用户组dbgrp。 +2. 检查新节点环境变量,清理和openGauss相关的环境变量配置。主要检查/etc/profile和/home/omm/.bashrc两个文件。如果清理不干净,会导致扩容不成功。或者提示待扩容备机节点已经安装。 +3. 创建互信,包括root和omm用户,这里使用opengauss提供的工具创建互信。 +4. 如果是同一台机器恢复后再加入集群,需要清理root用户和omm用户的\~/.ssh/know\_host和\~/.ssh/authorized\_keys里的相关信息,都则创建互信会失败。 +5. 分别在root用户和omm用户下执行,各节点密码需要一致,后期可以再修改。 +6. 全新的机器需要安装python3。 + +``` +gs_sshexkey -f /home/omm/hostfile +``` + +执行结果提示如下代表成功 + +``` +Successfully distributed SSH trust file to all node. +Verifying SSH trust on all hosts. +Successfully verified SSH trust on all hosts. +Successfully created SSH trust. +``` + +hostfile如下: + +集群内所有的ip,每个ip一行: + +``` +cat hostfile +192.168.1.1 +192.168.1.2 +192.168.1.3 +``` + +创建新的xml文件,将老节点剔除,新节点加入。 + +``` +./gs_expansion -U omm -G dbgrp -h 192.168.122.92 -X ./clusterconfig.xml +``` + +扩容日志如下,代表扩容成功。 + +``` +Start to preinstall database on the new standby nodes. +Successfully preinstall database on the new standby nodes. +Start to install database on the new standby nodes. 
+installing database on node 192.168.1.1: +Please enter the password of user [omm] on node [192.168.1.1]: +Parsing the configuration file. +Check preinstall on every node. +Successfully checked preinstall on every node. +Creating the backup directory. +Successfully created the backup directory. +begin deploy.. +Installing the cluster. +begin prepare Install Cluster.. +Checking the installation environment on all nodes. +begin install Cluster.. +Installing applications on all nodes. +Successfully installed APP. +begin init Instance.. +encrypt cipher and rand files for database. +Please enter password for database: +Please repeat for database: +begin to create CA cert files +The sslcert will be generated in /data/opengauss/app/share/sslcert/om +Cluster installation is completed. +Configuring. +Deleting instances from all nodes. +Successfully deleted instances from all nodes. +Checking node configuration on all nodes. +Initializing instances on all nodes. +Updating instance configuration on all nodes. +Check consistence of memCheck and coresCheck on database nodes. +Configuring pg_hba on all nodes. +Configuration is completed. +Successfully started cluster. +Successfully installed application. +end deploy.. +Successfully install database on node ['192.168.1.1'] +Database on standby nodes installed finished. Start to establish the primary-standby relationship. +Success to expansion standby nodes. +``` + +问题处理:扩容后,gs\_om显示新备机状态异常的情况,可能有以下几种情况: + +1. 集群数据目录比较大,导致扩容脚本超时后,备机执行build还未执行完毕,需要等待备机执行完。 +2. 扩容过程中,主库配置文件更新失败,需要检查postgresql.conf文件的replconninfo和hba的集群内状态是否正常。 + diff --git a/content/zh/post/justbk/2021-03-03_zabbix_for_openGauss.md b/content/zh/post/justbk/2021-03-03_zabbix_for_openGauss.md new file mode 100644 index 0000000000000000000000000000000000000000..2b5bc382761fad22b985481375effb77042649c4 --- /dev/null +++ b/content/zh/post/justbk/2021-03-03_zabbix_for_openGauss.md @@ -0,0 +1,401 @@ ++++ +title = "zabbix适配openGauss使用指导书" +date = "2021-03-03" +tags = ["openGauss社区开发入门"] +archives = "2021-03" +author = "justbk" +summary = "openGauss社区开发入门" +img="/zh/post/justbk/title/zabbix_title.png" +times = "17:30" + ++++ + +# 一、 zabbix简介 + +zabbix是一个基于WEB界面的提供分布式系统监视以及网络监视功能的企业级的开源解决方案。 + +zabbix能监视各种网络参数,保证服务器系统的安全运营;并提供灵活的通知机制以让系统管理员快速定位/解决存在的各种问题。 + +zabbix由2部分构成,zabbix server与可选组件zabbix agent。 + +zabbix server可以通过SNMP,zabbix agent,ping,端口监视等方法提供对远程服务器/网络状态的监视,数据收集等功能,它可以运行在Linux,Solaris,HP-UX,AIX,Free BSD,Open BSD,OS X等平台上。 + +# 二、 zabbix安装与部署 + +官网安装教程: [链接](https://www.zabbix.com/cn/download?zabbix=5.0&os_distribution=centos&os_version=7&db=mysql&ws=apache) + +也可以自行搜索安装教程。 + +安装完成后,本身的server、agent及web服务可以通过下面的命令启动: + +重启: `systemctl restart zabbix-server zabbix-agent httpd rh-php72-php-fpm` + +开机启动: `systemctl enable zabbix-server zabbix-agent httpd rh-php72-php-fpm` + +# 三、 zabbix配置 + +## 1. 配置项说明 + +以下内容为web+server+2个agent(一个是linux agent,一个是windows agent)的配置, + +其中web、server和agent1安装在同一台机器上100.99.112.191记为IP1, 配置文件路径/etc/zabbix/ + +另一台agent2安装在windows电脑(10.189.44.131)记为IP2上。 + +## 2. server项配置 + +ListenPort = 10051 #server端的监听IP + +SourceIP=IP1 #外部连接ip,此ip配置不正确将导致web前端获取agent2的数据提示连接不正确 + +DBUser = zabbix + +DBPassword=password + +ListenIP=IP1 #server端侦听IP,必须使用大网,否则agent2无法连接 + +## 3. web配置 + +DB配置如图示: + +![image-20210302093148818](../img/zabbix-web配置.png "web配置") + +$ZBX_SERVER = IP1 #server的ListenIP + +$ZBX_SERVER_PORT = 10051 #server的ListenPort + +$ZBX_SERVER_NAME = zabbix # server配置的服务名 + +## 4. agent1配置(linux agent) + +### a. 
+### d. Custom item check_opengauss
+
+UserParameter=check_opengauss[*],/etc/zabbix/script/check_opengauss.sh $1 $2 $3
+
+Content of check_opengauss.sh:
+
+```
+#!/bin/bash
+# $1 database name, $2 password, $3 selects the query to run
+case $3 in
+*)
+source /home/zabbix/env_single
+gsql -h 100.99.112.191 -U test -p 50301 -d $1 -W $2 -t -c "select sum(pg_database_size(datid)) as total_size from pg_stat_database"
+;;
+esac
+```
+
+## 5. agent2 configuration (Windows agent)
+
+### a. Install the Windows agent
+
+Download the Windows build of the Zabbix agent: [link](https://www.zabbix.com/cn/download_agents?version=5.0+LTS&release=5.0.9&os=Windows&os_version=Any&hardware=i386&encryption=No+encryption&packaging=Archive#tab:44), and extract it to a suitable path on Windows:
+
+![image-20210304151602239](../img/zabbix-windows-agent版本.png "Windows agent version")
+
+### b. Configuration
+
+#SourceIP is left unset
+
+Server=IP2,IP1 #two IPs; both can fetch data from this agent
+
+ServerActive=IP1 #this setting does not appear to take effect
+
+Hostname=IP1 #IP of the web service
+
+Include=D:\software\zabbix_agent-5.0.5-openssl\shell1\\*.conf #test configuration
+
+UnsafeUserParameters=1
+
+### c. Custom item ipconfig
+
+Content of ipconfig.conf:
+
+UserParameter=ipconfig[*],D:\software\zabbix_agent-5.0.5-openssl\shell1\ipconfig.bat
+
+The bat file simply echoes justtest:
+
+`@echo justtest`
+
+# 4. Using the Zabbix service
+
+With the configuration above in place, everything can be used from the web UI: agent1 provides the items check_opengauss and check_opengauss_status, and agent2 provides the ipconfig item.
+
+#### a. Create a new host, here the Windows machine IP2 (10.189.44.131)
+
+Go to Configuration -> Hosts -> Create host and fill in the following:
+
+![image-20210302110204405](../img/zabbix-添加主机配置.png "add host configuration")
+
+For templates, select just one, Template OS Windows, to monitor the Windows state:
+
+![image-20210302110204405](../img/zabbix-添加Windows模板配置.png "add Windows template configuration")
+
+Click Add to finish creating the host; some items and triggers are already present:
+
+![image-20210302110406136](../img/zabbix-添加主机成功.png "host added successfully")
+
+Open the items list; all of them are enabled:
+
+![image-20210302110956735](../img/zabbix-显示主机监控项.png "show host items")
+
+Go to Monitoring -> Latest data; all of this host's data is being refreshed.
+
+#### b. Create a custom item
+
+Those were the system items; we can add our own. Go to the host -> click Items -> Create item, with the following parameters:
+
+![image-20210302111749152](../img/zabbix-添加监控项ipconfig.png "add item ipconfig")
+
+The value of our custom item is fetched correctly; click Add to finish.
+
+#### c. Create a trigger
+
+From the three-dot menu next to the item, choose to add a trigger. When the trigger's rule is met, Zabbix shows the problem on the dashboard and under Problems, and it can also respond with email actions or other operations.
+
+![image-20210302112126889](../img/zabbix-创建触发器.png "create trigger")
+
+For trigger expressions, see the reference: [link](https://www.zabbix.com/documentation/4.0/zh/manual/config/triggers/trigger)
+
+Here the expression raises a High-severity alert when the text returned by the item contains just:
+
+![image-20210302112950882](../img/zabbix-编辑触发器表达式.png "edit trigger expression")
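+In the classic trigger syntax of Zabbix 5.0, an expression of that form looks like the following sketch (the host name is a placeholder for whatever you entered when creating the host, and the item key is the one created in the previous step):
+
+```
+{windows-host:ipconfig[].str(just)}=1
+```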
+On Monitoring -> Dashboard you can see problems being reported and triggered continuously:
+
+![image-20210302113113164](../img/zabbix-仪表盘显示.png "dashboard display")
+
+#### d. Create an action
+
+Go to Configuration -> Actions and click Create action. First add an action that responds when the trigger is ipconfig_check_trigger, as shown:
+
+![image-20210302141710513](../img/zabbix-创建触发器动作.png "create trigger action")
+
+Then add the operation performed when the action fires; in this example, an email is sent to the Admin and user groups every hour while the problem remains unresolved:
+
+![image-20210302141933116](../img/zabbix-增加动作响应.png "add action operation")
+
+**Handle these rules with care: an interval that is too short, or too many alerts, will flood the mailbox.**
+
+#### e. Configure email notifications
+
+Go to Administration -> Media types -> Email and configure the SMTP sender information as required:
+
+![image-20210302142346923](../img/zabbix-email媒介配置.png "email media configuration")
+
+Then go to Administration -> Users to configure the recipients; by default only the admin user exists, so configure that one:
+
+![image-20210302142459339](../img/zabbix-用户email配置.png "user email configuration")
+
+These steps complete the whole pipeline from data collection -> trigger -> action -> notification.
+
+# 5. Monitoring openGauss with Zabbix
+
+### a. Download an agent that matches the openGauss environment
+
+Download the binary archive from the official site and extract it directly: [link](https://www.zabbix.com/cn/download_agents?version=5.0+LTS&release=5.0.9&os=Linux&os_version=3.0&hardware=amd64&encryption=No+encryption&packaging=Archive#tab:44)
+
+The environment here is CentOS 7 on x86_64:
+
+![image-20210302192249068](../img/zabbix-agent服务器配置.png "agent server configuration")
+
+The chosen options:
+
+![image-20210302192123416](../img/zabbix-agent选择对应版本.png "agent version selection")
+
+Extract it in a suitable location (note: use the same user as the openGauss installation and grant the correct permissions). Here the home path is $ZBX_AGENT_HOME. After extraction there are three directories, bin, sbin, and conf; add bin and sbin to the personal environment in ~/.bashrc so that zabbix_agentd, zabbix_get, and zabbix_sender can be used directly.
+
+Also make the personal environment load the openGauss environment variables, so that gsql, gs_ctl, and the other openGauss tools are directly available.
+
+![image-20210302192810833](../img/zabbix-agent环境变量配置.png "agent environment variable configuration")
+
+### b. Adjust the agent configuration so it connects to Zabbix normally
+
+Modify the configuration file as for agent1 in section 3 and add the two openGauss items: check_opengauss and check_opengauss_status.
+
+Once configured, the service can be started with zabbix_agentd. To simplify operation, the helper shell script zabbix_run.sh below assists with starting it.
+
+It supports -s start|stop|restart; just adapt the configuration-file path on the second line. The content is as follows:
+
+```
+#!/bin/bash
+pfurl='zabbix_agentd -c /home/user/zabbix_agent/conf/zabbix_agentd.conf'
+PID=0
+kill_zabbix_agentd(){
+    ps ux | awk -r '/zabbix_agentd/{print $2}' | xargs -n1 kill -9
+}
+pfstart(){
+    # the awk process itself matches the pattern, so the count is 1 when no agent is running
+    num=$(ps ux | awk -r '/zabbix_agentd/{print $2}' | wc -l)
+    if [ $num != "1" ];then
+        echo "zabbix_agentd is already running"
+    else
+        echo "zabbix_agentd is starting..."
+        $pfurl
+        echo "start done!"
+    fi
+}
+
+if [ "$1" == "-s" ];then
+    if [ "$2" == "stop" ];then
+        echo "zabbix_agentd is stopping...."
+        kill_zabbix_agentd
+        echo "stop done!"
+    elif [ "$2" == "start" ];then
+        pfstart
+    elif [ "$2" == "restart" ];then
+        echo "zabbix_agentd is stopping...."
+        kill_zabbix_agentd
+        echo "stop done!"
+        echo "zabbix_agentd is starting..."
+        $pfurl
+        echo "start done!"
+    fi
+elif [ "$1" == "--help" ];then
+    echo "zabbix_agentd -s [start/stop/restart]"
+else
+    pfstart
+fi
+```
+
+Once it starts and connects, data can be fetched with zabbix_get, as shown:
+
+![image-20210302195135343](../img/zabbix-agent验证监控项读取.png "agent item read verification")
+
+### c. Create an openGauss template
+
+Go to Configuration -> Templates, click Create template, fill in the template name and its group (which can be created in advance under Host groups), and add a few general macros:
+
+![image-20210302195713389](../img/zabbix-模板创建.png "template creation")
+
+Then create the two items; macros can be referenced here so that different openGauss databases use different parameters:
+
+![image-20210302195742230](../img/zabbix-监控项status和宏配置.png "status item and macro configuration")
+
+![image-20210302195758217](../img/zabbix-监控项size和宏配置.png "size item and macro configuration")
+
+### d. Create an openGauss host
+
+Go to Configuration -> Hosts, click Create host, enter the host name and the agent address (the agent's IP and port), add the openGauss template just configured under Templates, and adjust the macro values.
+
+**Besides the openGauss template, you can also add the built-in Template OS Linux template, which covers disk, network, CPU load, and so on.**
+
+Once created, the host automatically has the two items from the template:
+
+![image-20210302200556483](../img/zabbix-openGauss主机创建.png "openGauss host creation")
+
+### e. Configure triggers and more
+
+Configure these as described in section 4, adding your own trigger logic.
+
+This completes the full openGauss monitoring setup, covering Linux system resources plus gsql and gs_ctl and their results. You can call other commands from the scripts to extend the monitoring, and when a simple shell script is not enough, you can develop your own program that interacts with the agent to handle more monitoring tasks. Explore on your own.
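+As a sketch of such an extension (the item name, port, and credentials are illustrative, following the same pattern as check_opengauss above), an item reporting the number of active sessions could look like:
+
+```
+UserParameter=check_opengauss_conn[*],/etc/zabbix/script/check_opengauss_conn.sh $1 $2
+```
+
+with /etc/zabbix/script/check_opengauss_conn.sh containing:
+
+```
+#!/bin/bash
+# $1 database name, $2 password; prints the current session count
+source /home/zabbix/env_single
+gsql -d $1 -p 50301 -U test -W $2 -t -c "select count(*) from pg_stat_activity"
+```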
+### f. Porting Zabbix's PostgreSQL template
+
+Zabbix already supports monitoring PostgreSQL through the template named Template App PostgreSQL. Its items cannot be used directly, but we can download the source from the official site and add the corresponding configuration ourselves. The Zabbix GitHub repository: [link](https://github.com/zabbix/zabbix); the PostgreSQL monitoring template lives at: [link](https://github.com/zabbix/zabbix/tree/master/templates/db/postgresql), with the following content:
+
+The configuration file template_db_postgresql.conf looks like this:
+
+![image-20210304161337945](../img/zabbix-postgresql监控项.png "postgresql items")
+
+Most of these checks run SQL scripts through psql, so porting them is not complicated. Taking the first entry as an example:
+
+UserParameter=pgsql.bgwriter[*], gsql -qtAX -h "$1" -p "$2" -U "$3" -d "$4" -W "$5" -f "/home/user/zabbix_agent/conf/zabbix_agentd/opengauss/pgsql.bgwriter.sql"
+
+The changes are:
+
+* psql is replaced with gsql
+* -W "$5" is added for the password (since gsql runs locally, removing the -h/-U/-W parameters is also an option)
+* the path of the SQL file to execute is changed to the correct location
+
+Because PostgreSQL and openGauss differ in some SQL statements and functions, a few items need their SQL ported as well; handle those yourself.
+
+After configuring these custom agent items, add them to the web monitoring as described in section 4.
+
+# 6. FAQ
+
+### a. Errors when adding a host
+
+**cannot connect to [[10.183.209.119]:10050]: [111] Connection refused**
+
+Common causes:
+
+* The firewall is still running
+
+[root@ctupopenga00017 ~]# systemctl status firewalld.service
+
+[root@ctupopenga00017 ~]# systemctl stop firewalld.service
+
+* The user has no permission to create the zabbix_agentd PID file
+
+Check the Zabbix agent log to analyze the cause:
+
+[root@ctupopenga00017 ~]# cat /var/log/zabbix/zabbix_agentd.log
+
+![img](../img/zabbix-agentd无目录权限.png "zabbix_agentd has no directory permission")
+
+As prompted, grant the zabbix user write access to /var/run/zabbix/ and restart the agent service:
+
+[root@ctupopenga00017 ~]# chmod 755 /var/run/zabbix/
+
+[root@ctupopenga00017 ~]# systemctl restart zabbix_agent.service
+
+* The Zabbix agent configuration file is filled in incorrectly
+
+![img](../img/zabbix-agentd配置错误.png "zabbix_agentd configuration error")
+
+The log says the monitoring was added with the localhost address 127.0.0.1, while the Server entry in zabbix_agentd.conf does not include 127.0.0.1; adding the localhost address to the configuration file fixes it.
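+With the addresses used in this article, the fix is a one-line change in the agent configuration:
+
+```
+# /etc/zabbix/zabbix_agentd.conf -- allow both local checks and the server at IP1
+Server=127.0.0.1,100.99.112.191
+```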
+### b. Errors when creating custom items
+
+* ZBX_NOTSUPPORTED: Item does not allow parameters.
+
+Cause: the parameter format in zabbix_agentd.conf is wrong.
+
+Fix: find the UserParameter entry in zabbix_agentd.conf and append [*] to the custom key, which indicates that parameters are passed.
+
+* Returned value type mismatch
+
+Cause: when awk is used to pick out the value to return, awk's mechanics convert the value into a string.
+
+Fix: use awk's strtonum to convert it to a number in the conf file, for example: UserParameter=service.cpu[*],ps -aux | egrep -v 'grep|tail' | grep $1 | awk '{print strtonum($$3)}'
+
+* ZBX_NOTSUPPORTED: Unsupported item key.
+
+The item name is not configured on the agent side.
+
+* zabbix_get [87767]: Check access restrictions in Zabbix agent configuration
+
+The Server setting is wrong, so the server cannot access the agent.
diff --git a/content/zh/post/justbk/2021-08-31_shardingSphere_for_openGauss.md b/content/zh/post/justbk/2021-08-31_shardingSphere_for_openGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..b1f85dad7ef55ad00db9d0c6234449d96fb15c11
--- /dev/null
+++ b/content/zh/post/justbk/2021-08-31_shardingSphere_for_openGauss.md
@@ -0,0 +1,182 @@
++++
+title = "How to Build a Distributed openGauss Environment with ShardingSphere-Proxy"
+date = "2021-08-31"
+tags = ["openGauss distributed solution"]
+archives = "2021-08"
+author = "justbk"
+summary = "openGauss distributed solution"
+img="/zh/post/justbk/title/shardingSphere_logo_v2.png"
+times = "17:30"
+
++++
+
+# 1. Introduction to ShardingSphere-Proxy
+
+ShardingSphere-Proxy (below simply "proxy") is positioned as a transparent database proxy: a server-side product that wraps the database binary protocol, built to support heterogeneous languages. It currently ships MySQL and PostgreSQL versions, so data can be operated on with any client compatible with the MySQL/PostgreSQL protocol (MySQL Command Client, MySQL Workbench, Navicat, and so on), which is friendlier for DBAs.
+
+- Fully transparent to applications; it can be used directly as if it were MySQL/PostgreSQL.
+- Works with any client compatible with the MySQL/PostgreSQL protocol.
+
+The core principle behind the proxy's distribution support: it uses Netty to capture the client's SQL (from gsql or JDBC), parses the SQL into an abstract syntax tree, rewrites the SQL according to the configured sharding rules so it is routed to the corresponding databases, aggregates the results of the individual statements, and returns the result to the client through Netty. That completes the whole sharding flow, as illustrated below:
+
+![image-proxy-sql-flow](../img/proxy-sql-flow.png "proxy SQL processing flow")
+
+# 2. Getting ShardingSphere-Proxy
+
+The proxy supports the PostgreSQL protocol by default, and openGauss also speaks the PostgreSQL protocol, but the two differ in authentication and in the bulk-insert protocol. For the proxy to work properly, the openGauss JDBC driver must be added to the lib directory. It can be downloaded from the Maven central repository under the coordinates:
+
+```xml
+<dependency>
+    <groupId>org.opengauss</groupId>
+    <artifactId>opengauss-jdbc</artifactId>
+</dependency>
+```
+
+For now the proxy must be built from the master branch: [link](https://github.com/apache/shardingsphere/tree/master "proxy_opengauss-master"); the package used in this example was built from the openGauss branch.
+
+# 3. Building the distributed openGauss environment
+
+## 1. Extract the binary package
+
+After obtaining the binary package, extract it with `tar -zxvf`; the extracted content looks like this:
+
+![image-20210830155551388](../img/proxy-unzip-filelist.png "proxy extracted file list")
+
+## 2. Swap in the openGauss JDBC driver
+
+Go to the lib directory, delete the original postgresql-42.2.5.jar, and put the opengauss-jdbc jar there instead.
+
+## 3. Edit server.yaml
+
+Go to the conf directory, which already contains a server.yaml template. This file configures the **front end**: the authentication database, user names, and passwords, plus connection-related properties including the distributed-transaction type and SQL logging.
+
+The proxy also supports a governance configuration center, from which it can read or persist configuration; that is not used here.
+
+A minimal server.yaml:
+
+```yaml
+rules:
+  - !AUTHORITY
+    users:
+      - root@%:root
+      - sharding@:sharding
+    provider:
+      type: ALL_PRIVILEGES_PERMITTED
+
+props:
+  max-connections-size-per-query: 1
+  executor-size: 16  # Infinite by default.
+  proxy-frontend-flush-threshold: 128  # The default value is 128.
+```
+
+More server.yaml options: [link](https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-proxy/configuration/ "proxy-server-config")
+
+## 4. Edit config-sharding.yaml
+
+The conf directory also contains a config-sharding.yaml template. This file configures the **back end**: connection properties for the openGauss databases, and the sharding rules.
+
+The sharding scheme in this example: data is split across two databases and each table into three shards; the database sharding key is ds_id (value modulo 2) and the table sharding key is ts_id (value modulo 3).
+
+Inserted data is distributed as follows:
+
+| ds_id | ts_id | frontend schema | frontend table | backend database | backend table |
+| ----- | ----- | --------------- | -------------- | ---------------- | ------------- |
+| 0 | 0 | sharding_db | t1 | ds_0 | t1_0 |
+| 0 | 1 | sharding_db | t1 | ds_0 | t1_1 |
+| 0 | 2 | sharding_db | t1 | ds_0 | t1_2 |
+| 1 | 0 | sharding_db | t1 | ds_1 | t1_0 |
+| 1 | 1 | sharding_db | t1 | ds_1 | t1_1 |
+| 1 | 2 | sharding_db | t1 | ds_1 | t1_2 |
+
+A minimal config-sharding.yaml (note that the table algorithm's inline expression must produce the table names t1_0..t1_2):
+
+```yaml
+dataSources:
+  ds_0:
+    connectionTimeoutMilliseconds: 10000
+    idleTimeoutMilliseconds: 10000
+    maintenanceIntervalMilliseconds: 10000
+    maxLifetimeMilliseconds: 1800000
+    maxPoolSize: 200
+    minPoolSize: 10
+    password: Huawei@123
+    url: jdbc:opengauss://90.90.44.171:44000/ds_0?serverTimezone=UTC&useSSL=false&connectTimeout=10
+    username: test
+  ds_1:
+    connectionTimeoutMilliseconds: 10000
+    idleTimeoutMilliseconds: 10000
+    maintenanceIntervalMilliseconds: 10000
+    maxLifetimeMilliseconds: 1800000
+    maxPoolSize: 200
+    minPoolSize: 10
+    password: Huawei@123
+    url: jdbc:opengauss://90.90.44.171:44000/ds_1?serverTimezone=UTC&useSSL=false&connectTimeout=10
+    username: test
+rules:
+- !SHARDING
+  defaultDatabaseStrategy:
+    none: null
+  defaultTableStrategy:
+    none: null
+  shardingAlgorithms:
+    ds_t1_alg:
+      props:
+        algorithm-expression: ds_${ds_id % 2}
+      type: INLINE
+    ts_t1_alg:
+      props:
+        algorithm-expression: t1_${ts_id % 3}
+      type: INLINE
+  tables:
+    t1:
+      actualDataNodes: ds_${0..1}.t1_${0..2}
+      databaseStrategy:
+        standard:
+          shardingAlgorithmName: ds_t1_alg
+          shardingColumn: ds_id
+      tableStrategy:
+        standard:
+          shardingAlgorithmName: ts_t1_alg
+          shardingColumn: ts_id
+schemaName: sharding_db
+```
+
+More config-sharding.yaml options: [link](https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-jdbc/configuration/yaml/ "proxy-sharding-config")
+
+## 5. Start ShardingSphere-Proxy
+
+Go to the bin directory. With the configuration above in place, start the proxy with `sh start.sh`; it binds port 3307 by default. You can pass a port to the startup script, e.g. `sh start.sh 4000`, to use port 4000 instead.
+
+# 4. Verifying the environment
+
+To confirm the proxy started correctly, check logs/stdout.log; once the success message appears, it is up.
+
+![image-20210830173141347](../img/proxy-start-success.png "proxy started successfully")
+
+From the openGauss host, connect with `gsql -d sharding_db -h $proxy_ip -p 3307 -U sharding -W sharding -r`.
+
+# 5. Using the distributed database
+
+With the proxy confirmed working, you can operate on the distributed data through gsql; the examples below assume a gsql session is already connected.
+
+## 1. Create a table
+
+In the gsql session, run `create table t1 (id int primary key, ds_id int, ts_id int, data varchar(100));` to create the table; the syntax needs no changes at all.
+
+## 2. Insert, delete, update
+
+* Insert: `insert into t1 values (0, 0, 0, 'aaa')`
+* Delete: `delete from t1 where id = 0`
+* Update: `update t1 set data = 'ccc' where ds_id = 1;`
+
+All of these run without any syntax changes.
+
+## 3. Query
+
+`select * from t1` returns all the data; simple SELECT statements run through the proxy with essentially no syntax changes.
+
+Support for complex query syntax (such as nested subqueries) is still incomplete; you can keep filing issues with the ShardingSphere community.
+
+For the SQL that is and is not supported, see: [link](https://shardingsphere.apache.org/document/current/cn/features/sharding/use-norms/sql/ "proxy-sql-support")
+
+## 4. Transactions
+
+Transactions through ShardingSphere work the same way as before, via begin/commit/rollback; a combined example follows.
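+Putting the routing table from section 3.4 together with the transaction syntax, a short session sketch:
+
+```
+begin;
+insert into t1 values (1, 0, 1, 'aaa');  -- ds_id=0, ts_id=1 -> backend ds_0.t1_1
+insert into t1 values (2, 1, 2, 'bbb');  -- ds_id=1, ts_id=2 -> backend ds_1.t1_2
+commit;
+select count(*) from t1;                 -- aggregated across both backend databases
+```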
\ No newline at end of file
diff --git a/content/zh/post/justbk/2021-12-17_how_to_test_perfermance_for_openGauss.md b/content/zh/post/justbk/2021-12-17_how_to_test_perfermance_for_openGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa0aec98c0679dfa0f833ca717b1b5389adf9721
--- /dev/null
+++ b/content/zh/post/justbk/2021-12-17_how_to_test_perfermance_for_openGauss.md
@@ -0,0 +1,400 @@
++++
+title = "High-Performance Testing with BenchmarkSQL"
+date = "2021-12-17"
+tags = ["openGauss performance testing"]
+archives = "2021-12"
+author = "justbk"
+summary = "Testing whether openGauss reaches 1.5 million tpmC with properly configured hardware and software."
+img="/zh/post/justbk/title/perfermance_openGauss_logo.png"
+times = "12:30"
+
++++
+
+# 1. Performance-testing prerequisites
+
+The blog post on the official site explains in detail how to run the TPC-C test and how to install BenchmarkSQL and the database; please read it first: [BenchmarkSQL performance test](https://opengauss.org/zh/blogs/blogs.html?post/jiajunfeng/benchmarksql%E6%80%A7%E8%83%BD%E6%B5%8B%E8%AF%95/).
+
+# 2. Hardware & software for high performance
+
+This openGauss deployment uses a TaiShan 200 server (model 2280):
+
+* CPU: Kunpeng 920 6426, 2 sockets, 128 cores
+
+* Memory: 24 x 32 GB DIMMs
+* Network: 10GE bandwidth
+* NIC: 1822 SP580 25GE
+* Disks: 3 NVMe drives, 7.3 TB each (HWE52P438T0L005N)
+
+Software:
+
+* OS: openEuler 20.03 (LTS)
+* Architecture: aarch64
+* BIOS: disable `Support Smmc`, `CPU Prefetching Configuration`, and `Die Interleaving`
+* XFS file system with an 8k block size
+  * `xfs_info <nvme mount>` shows whether bsize is 8192; if not, reformat the NVMe drive
+* NIC interrupts bound to dedicated cores (see the steps below)
+* numactl installed
+* htop installed
+
+**For the BenchmarkSQL host, a Kunpeng 920 128-core machine is recommended; there are no special memory or disk requirements, but it must be on the same subnet as the database, with a stable ping latency under 0.05 ms.**
+
+Bisheng JDK must be installed on the BenchmarkSQL host and added to the environment: [JDK_8u292](https://mirror.iscas.ac.cn/kunpeng/archive/compiler/bisheng_jdk/bisheng-jdk-8u292-linux-aarch64.tar.gz)
+
+# 3. Optimizing the database configuration
+
+With the hardware and operating system tuned, the database must also make full use of the hardware to reach peak performance, and that depends on its configuration: even on the same machine, different settings can produce huge performance differences.
+
+## 1. Disk layout
+
+Disk I/O usually becomes the bottleneck of the whole database. openGauss uses write-ahead (WAL) transaction logging to write the disk sequentially and improve I/O utilization, but that alone is not enough. For high performance, we spread the largely independent write streams across different NVMe drives so the disks can work in parallel and disk I/O utilization rises sharply.
+
+By default openGauss writes everything under the installation path (referred to below as `$gauss_home`). We can place the xlog and the hot tables on different disks to raise disk I/O utilization, implemented here with soft links. Shut openGauss down properly before rearranging the disks: `gs_ctl stop -D $gauss_home`.
+
+The xlog maps to `$gauss_home/pg_xlog`, while the hot tables bmsql_customer and bmsql_stock normally go into tablespaces, under `$gauss_home/pg_location`.
+
+**Note: the tablespaces are created with the relative path tb/exampleX; the creation syntax is `create tablespace example2 relative location 'tb/example2'`**:
+
+The final disk mapping used in the demo:
+
+| Disk | Mount path | Data directory | Soft link |
+| ------------ | -------- | ---------------------------------------------- | ---------------------------------- |
+| /dev/nvme0n1 | /usr1 | $gauss_home=/usr1/peilq_dn/test_01 | n/a |
+| /dev/nvme1n1 | /usr2 | /usr2/peilq_dn/test_01/pg_xlog | $gauss_home/pg_xlog |
+| /dev/nvme2n1 | /usr3 | /usr3/peilq_dn/test_01/pg_location/tb/example2 | $gauss_home/pg_location/tb/example2 |
+| /dev/nvme2n1 | /usr3 | /usr3/peilq_dn/test_01/pg_location/tb/example3 | $gauss_home/pg_location/tb/example3 |
+
+![directory layout](../img/高性能测试-目录划分.png "directory layout")
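+The soft links can be created while the database is stopped; a minimal sketch for the xlog mapping in the table above (paths as in the demo):
+
+```bash
+gs_ctl stop -D $gauss_home
+mv $gauss_home/pg_xlog /usr2/peilq_dn/test_01/pg_xlog
+ln -s /usr2/peilq_dn/test_01/pg_xlog $gauss_home/pg_xlog
+```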
+## 2. High-performance settings
+
+Append the following to `$gauss_home/postgresql.conf` (for duplicated parameters, the last occurrence wins):
+
+```properties
+remote_read_mode = non_authentication
+replication_type = 1
+sync_config_strategy = none_node
+recovery_max_workers = 20
+
+max_connections = 4096
+allow_concurrent_tuple_update = true
+audit_enabled = off
+cstore_buffers = 16MB
+enable_alarm = off
+enable_codegen = false
+enable_data_replicate = off
+full_page_writes = off
+max_files_per_process = 100000
+max_prepared_transactions = 2048
+shared_buffers = 350GB
+use_workload_manager = off
+wal_buffers = 1GB
+work_mem = 1MB
+transaction_isolation = 'read committed'
+default_transaction_isolation = 'read committed'
+synchronous_commit = off
+fsync = on
+maintenance_work_mem = 2GB
+vacuum_cost_limit = 10000
+autovacuum = on
+autovacuum_mode = vacuum
+autovacuum_max_workers = 20
+autovacuum_naptime = 5s
+autovacuum_vacuum_cost_delay = 10
+update_lockwait_timeout = 20min
+enable_mergejoin = off
+enable_nestloop = off
+enable_hashjoin = off
+enable_material = off
+wal_log_hints = off
+log_duration = off
+checkpoint_timeout = 15min
+autovacuum_vacuum_scale_factor = 0.1
+autovacuum_analyze_scale_factor = 0.02
+enable_save_datachanged_timestamp = false
+enable_double_write = on
+enable_incremental_checkpoint = on
+enable_opfusion = on
+advance_xlog_file_num = 100
+numa_distribute_mode = 'all'
+track_activities = off
+enable_instr_track_wait = off
+enable_instr_rt_percentile = off
+track_counts = on
+track_sql_count = off
+enable_instr_cpu_timer = off
+plog_merge_age = 0
+session_timeout = 0
+enable_instance_metric_persistent = off
+enable_logical_io_statistics = off
+enable_page_lsn_check = off
+enable_user_metric_persistent = off
+enable_xlog_prune = off
+enable_resource_track = off
+instr_unique_sql_count=0
+
+remote_read_mode=non_authentication
+wal_level = archive
+hot_standby = off
+hot_standby_feedback = off
+client_min_messages = ERROR
+log_min_messages = FATAL
+enable_asp = off
+enable_bbox_dump = off
+bgwriter_flush_after = 32
+minimum_pool_size = 200
+wal_keep_segments = 1025
+enable_bitmapscan = off
+enable_seqscan = off
+enable_beta_opfusion=on
+enable_thread_pool = on
+checkpoint_segments=8000
+enable_stmt_track=false
+bgwriter_thread_num = 1
+bgwriter_delay = 5s
+incremental_checkpoint_timeout = 5min
+thread_pool_attr = '464,4,(cpubind:1-28,32-60,64-92,96-124)'
+xloginsert_locks = 16
+wal_writer_cpu=0
+wal_file_init_num = 20
+xlog_idle_flushes_before_sleep = 500000000
+pagewriter_sleep = 10ms
+```
+
+The key performance-critical settings are:
+
+shared_buffers = 350GB
+
+enable_thread_pool = on
+
+checkpoint_segments=8000
+
+incremental_checkpoint_timeout = 5min
+
+thread_pool_attr = '464,4,(cpubind:1-28,32-60,64-92,96-124)'
+
+wal_writer_cpu=0
+
+For details on these parameters, see the developer guide: [Developer Guide](https://opengauss.org/zh/docs/2.1.0/docs/Developerguide/Developerguide.html)
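+Instead of editing the file by hand, the same settings can also be applied with gs_guc; a sketch for the two restart-level parameters (run as the cluster user):
+
+```bash
+gs_guc set -D $gauss_home -c "shared_buffers=350GB"
+gs_guc set -D $gauss_home -c "enable_thread_pool=on"
+# both parameters only take effect after a restart
+gs_ctl restart -D $gauss_home
+```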
+## 3. Core binding
+
+Pinning network interrupts avoids the performance loss caused by random scheduling, so dedicating specific CPUs to network interrupt handling improves network I/O throughput. The script bind_net_irq_12.sh below must run as root and uses 12 cores for network interrupts; the cores it uses do not overlap with the cores occupied by the database processes. Note that intf is the name of the high-performance NIC; set it correctly:
+
+```bash
+#!/bin/bash
+# NIC name
+intf=enp4s0
+systemctl stop irqbalance.service
+systemctl disable irqbalance.service
+ethtool -L $intf combined 12
+
+irq_list=`cat /proc/interrupts | grep $intf | awk {'print $1'} | tr -d ":"`
+irq_array_net=($irq_list)
+
+cpu_array_irq=(29 30 31 61 62 63 93 94 95 125 126 127)
+
+for (( i=0;i<12;i++ ))
+do
+    echo "${cpu_array_irq[$i]}" > /proc/irq/${irq_array_net[$i]}/smp_affinity_list
+done
+
+for j in ${irq_array_net[@]}
+do
+    cat /proc/irq/$j/smp_affinity_list
+done
+```
+
+The irq_check.sh script below checks whether the binding took effect:
+
+```bash
+#!/bin/bash
+# NIC name
+intf=enp4s0
+# fetch the interrupt numbers
+rx_irq_list=(`cat /proc/interrupts | grep ${intf} | awk -F':' '{print $1}'`)
+echo "check irf of net interface ${intf}"
+
+echo "rx"
+for rx_irq in ${rx_irq_list[@]}
+do
+    echo `cat /proc/irq/$rx_irq/smp_affinity_list`
+done
+```
+
+With all of the above configured, start the database with `numactl -C 0-28,32-60,64-92,96-124 gs_ctl -D $gauss_home start &`.
+
+## 4. BenchmarkSQL configuration
+
+openGauss already provides a PostgreSQL-compatible JDBC driver, downloadable from the Maven central repository under the coordinates:
+
+```xml
+<dependency>
+    <groupId>org.opengauss</groupId>
+    <artifactId>opengauss-jdbc</artifactId>
+    <version>2.0.1-compatibility</version>
+</dependency>
+```
+
+Put the JDBC jar in the benchmarksql/lib/postgres directory.
+
+This driver changes the driver class to `org.opengauss.Driver` and the recognized URL prefix to `jdbc:opengauss://`.
+
+Modify BenchmarkSQL's props.og accordingly as shown below. Note the extra JDBC parameters in the conn string; terminals is the concurrency, and 696 is the current empirical value for 1.5 million tpmC:
+
+```properties
+db=postgres
+driver=org.opengauss.Driver
+conn=jdbc:opengauss://dbhost:port/test?loggerLevel=OFF&prepareThreshold=1&fetchsize=10&preparedStatementCacheSizeMiB=0&preparedStatementCacheQueries=0
+user=test
+password=********
+
+warehouses=1000
+loadWorkers=80
+
+terminals=696
+//To run specified transactions per terminal- runMins must equal zero
+runTxnsPerTerminal=0
+//To run for specified minutes- runTxnsPerTerminal must equal zero
+runMins=30
+//Number of total transactions per minute
+limitTxnsPerMin=0
+
+//Set to true to run in 4.x compatible mode. Set to false to use the
+//entire configured database evenly.
+terminalWarehouseFixed=false
+
+//The following five values must add up to 100
+//The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec
+newOrderWeight=45
+paymentWeight=43
+orderStatusWeight=4
+deliveryWeight=4
+stockLevelWeight=4
+
+// Directory name to create for collecting detailed result data.
+// Comment this out to suppress.
+resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
+osCollectorScript=./misc/os_collector_linux.py
+osCollectorInterval=1
+//osCollectorSSHAddr=user@dbhost
+osCollectorDevices=net_eth0 blk_sda
+```
+
+# 4. Loading the data
+
+The 1.5-million-tpmC openGauss test uses 1000 warehouses, which produces roughly 100 GB of data.
+
+## 1. Loading
+
+Go to the $benchmark_home/run directory on the BenchmarkSQL machine.
+
+Following the [BenchmarkSQL performance test](https://opengauss.org/zh/blogs/blogs.html?post/jiajunfeng/benchmarksql%E6%80%A7%E8%83%BD%E6%B5%8B%E8%AF%95/) post from section 1, edit `sql.common/tableCreates.sql` to add the tablespace creation statements and make the hot tables bmsql_customer and bmsql_stock use them:
+
+```sql
+create tablespace example2 relative location 'tb/example2';
+create tablespace example3 relative location 'tb/example3';
+
+create table bmsql_customer xxx using tablespace example2;
+create table bmsql_stock xxx using tablespace example3;
+```
+
+Also edit `sql.common/indexCreates.sql` so that the bmsql_new_order index is stored in the example2 tablespace as well:
+
+```sql
+alter table bmsql_new_order add constraint bmsql_new_order_pkey
+  primary key (no_w_id, no_d_id, no_o_id) using index tablespace example2;
+```
+
+Run `./runDatabaseBuild.sh props.og` to finish loading the data.
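+Before moving on, it is worth confirming that the hot tables really landed on the intended tablespaces; a sketch using the system catalogs:
+
+```sql
+select c.relname, t.spcname
+from pg_class c join pg_tablespace t on c.reltablespace = t.oid
+where c.relname in ('bmsql_customer', 'bmsql_stock');
+```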
+## 2. Backing up the data
+
+Loading the 1000 warehouses takes about 30 minutes in this high-performance environment, and performance testing bloats both the data and the caches, so the data must be restored before every standard run. Restoring by reloading every time quickly becomes unbearable; instead, openGauss allows restoring by backing up the data directories. Note that openGauss must be stopped before backing up: `gs_ctl stop -D $gaussdb_home`.
+
+The backup script backup.sh is given below; adjust the absolute paths to your setup. The idea is simply to create a parallel .bak copy of the same path on each of the three NVMe drives:
+
+```bash
+#!/usr/bin/env bash
+if [ -z "$1" ]; then
+    echo "first param must set"
+    exit
+fi
+cur_path=$1
+bak=".bak"
+data_dir="/usr1/peilq_dn/$cur_path"
+data_dir_bak="$data_dir$bak"
+
+xlog="/usr2/peilq_dn/$cur_path"
+xlog_bak="$xlog$bak"
+
+tb="/usr3/peilq_dn/$cur_path"
+tb_bak="$tb$bak"
+
+rm -rf $data_dir_bak
+cp -rdf $data_dir $data_dir_bak
+
+rm -rf $xlog_bak
+cp -rdf $xlog $xlog_bak
+
+rm -rf $tb_bak
+cp -rdf $tb $tb_bak
+echo "OVER"
+```
+
+Usage: `sh backup.sh test01`.
+
+## 3. Restoring the data
+
+After a round of testing, the current openGauss data warehouse is polluted and subsequent runs get progressively slower; restore the warehouse from the backup made above.
+
+The database must again be stopped before restoring; if it is slow to stop after a test, it can be terminated forcibly with `kill -9 $pid`.
+
+The restore script recovery.sh is given below; adjust the absolute paths to your setup. The idea is to overwrite the corresponding path on each of the three NVMe drives from its .bak copy:
+
+```bash
+#!/usr/bin/env bash
+if [ -z "$1" ]; then
+    echo "first param must set"
+    exit
+fi
+
+cur_path=$1
+bak=".bak"
+data_dir="/usr1/peilq_dn/$cur_path"
+data_dir_bak="$data_dir$bak"
+
+xlog="/usr2/peilq_dn/$cur_path"
+xlog_bak="$xlog$bak"
+
+tb="/usr3/peilq_dn/$cur_path"
+tb_bak="$tb$bak"
+
+rm -rf $data_dir
+cp -rdf $data_dir_bak $data_dir
+
+rm -rf $xlog
+cp -rdf $xlog_bak $xlog
+
+rm -rf $tb
+cp -rdf $tb_bak $tb
+echo "OVER"
+```
+
+Usage: `sh recovery.sh test01`; restoring takes one to two minutes.
+
+Then start the database again with `numactl -C 0-28,32-60,64-92,96-124 gs_ctl -D $gauss_home start &`.
+
+# 5. Running the test
+
+Go to the $benchmark_home/run directory on the BenchmarkSQL machine.
+
+In props.og, `runMins=30` controls the test duration in minutes.
+
+Run `./runBenchmark.sh props.og` to start the test.
+
+During the test, the current average transaction rate, total transactions, and memory usage scroll by. The first number shown is the running average, `Running Average tpmTOTAL:`. Before the test completes, that value times 0.45 roughly equals the final result (for example, a running average of 3,400,000 corresponds to about 1,530,000 tpmC), so three to five minutes into a run you can usually tell whether performance is on track; in this environment the first value must stay above 3,000,000 to meet the 1.5-million target.
+
+While the test runs, you can execute htop on the database machine to inspect CPU usage: whether the core binding took effect, whether the CPUs are saturated, and so on.
+
+**Reset the data before every formal test run.**
+
+
+
diff --git a/content/zh/post/justbk/img/proxy-sql-flow.png b/content/zh/post/justbk/img/proxy-sql-flow.png
new file mode 100644
index 0000000000000000000000000000000000000000..7ab46f930dc94ae9c9550db9e1ec6bcc3fabc1b6
Binary files /dev/null and b/content/zh/post/justbk/img/proxy-sql-flow.png differ
diff --git a/content/zh/post/justbk/img/proxy-start-success.png b/content/zh/post/justbk/img/proxy-start-success.png
new file mode 100644
index 0000000000000000000000000000000000000000..069a74327d410cda76318b4d6aae9982f481d2cf
Binary files /dev/null and b/content/zh/post/justbk/img/proxy-start-success.png differ
diff --git a/content/zh/post/justbk/img/proxy-unzip-filelist.png b/content/zh/post/justbk/img/proxy-unzip-filelist.png
new file mode 100644
index 0000000000000000000000000000000000000000..78d469d46bf55cc4b1e9621bbc0222fee816dc20
Binary files /dev/null and b/content/zh/post/justbk/img/proxy-unzip-filelist.png differ
diff --git "a/content/zh/post/justbk/img/zabbix-agentd\346\227\240\347\233\256\345\275\225\346\235\203\351\231\220.png" "b/content/zh/post/justbk/img/zabbix-agentd\346\227\240\347\233\256\345\275\225\346\235\203\351\231\220.png"
new file mode 100644
index
0000000000000000000000000000000000000000..52225524b5e2dbffc2f7afe47d476ac010974ff0 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-agentd\351\205\215\347\275\256\351\224\231\350\257\257.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-agent\346\234\215\345\212\241\345\231\250\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-agent\346\234\215\345\212\241\345\231\250\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..32cdd713e3ab8839cb4543b028afd90610bf2a34 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-agent\346\234\215\345\212\241\345\231\250\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-agent\347\216\257\345\242\203\345\217\230\351\207\217\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-agent\347\216\257\345\242\203\345\217\230\351\207\217\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..b7b8d725a9d08201b00293957c99315f76811870 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-agent\347\216\257\345\242\203\345\217\230\351\207\217\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-agent\351\200\211\346\213\251\345\257\271\345\272\224\347\211\210\346\234\254.png" "b/content/zh/post/justbk/img/zabbix-agent\351\200\211\346\213\251\345\257\271\345\272\224\347\211\210\346\234\254.png" new file mode 100644 index 0000000000000000000000000000000000000000..b7d2963417c3d4fd820689a9f9efdba5e4d9c27b Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-agent\351\200\211\346\213\251\345\257\271\345\272\224\347\211\210\346\234\254.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-agent\351\252\214\350\257\201\347\233\221\346\216\247\351\241\271\350\257\273\345\217\226.png" "b/content/zh/post/justbk/img/zabbix-agent\351\252\214\350\257\201\347\233\221\346\216\247\351\241\271\350\257\273\345\217\226.png" new file mode 100644 index 0000000000000000000000000000000000000000..98043f3e171e907fb659357380bd5bad5c2088dc Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-agent\351\252\214\350\257\201\347\233\221\346\216\247\351\241\271\350\257\273\345\217\226.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-email\345\252\222\344\273\213\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-email\345\252\222\344\273\213\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..6ff70b96b57a2921d57ed42f7b62e3dc06cb26a2 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-email\345\252\222\344\273\213\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-openGauss\344\270\273\346\234\272\345\210\233\345\273\272.png" "b/content/zh/post/justbk/img/zabbix-openGauss\344\270\273\346\234\272\345\210\233\345\273\272.png" new file mode 100644 index 0000000000000000000000000000000000000000..cf2991bf3524014118329b384dfd448a69be835e Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-openGauss\344\270\273\346\234\272\345\210\233\345\273\272.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-postgresql\347\233\221\346\216\247\351\241\271.png" "b/content/zh/post/justbk/img/zabbix-postgresql\347\233\221\346\216\247\351\241\271.png" new file mode 100644 index 0000000000000000000000000000000000000000..1a2e6c0dabe30a7ffe8e00d4e3bd82cef0113e40 Binary files /dev/null and 
"b/content/zh/post/justbk/img/zabbix-postgresql\347\233\221\346\216\247\351\241\271.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-web\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-web\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..fd7508b779a0e7c698f854bcdcf0e6c150ecb712 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-web\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-windows-agent\347\211\210\346\234\254.png" "b/content/zh/post/justbk/img/zabbix-windows-agent\347\211\210\346\234\254.png" new file mode 100644 index 0000000000000000000000000000000000000000..02729a562a67211a371c8128a3ae6451635d9f6a Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-windows-agent\347\211\210\346\234\254.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\344\273\252\350\241\250\347\233\230\346\230\276\347\244\272.png" "b/content/zh/post/justbk/img/zabbix-\344\273\252\350\241\250\347\233\230\346\230\276\347\244\272.png" new file mode 100644 index 0000000000000000000000000000000000000000..ce7a730aa7e4fb9afa03ed1d1efb6fb302c14af3 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\344\273\252\350\241\250\347\233\230\346\230\276\347\244\272.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\345\210\233\345\273\272\350\247\246\345\217\221\345\231\250.png" "b/content/zh/post/justbk/img/zabbix-\345\210\233\345\273\272\350\247\246\345\217\221\345\231\250.png" new file mode 100644 index 0000000000000000000000000000000000000000..3df00a439498deb579e16a4bfdb0da6db1984857 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\345\210\233\345\273\272\350\247\246\345\217\221\345\231\250.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\345\210\233\345\273\272\350\247\246\345\217\221\345\231\250\345\212\250\344\275\234.png" "b/content/zh/post/justbk/img/zabbix-\345\210\233\345\273\272\350\247\246\345\217\221\345\231\250\345\212\250\344\275\234.png" new file mode 100644 index 0000000000000000000000000000000000000000..24d1ba7c922a9e0ac581e897869959ec52655cf2 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\345\210\233\345\273\272\350\247\246\345\217\221\345\231\250\345\212\250\344\275\234.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\345\242\236\345\212\240\345\212\250\344\275\234\345\223\215\345\272\224.png" "b/content/zh/post/justbk/img/zabbix-\345\242\236\345\212\240\345\212\250\344\275\234\345\223\215\345\272\224.png" new file mode 100644 index 0000000000000000000000000000000000000000..176a1805af4c28090c9d7b11552794349c332c18 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\345\242\236\345\212\240\345\212\250\344\275\234\345\223\215\345\272\224.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\346\230\276\347\244\272\344\270\273\346\234\272\347\233\221\346\216\247\351\241\271.png" "b/content/zh/post/justbk/img/zabbix-\346\230\276\347\244\272\344\270\273\346\234\272\347\233\221\346\216\247\351\241\271.png" new file mode 100644 index 0000000000000000000000000000000000000000..abf888ce2a02a0cdc5f14a89cc8446544bab6e76 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\346\230\276\347\244\272\344\270\273\346\234\272\347\233\221\346\216\247\351\241\271.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\346\250\241\346\235\277\345\210\233\345\273\272.png" "b/content/zh/post/justbk/img/zabbix-\346\250\241\346\235\277\345\210\233\345\273\272.png" new 
file mode 100644 index 0000000000000000000000000000000000000000..710d3aa2d2d210a9c14274250b30aaf2b9f21c1f Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\346\250\241\346\235\277\345\210\233\345\273\272.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240Windows\346\250\241\346\235\277\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240Windows\346\250\241\346\235\277\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..6bd43103ff360992f5d816d7b63cd7bbd1b528e9 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240Windows\346\250\241\346\235\277\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\344\270\273\346\234\272\346\210\220\345\212\237.png" "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\344\270\273\346\234\272\346\210\220\345\212\237.png" new file mode 100644 index 0000000000000000000000000000000000000000..5a3ec9ce0c052473c254cb5066b1c38706d5a122 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\344\270\273\346\234\272\346\210\220\345\212\237.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\344\270\273\346\234\272\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\344\270\273\346\234\272\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..9242f36a6e61f7bc0ed4eca934b8fa0b8f078527 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\344\270\273\346\234\272\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\347\233\221\346\216\247\351\241\271ipconfig.png" "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\347\233\221\346\216\247\351\241\271ipconfig.png" new file mode 100644 index 0000000000000000000000000000000000000000..e321f9485611b12ca90bd8d07debf597f3e0e666 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\346\267\273\345\212\240\347\233\221\346\216\247\351\241\271ipconfig.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\347\224\250\346\210\267email\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-\347\224\250\346\210\267email\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..02f3ed34866c5604a1f6abf43e86afd77e729360 Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\347\224\250\346\210\267email\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\347\233\221\346\216\247\351\241\271size\345\222\214\345\256\217\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-\347\233\221\346\216\247\351\241\271size\345\222\214\345\256\217\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..4aa2dc765ec30a53982041969ade732b53e99d8d Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\347\233\221\346\216\247\351\241\271size\345\222\214\345\256\217\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/justbk/img/zabbix-\347\233\221\346\216\247\351\241\271status\345\222\214\345\256\217\351\205\215\347\275\256.png" "b/content/zh/post/justbk/img/zabbix-\347\233\221\346\216\247\351\241\271status\345\222\214\345\256\217\351\205\215\347\275\256.png" new file mode 100644 index 
0000000000000000000000000000000000000000..594512917b4f3dbf7f91de624291fd08b44d2977
Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\347\233\221\346\216\247\351\241\271status\345\222\214\345\256\217\351\205\215\347\275\256.png" differ
diff --git "a/content/zh/post/justbk/img/zabbix-\347\274\226\350\276\221\350\247\246\345\217\221\345\231\250\350\241\250\350\276\276\345\274\217.png" "b/content/zh/post/justbk/img/zabbix-\347\274\226\350\276\221\350\247\246\345\217\221\345\231\250\350\241\250\350\276\276\345\274\217.png"
new file mode 100644
index 0000000000000000000000000000000000000000..c47b935e0fb582e6e26a2657b22c3befe2b43bd3
Binary files /dev/null and "b/content/zh/post/justbk/img/zabbix-\347\274\226\350\276\221\350\247\246\345\217\221\345\231\250\350\241\250\350\276\276\345\274\217.png" differ
diff --git "a/content/zh/post/justbk/img/\351\253\230\346\200\247\350\203\275\346\265\213\350\257\225-\347\233\256\345\275\225\345\210\222\345\210\206.png" "b/content/zh/post/justbk/img/\351\253\230\346\200\247\350\203\275\346\265\213\350\257\225-\347\233\256\345\275\225\345\210\222\345\210\206.png"
new file mode 100644
index 0000000000000000000000000000000000000000..12dbe5ee87c2a52b632b47d4d8ab2bb2a8d65348
Binary files /dev/null and "b/content/zh/post/justbk/img/\351\253\230\346\200\247\350\203\275\346\265\213\350\257\225-\347\233\256\345\275\225\345\210\222\345\210\206.png" differ
diff --git a/content/zh/post/justbk/title/perfermance_openGauss_logo.png b/content/zh/post/justbk/title/perfermance_openGauss_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..a538fcbeb763cc9940418e4b45df392e5ae84383
Binary files /dev/null and b/content/zh/post/justbk/title/perfermance_openGauss_logo.png differ
diff --git a/content/zh/post/justbk/title/shardingSphere_logo_v2.png b/content/zh/post/justbk/title/shardingSphere_logo_v2.png
new file mode 100644
index 0000000000000000000000000000000000000000..826a03f4250cfdd03789c50ac39b0d6f58c08b5c
Binary files /dev/null and b/content/zh/post/justbk/title/shardingSphere_logo_v2.png differ
diff --git a/content/zh/post/justbk/title/zabbix_title.png b/content/zh/post/justbk/title/zabbix_title.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f0933c5857ae37d26ae4d09a0ec1a7552f3d392
Binary files /dev/null and b/content/zh/post/justbk/title/zabbix_title.png differ
diff --git a/content/zh/post/kamus/.gitignore b/content/zh/post/kamus/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e43b0f988953ae3a84b00331d0ccf5f7d51cb3cf
--- /dev/null
+++ b/content/zh/post/kamus/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/content/zh/post/kamus/How to manage jobs in openGauss 2.1.0.md b/content/zh/post/kamus/How to manage jobs in openGauss 2.1.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f00c68fb9fb83fce15f5ed7e6f9c3f121671fe2
--- /dev/null
+++ b/content/zh/post/kamus/How to manage jobs in openGauss 2.1.0.md
@@ -0,0 +1,218 @@
++++
+title = "How to manage jobs in openGauss 2.1.0"
+date = "2021-10-12"
+tags = ["openGauss 2.1.0"]
+archives = "2021-10"
+author = "Kamus"
+summary = "openGauss 2.1.0 adds database job creation, so scheduled tasks can be defined and executed inside the database."
+img = "/zh/post/kamus/title/img-title.png"
+times = "21:30"
++++
+
+## Create a test table
+```
+gaussdb@postgres> create table t_job (value TIMESTAMP);
+CREATE TABLE
+
+gaussdb@postgres> insert into t_job values(sysdate);
+INSERT 0 1
+
+gaussdb@postgres> select * from t_job;
++---------------------+
+| value |
+|---------------------|
+| 2021-10-09 04:36:20 |
++---------------------+
+SELECT 1
+```
+
+## Create a job that inserts one row every minute
+
+The interval expression 'sysdate + 1/1440' advances the next run time by 1/1440 of a day, i.e. one minute.
+
+```
+gaussdb@postgres> select pkg_service.job_submit(null, 'insert into t_job values(sysdate);',sysdate,'sysdate + 1/1440');
++--------------+
+| job_submit |
+|--------------|
+| 15566 |
++--------------+
+SELECT 1
+```
+
+## Check the job's results
+```
+gaussdb@postgres> select * from t_job;
++---------------------+
+| value |
+|---------------------|
+| 2021-10-09 04:36:20 |
+| 2021-10-09 04:40:54 |
+| 2021-10-09 04:41:54 |
+| 2021-10-09 04:42:54 |
++---------------------+
+SELECT 4
+```
+
+## Check the job from the system views
+```
+gaussdb@postgres> select job_id,dbname,start_date,next_run_date,interval,failure_count from pg_job;
++----------+----------+----------------------------+---------------------+------------------+-----------------+
+| job_id | dbname | start_date | next_run_date | interval | failure_count |
+|----------+----------+----------------------------+---------------------+------------------+-----------------|
+| 15566 | postgres | 2021-10-09 04:40:54.072363 | 2021-10-09 04:56:54 | sysdate + 1/1440 | 0 |
++----------+----------+----------------------------+---------------------+------------------+-----------------+
+SELECT 1
+Time: 0.089s
+gaussdb@postgres> select * from pg_catalog.pg_job_proc pjp where job_id=15566;
++----------+------------------------------------+
+| job_id | what |
+|----------+------------------------------------|
+| 15566 | insert into t_job values(sysdate); |
++----------+------------------------------------+
+SELECT 1
+Time: 0.089s
+```
+
+## Change the schedule to every two minutes
+```
+gaussdb@postgres> select pkg_service.job_update(15566,null,'sysdate + 2/1440',null);
++--------------+
+| job_update |
+|--------------|
+| |
++--------------+
+SELECT 1
+```
+
+## Verify the change and the results
+```
+gaussdb@postgres> select job_id,interval from pg_job where job_id=15566;
++----------+------------------+
+| job_id | interval |
+|----------+------------------|
+| 15566 | sysdate + 2/1440 |
++----------+------------------+
+SELECT 1
+
+gaussdb@postgres> select job_id,interval,next_run_date from pg_job where job_id=15566;
++----------+------------------+---------------------+
+| job_id | interval | next_run_date |
+|----------+------------------+---------------------|
+| 15566 | sysdate + 2/1440 | 2021-10-09 05:05:57 |
++----------+------------------+---------------------+
+SELECT 1
+Time: 0.078s
+```
+
+## Disable and enable a job
+Disabling and enabling use the same function, pkg_service.job_finish; the arguments determine which action is taken.
+```
+gaussdb@postgres> select pkg_service.job_finish(15566,true,null);
++--------------+
+| job_finish |
+|--------------|
+| |
++--------------+
+SELECT 1
+Time: 0.089s
+gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
++----------+---------------------+--------------+
+| job_id | next_run_date | job_status |
+|----------+---------------------+--------------|
+| 15566 | 4000-01-01 00:00:00 | d |
++----------+---------------------+--------------+
+SELECT 1
+Time: 0.075s
+gaussdb@postgres> select pkg_service.job_finish(15566,false,null);
++--------------+
+| job_finish |
+|--------------|
+| |
++--------------+
+SELECT 1
+Time: 0.091s
+gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
++----------+---------------------+--------------+
+| job_id | next_run_date | job_status |
+|----------+---------------------+--------------|
+| 15566 | 4000-01-01 00:00:00 | s |
++----------+---------------------+--------------+
+SELECT 1
+Time: 0.080s
+```
+
+Note that when a job is re-enabled without a next run time, the next run time stays at year 4000, which means the job still will not start. So after disabling a job, you must explicitly pass a next run time when re-enabling it.
+
+```
+gaussdb@postgres> select pkg_service.job_finish(15566,false,sysdate);
++--------------+
+| job_finish |
+|--------------|
+| |
++--------------+
+SELECT 1
+Time: 0.088s
+gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
++----------+---------------------+--------------+
+| job_id | next_run_date | job_status |
+|----------+---------------------+--------------|
+| 15566 | 2021-10-09 05:16:22 | s |
++----------+---------------------+--------------+
+SELECT 1
+Time: 0.086s
+```
+
+## Delete a job
+```
+gaussdb@postgres> select pkg_service.job_cancel(15566);
++--------------+
+| job_cancel |
+|--------------|
+| |
++--------------+
+SELECT 1
+Time: 0.082s
+gaussdb@postgres> select job_id,next_run_date,job_status from pg_job where job_id=15566;
++----------+-----------------+--------------+
+| job_id | next_run_date | job_status |
+|----------+-----------------+--------------|
++----------+-----------------+--------------+
+SELECT 0
+Time: 0.086s
+gaussdb@postgres> select * from pg_catalog.pg_job_proc pjp where job_id=15566;
++----------+--------+
+| job_id | what |
+|----------+--------|
++----------+--------+
+SELECT 0
+Time: 0.087s
+```
\ No newline at end of file
diff --git a/content/zh/post/kamus/title/img-title.png b/content/zh/post/kamus/title/img-title.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79
Binary files /dev/null and b/content/zh/post/kamus/title/img-title.png differ
diff --git a/content/zh/post/labixiaoxin/title/img.png b/content/zh/post/labixiaoxin/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/labixiaoxin/title/img.png differ
diff --git "a/content/zh/post/labixiaoxin/\346\225\260\346\215\256\350\204\261\346\225\217.md" "b/content/zh/post/labixiaoxin/\346\225\260\346\215\256\350\204\261\346\225\217.md"
new file mode 100644
index 0000000000000000000000000000000000000000..dac8fdcc6de35e02a6d08c77152645f817e00609
--- /dev/null
+++ "b/content/zh/post/labixiaoxin/\346\225\260\346\215\256\350\204\261\346\225\217.md"
@@ -0,0 +1,31 @@
++++
+
+title = "Built-in Masking Functions in openGauss"
+
+date = "2022-05-14"
+
+tags = ["Getting Started with openGauss Community Development"]
+
+archives = "2022-05"
+
+author = "labixiaoxin"
+
+summary = "Getting started with openGauss community development"
+
+img = "/zh/post/xingchen/title/title.jpg"
+
+times = "19:30"
+
++++
+
+## Built-in masking functions
+
+| Masking function | Example |
+|----|----|
+| creditcardmasking | '4880-9898-4545-2525' is masked as 'xxxx-xxxx-xxxx-2525'; only the digits before the last four are masked |
+| basicemailmasking | 'abcd@gmail.com' is masked as 'xxxx@gmail.com'; the text before the first '@' is masked |
+| fullemailmasking | 'abcd@gmail.com' is masked as 'xxxx@xxxxx.com'; the text before the last '.' (except the '@') is masked |
+| alldigitsmasking | 'alex123alex' is masked as 'alex000alex'; only the digits in the text are masked |
+| shufflemasking | 'hello word' is shuffled into something like 'hlwoeor dl'; this works by randomly reordering characters, so it is a weak masking function and is not recommended for strings with strong semantics |
+| randommasking | 'hello word' is masked as 'ad5f5ghdf5'; the text is masked character by character at random |
+| maskall | '4880-9898-4545-2525' is masked as 'xxxxxxxxxxxxxxxxxxx' |
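+These functions are not called directly; they take effect through openGauss's dynamic data masking policies. A minimal sketch (table, label, and policy names are illustrative, and it assumes the security-policy feature is enabled via the enable_security_policy parameter):
+
+```
+CREATE TABLE orders (id int, creditcard varchar(19));
+CREATE RESOURCE LABEL mask_lb ADD COLUMN(orders.creditcard);
+CREATE MASKING POLICY mask_pol creditcardmasking ON LABEL(mask_lb);
+-- selecting orders.creditcard now returns values like 'xxxx-xxxx-xxxx-2525'
+```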
"MogDB AI 特性系列1_X-Tuner" + +date = "2022-04-11" + +tags = ["MogDB AI 特性系列1_X-Tuner"] + +archives = "2022-04" + +author = "李宏达" + +summary = "MogDB AI 特性系列1_X-Tuner" + +img = "/zh/post/lihongda/title/img6.png" + +times = "10:20" + ++++ + +# MogDB AI 特性系列1_X-Tuner + +## 一、概述 + + X-Tuner 是一款数据库集成的参数调优工具,通过结合深度强化学习和全局搜索算法等AI技术,实现在无需人工干预的情况下,获取最佳数据库参数配置。本功能不强制与数据库环境部署到一起,**支持独立部署**,脱离数据库安装环境独立运行。 + +## 二、使用准备 + +**前提条件与使用事项** + +- 数据库状态正常、客户端能够正常连接、且要求数据库内导入数据,以便调优程序可以执行benchmark测试调优效果。 +- 使用本工具需要指定登录到数据库的用户身份,要求该登录到数据库上的用户具有足够的权限,以便可以获得充足的数据库状态信息。 +- 使用登录到数据库宿主机上的Linux用户,需要将$GAUSSHOME/bin添加到PATH环境变量中,即能够直接运行gsql、gs_guc、gs_ctl等数据库运维工具。 +- Python版本建议为Python3.6及以上,且运行环境中已经安装相应依赖,并能够正常启动调优程序。您可以独立安装一个python3.6+的环境,无需设置到全局环境变量中。不建议使用root用户权限安装本工具,如果以root身份安装本完毕工具,使用其他用户身份运行本工具时,需要确保配置文件有读取权限。 +- 本工具支持以三种模式运行,其中tune和train模式要求用户配置好benchmark运行环境,并导入数据,本工具将会通过迭代运行benchmark来判断修改后的参数是否有性能提升。 +- recommend模式建议在数据库正在执行workload的过程中执行,以便获得更准确的实时workload信息。 +- 本工具默认带有TPC-C、TPC-H、TPC-DS以及sysbench的benchmark运行脚本样例,如果用户使用上述benchmark对数据库系统进行压力测试,则可以对上述配置文件进行适度修改或配置。如果需要适配用户自己的业务场景,需要您参照benchmark目录中的template.py文件编写驱动您自定义benchmark的脚本文件。 + +## 三、原理简介 + + 调优程序是一个独立于数据库内核之外的工具,需要提供数据库及其所在实例的用户名和登录密码信息,以便控制数据库执行benchmark进行性能测试;在启动调优程序前,要求用户测试环境交互正常,能够正常跑通benchmark测试脚本、能够正常连接数据库。 + +**X-Tuner运行模式** + +- recommend: 通过用户指定的用户名等信息登录到数据库环境中,获取当前正在运行的workload特征信息,根据上述特征信息生成参数推荐报告。报告当前数据库中不合理的参数配置和潜在风险等;输出根据当前正在运行的workload行为和特征;输出推荐的参数配置。 该模式是秒级的,不涉及数据库的重启操作,其他模式可能需要反复重启数据库。 +- train: 通过用户提供的benchmark信息,不断地进行参数修改和benchmark的执行。通过反复的迭代过程,训练强化学习模型,以便用户在后面通过tune模式加载该模型进行调优。 +- tune: 使用优化算法进行数据库参数的调优,当前支持两大类算法,一种是深度强化学习,另一种是全局搜索算法(全局优化算法)。深度强化学习模式要求先运行train模式,生成训练后的调优模型,而使用全局搜索算法则不需要提前进行训练,可以直接进行搜索调优。 + +**X-Tuner架构图** +![image.png](../figures/20210117-95f2c7cc-a440-4184-853f-747d94f85978.png) + +- DB侧:通过DB_Agent 模块对数据库实例进行抽象,通过该模块可以获取数据库内部的状态信息、当前数据库参数、以及设置数据库参数等。DB侧包括登录数据库环境使用的SSH连接。 +- 算法侧:用于调优的算法包,包括全局搜索算法(如贝叶斯优化、粒子群算法等)和深度强化学习(如DDPG); +- X-Tuner 主体逻辑模块:通过Enviroment模块进行封装,每一个step 就是一次调优过程。整个调优过程通过多个step进行迭代; +- benchmark: 由用户指定的benchmark性能测试脚本,用于运行benchmark作业,通过跑分结果反映数据库系统性能优劣。 + +## 四、X-Tuner安装 + +### 1. 程序位置 + +- 源码位置 + +``` +[root@ecs-saving-0008 xtuner]# pwd /root/openGauss-server/src/gausskernel/dbmind/tools/xtuner +``` + +- 已安装过的数据库 + +``` +[omm@ecs-saving-0001 ~]$ cd $GAUSSHOME/bin/dbmind/xtuner [omm@ecs-saving-0001 xtuner]$ pwd /opengauss/app/1.0.1/bin/dbmind/xtuner +``` + +### 2. 安装依赖 + +```sql +[root@ecs-saving-0008 xtuner]# ls +build dist openGauss_xtuner.egg-info Readme.md requirements.txt setup.py share test tuner +[root@ecs-saving-0008 xtuner]# pip3 install --upgrade pip +[root@ecs-saving-0008 xtuner]# pip3 install -r requirements.txt +[root@ecs-saving-0008 xtuner]# cat requirements.txt +tensorflow>=2.2.0 +keras-rl2 +paramiko +bayesian-optimization +ptable +[root@ecs-saving-0008 xtuner]# pip3 install keras +``` + +默认网站较慢建议学会科学上网,手动解决依赖。 +[Python Package Index](https://pypi.org/) + +### 3. 安装X-Tuner + +```sql +[root@ecs-saving-0008 xtuner]# pwd +/root/openGauss-server/src/gausskernel/dbmind/tools/xtuner +[root@ecs-saving-0008 xtuner]# python3 setup.py install +[root@ecs-saving-0008 xtuner]# gs_xtuner --help +usage: gs_xtuner [-h] [--db-name DB_NAME] [--db-user DB_USER] [--port PORT] + [--host HOST] [--host-user HOST_USER] + [--host-ssh-port HOST_SSH_PORT] [-f DB_CONFIG_FILE] + [-x TUNER_CONFIG_FILE] [-v] + {train,tune,recommend} + +X-Tuner: a self-tuning tool integrated by openGauss. + +positional arguments: + {train,tune,recommend} + Train a reinforcement learning model or tune database + by model. 
And also can recommend best_knobs according + to your workload. + +optional arguments: + -h, --help show this help message and exit + -f DB_CONFIG_FILE, --db-config-file DB_CONFIG_FILE + You can pass a path of configuration file otherwise + you should enter database information by command + arguments manually. Please see the template file + share/server.json.template. + -x TUNER_CONFIG_FILE, --tuner-config-file TUNER_CONFIG_FILE + This is the path of the core configuration file of the + X-Tuner. You can specify the path of the new + configuration file. The default path is + /usr/local/lib/python3.6/site-packages/openGauss_xtune + r-2.0.0-py3.6.egg/tuner/xtuner.conf. You can modify + the configuration file to control the tuning process. + -v, --version show program's version number and exit + +Database Connection Information: + --db-name DB_NAME The name of database where your workload running on. + --db-user DB_USER Use this user to login your database. Note that the + user must have sufficient permissions. + --port PORT Use this port to connect with the database. + --host HOST The IP address of your database installation host. + --host-user HOST_USER + The login user of your database installation host. + --host-ssh-port HOST_SSH_PORT + The SSH port of your database installation host. +``` + +## 五、文件解读 + +``` +[root@ecs-saving-0008 tools]# pwd +/root/openGauss-server/src/gausskernel/dbmind/tools +[root@ecs-saving-0008 tools]# ls +anomaly_detection index_advisor predictor sqldiag xtuner +``` + +**分别为五个AI功能,后续文章会逐一介绍。** + +- Anomaly-detection:数据库指标采集、预测与异常监控 +- Index-advisor:索引推荐 +- Predictor: AI查询时间预测 +- SQLdiag: 慢SQL发现 +- X-Tuner: 参数调优与诊断 + +``` +[root@ecs-saving-0008 benchmark]# pwd /root/openGauss-server/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark __init__.py Readme.md sysbench.py template.py tpcc.py tpcds.py tpch.py +``` + +**需提前调试好相关程序并生成测试数据** + +- benchmark 模拟压力模型,支持 sysbench.py template.py tpcc.py tpcds.py tpch.py +- 在使用tune和train 模式前,用户需要先导入benchmark所需数据并检查benchmark能否正常跑通,并备份好此时的数据库参数,查询当前数据库参数的方法为:select name, setting from pg_settings; +- 目前tpcc测试程序相关代码有问题,暂时手动模拟压力解决。 + +**配置文件解读** + +``` +[root@ecs-saving-0008 tuner]# cat xtuner.conf |grep -v ^$|grep -v ^# +[Master] +logfile = log/opengauss_tuner.log +output_tuning_result = tuned_knobs.json +verbose = on +recorder_file = log/recorder.log +tune_strategy = auto # rl, gop or auto +drop_cache = on # You must modify the permission of the login user in the /etc/sudoers file and grant the NOPASSWD permission to the user. +used_mem_penalty_term = 1e-9 # Prevent taking up more memory. +[Reinforcement Learning] +rl_algorithm = ddpg # ddpg, dqn. Not support dqn yet. +rl_model_path = model/rl.model +rl_steps = 100 +max_episode_steps = 10 +test_episode = 1 +[Gloabal Optimization Algorithm] +gop_algorithm = bayes # bayes, pso +max_iterations = 100 +particle_nums = 3 # A larger value indicates higher accuracy but slower speed. +[Benchmark] +benchmark_script = tpcc +benchmark_path = '/opt/benchmarksql-5.0/run' # If this parameter is blank, the default path in the benchmark script is used. +benchmark_cmd = "./runBenchmark.sh props.og" # If this parameter is blank, the default cmd in the benchmark script is used. +[Knobs] +scenario = auto # ap, tp, htap or auto +tuning_list = # template: share/knobs.json.template +``` + +- rl_algorithm:用于训练强化学习模型的算法,当前支持设置为ddpg. 
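+With tune_strategy and the related options set, tuning itself is launched like the other modes; for example, using the connection configuration file described in the usage examples below (a sketch):
+
+```
+gs_xtuner tune -f connection.json -x xtuner.conf
+```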
+- rl_model_path: 训练后生成的强化学习模型保存路径。 +- rl_steps:训练过程的最大迭代步数。 +- max_episode_steps:每个回合的最大步数。 +- scenario: 明确指定的workload类型,如果为auto则为自动判断。在不同模式下,推荐的调优参数列表也不一样。 +- tuning_list: 用户指定需要调哪些参数,如果不指定,则根据workload类型自动推荐应该调的参数列表。 +- tune_strategy: 指定选择哪种算法进行调优,支持rl(使用强化学习模型进行调优)、gop (使用全局搜索算法)以及 aut- (自动选择)。若该参数设置为rl,则rl相关的配置项生效。除前文提到过的train模式下生效的配置项外,test_episode配置项- 生效,该配置项表明调优过程的最大回合数,该参数直接影响了调优过程的执行时间(一般地,数值越大越耗时)。 +- gop_algorithm: 选择何种全局搜索算法,支持bayes以及pso. +- max_iterations: 最大迭代轮次,数值越高搜索时间越长,效果往往越好。 +- particle_nums: 在PSO算法上生效,表示粒子数。 +- scenario 与 tuning_list 见上文 train 模式中的描述。 +- drop_cache = on 需修改/etc/sudoers文件 添加如下 username ALL=(ALL) NOPASSWD: ALL + +## 六、使用示例 + + X-Tuner 支持三种模式,分别是获取参数诊断报告的recommend模式、训练强化学习模型的train模式、以及使用算法进行调优的tune模式。上述三种模式可以通过命令行参数来区别,通过配置文件来指定具体的细节。 + +### 1. 配置数据库连接信息 + +有两种方式 + +- a. 通过命令行执行 + +``` + [root@ecs-saving-0008 xtuner]# gs_xtuner recommend --db-name test1 --db-user test1 --port 26000 --host 192.168.1.XXX --host-user omm -x xtuner.conf +``` + +- b. json配置文件 + +``` +[root@ecs-saving-0008 tuner]# cat connection.json { "db_name": "test1", "db_user": "test1", "host": "192.168.1.xxx", "host_user": "omm", "port": 26000, "ssh_port": 22 } [root@ecs-saving-0008 xtuner]# gs_xtuner recommend -f connection.json -x xtuner.conf +``` + +### 2. recommend 模式 + +``` + +[root@ecs-saving-0008 tuner]# gs_xtuner recommend --db-name test1 --db-user test1 --port 26000 --host 192.168.1.xxx --host-user omm +Please input the password of database: +Please input the password of host: +Start to recommend knobs. Just a moment, please. +******************************************************* Knob Recommendation Report ******************************************************* +INFO: ++---------------------------------------+-----------------------+ +| Metric | Value | ++---------------------------------------+-----------------------+ +| workload_type | tp | +| dirty_background_bytes | 0 | +| temp_file_size | 0 | +| current_locks_count | 0.0 | +| current_prepared_xacts_count | 0.0 | +| average_connection_age | 0.013457 | +| checkpoint_proactive_triggering_ratio | 0.0413987138263666 | +| rollback_commit_ratio | 0.07913229312035018 | +| fetched_returned_ratio | 0.3706085439318767 | +| cache_hit_rate | 0.9780311156824839 | +| current_connections | 1.0 | +| uptime | 1.48835260083333 | +| search_modify_ratio | 119.88027537957866 | +| all_database_size | 1436801.01171875 | +| current_free_mem | 15400044 | +| os_mem_total | 16430884 | +| checkpoint_avg_sync_time | 2.03215434083601 | +| read_write_ratio | 2.1202316810075224 | +| ap_index | 2.1987815358313485 | +| max_processes | 3010 | +| track_activity_size | 3010.0 | +| used_mem | 3155124224.0 | +| write_tup_speed | 374.211175655672 | +| os_cpu_count | 4 | +| checkpoint_dirty_writing_time_window | 720.0 | +| read_tup_speed | 793.422421264566 | +| block_size | 8.0 | +| shared_buffer_toast_hit_rate | 91.9572192513369 | +| shared_buffer_heap_hit_rate | 99.5745962509106 | +| shared_buffer_idx_hit_rate | 99.67849101944427 | +| shared_buffer_tidx_hit_rate | 99.78536585365853 | +| is_hdd | False | +| enable_autovacuum | True | +| is_64bit | True | +| load_average | [0.26, 0.5375, 0.525] | ++---------------------------------------+-----------------------+ +p.s: The unit of storage is kB. +WARN: +[0]. The total size of all databases is less than the memory size. Therefore, it is unnecessary to set shared_buffers to a large value. +[1]. The database runs for a short period of time, and the database description may not be accumulated. 
+### 2. recommend 模式
+
+```
+[root@ecs-saving-0008 tuner]# gs_xtuner recommend --db-name test1 --db-user test1 --port 26000 --host 192.168.1.xxx --host-user omm
+Please input the password of database:
+Please input the password of host:
+Start to recommend knobs. Just a moment, please.
+******************************************************* Knob Recommendation Report *******************************************************
+INFO:
++---------------------------------------+-----------------------+
+| Metric                                | Value                 |
++---------------------------------------+-----------------------+
+| workload_type                         | tp                    |
+| dirty_background_bytes                | 0                     |
+| temp_file_size                        | 0                     |
+| current_locks_count                   | 0.0                   |
+| current_prepared_xacts_count          | 0.0                   |
+| average_connection_age                | 0.013457              |
+| checkpoint_proactive_triggering_ratio | 0.0413987138263666    |
+| rollback_commit_ratio                 | 0.07913229312035018   |
+| fetched_returned_ratio                | 0.3706085439318767    |
+| cache_hit_rate                        | 0.9780311156824839    |
+| current_connections                   | 1.0                   |
+| uptime                                | 1.48835260083333      |
+| search_modify_ratio                   | 119.88027537957866    |
+| all_database_size                     | 1436801.01171875      |
+| current_free_mem                      | 15400044              |
+| os_mem_total                          | 16430884              |
+| checkpoint_avg_sync_time              | 2.03215434083601      |
+| read_write_ratio                      | 2.1202316810075224    |
+| ap_index                              | 2.1987815358313485    |
+| max_processes                         | 3010                  |
+| track_activity_size                   | 3010.0                |
+| used_mem                              | 3155124224.0          |
+| write_tup_speed                       | 374.211175655672      |
+| os_cpu_count                          | 4                     |
+| checkpoint_dirty_writing_time_window  | 720.0                 |
+| read_tup_speed                        | 793.422421264566      |
+| block_size                            | 8.0                   |
+| shared_buffer_toast_hit_rate          | 91.9572192513369      |
+| shared_buffer_heap_hit_rate           | 99.5745962509106      |
+| shared_buffer_idx_hit_rate            | 99.67849101944427     |
+| shared_buffer_tidx_hit_rate           | 99.78536585365853     |
+| is_hdd                                | False                 |
+| enable_autovacuum                     | True                  |
+| is_64bit                              | True                  |
+| load_average                          | [0.26, 0.5375, 0.525] |
++---------------------------------------+-----------------------+
+p.s: The unit of storage is kB.
+WARN:
+[0]. The total size of all databases is less than the memory size. Therefore, it is unnecessary to set shared_buffers to a large value.
+[1]. The database runs for a short period of time, and the database description may not be accumulated. The recommendation result may be inaccurate.
+[2]. The number of CPU cores is a little small. Please do not run too high concurrency. You are recommended to set max_connections based on the number of CPU cores. If your job does not consume much CPU, you can also increase it.
+BAD:
+[0]. The value of wal_buffers is too high. Generally, a large value does not bring better performance.
+******************************************************** Recommended Knob Settings ********************************************************
++---------------------------+-----------+--------+----------+---------+
+| name                      | recommend | min    | max      | restart |
++---------------------------+-----------+--------+----------+---------+
+| shared_buffers            | 179600    | 179600 | 206540   | True    |
+| max_connections           | 100       | 20     | 500      | True    |
+| effective_cache_size      | 179600    | 179600 | 12323163 | False   |
+| effective_io_concurrency  | 200       | 150    | 250      | False   |
+| wal_buffers               | 5612      | 2048   | 5612     | True    |
+| random_page_cost          | 1.0       | 1.0    | 2.0      | False   |
+| default_statistics_target | 100       | 10     | 150      | False   |
++---------------------------+-----------+--------+----------+---------+
+```
+
+在上述报告中,推荐了该环境上的数据库参数配置,并进行了风险提示。报告同时生成了当前workload的特征信息,其中有几个特征是比较有参考意义的:
+
+- temp_file_size:产生的临时文件数量,如果该结果大于0,则表明系统使用了临时文件。使用过多的临时文件会导致性能不佳,如果可能的话,需要提高work_mem参数的配置。
+- cache_hit_rate:shared_buffer 的缓存命中率,表明当前workload使用缓存的效率。
+- read_write_ratio:数据库作业的读写比例。
+- search_modify_ratio:数据库作业的查询与修改数据的比例。
+- ap_index:表明当前workload的AP指数,取值范围是0到10,该数值越大,表明越偏向于数据分析与检索。
+- workload_type:根据数据库统计信息,推测当前负载类型,分为AP、TP以及HTAP三种类型。
+- checkpoint_avg_sync_time:数据库在checkpoint 时,平均每次同步刷新数据到磁盘的时长,单位是毫秒。
+- load_average:平均每个CPU核心在1分钟、5分钟以及15分钟内的负载。一般地,该数值在1左右表明当前硬件比较匹配workload、在3左右表明运行当前作业压力比较大,大于5则表示当前硬件环境运行该workload压力过大(此时一般建议减少负载或升级硬件)。
+
+使用recommend模式还有两点注意事项:
+
+- recommend 模式会读取数据库中的pg_stat_database以及pg_stat_bgwriter等系统表中的信息,需要登录到数据库上的用户具有足够的权限(建议为管理员权限,可通过alter user username sysadmin;授予username相应的权限)。
+- 由于某些系统表会一直记录统计信息,这可能会对负载特征识别造成干扰,因此建议最好先清空某些系统表的统计信息,运行一段时间的workload后再使用recommend模式进行诊断,以便获得更准确的结果。清除统计信息的方法为:
+  - select pg_stat_reset_shared('bgwriter');
+  - select pg_stat_reset();
+
+### 3. train 模式
+
+```
+[root@ecs-saving-0008 tuner]# gs_xtuner train -f connection.json -x xtuner.conf
+Please input the password of database:
+Please input the password of host:
+Start to recommend knobs. Just a moment, please.
+WARN: The database may restart several times during tuning, continue or not [yes|no]:yes
+2021-01-17 15:48:20,864: Recorder is starting.
+2021-01-17 15:48:21.423844: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
+2021-01-17 15:48:21.423910: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
+Adam +2021-01-17 15:48:24.022924: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set +2021-01-17 15:48:24.023181: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory +2021-01-17 15:48:24.023212: W tensorflow/stream_executor/cuda/cuda_driver.cc:326] failed call to cuInit: UNKNOWN ERROR (303) +2021-01-17 15:48:24.023246: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (ecs-saving-0008): /proc/driver/nvidia/version does not exist +2021-01-17 15:48:24.023773: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX512F +To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. +2021-01-17 15:48:24.024250: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set +2021-01-17 15:48:24.052176: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:196] None of the MLIR optimization passes are enabled (registered 0 passes) +2021-01-17 15:48:24.057462: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2600000000 Hz +The list of tuned knobs in the training mode based on the reinforcement learning algorithm must be the same as that in the tuning mode. +/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/engine/training.py:2325: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically. + warnings.warn('`Model.state_updates` will be removed in a future version. ' +2021-01-17 15:48:50,022: [0] Current reward is -3.256027, knobs: {'shared_buffers': '213807'}. +2021-01-17 15:48:50,022: [0] Best reward is -3.256027, knobs: {'shared_buffers': '213807'}. +2021-01-17 15:48:50,022: [1] Database metrics: [0.5051657050852006, 0.975755080057054, 0.2125]. +2021-01-17 15:48:50,023: [1] Benchmark score: -0.103773, used mem: 3152253680 kB, reward: -3.256027. +2021-01-17 15:49:15,193: [1] Current reward is -3.255776, knobs: {'shared_buffers': '228559'}. +2021-01-17 15:49:15,194: [1] Best reward is -3.255776, knobs: {'shared_buffers': '228559'}. +2021-01-17 15:49:15,194: [2] Database metrics: [1.0, 0.9757370052274821, 0.18]. +2021-01-17 15:49:15,194: [2] Benchmark score: -0.103286, used mem: 3152489712 kB, reward: -3.255776. +2021-01-17 15:49:40,384: [2] Current reward is -3.256460, knobs: {'shared_buffers': '213646'}. +2021-01-17 15:49:40,384: [2] Best reward is -3.255776, knobs: {'shared_buffers': '228559'}. +2021-01-17 15:49:40,385: [3] Database metrics: [0.49976519522339996, 0.9757370052274821, 0.12]. +2021-01-17 15:49:40,385: [3] Benchmark score: -0.104209, used mem: 3152251104 kB, reward: -3.256460. +2021-01-17 15:50:05,542: [3] Current reward is -3.256012, knobs: {'shared_buffers': '220718'}. +2021-01-17 15:50:05,542: [3] Best reward is -3.255776, knobs: {'shared_buffers': '228559'}. +2021-01-17 15:50:05,542: [4] Database metrics: [0.7369851066684556, 0.975755080057054, 0.1075]. +2021-01-17 15:50:05,543: [4] Benchmark score: -0.103648, used mem: 3152364256 kB, reward: -3.256012. 
+2021-01-17 15:50:30,691: [4] Current reward is -3.256337, knobs: {'shared_buffers': '207214'}. +2021-01-17 15:50:30,691: [4] Best reward is -3.255776, knobs: {'shared_buffers': '228559'}. +2021-01-17 15:50:30,691: [5] Database metrics: [0.2840131490674896, 0.975755080057054, 0.0925]. +2021-01-17 15:50:30,692: [5] Benchmark score: -0.104188, used mem: 3152148192 kB, reward: -3.256337. +****************************************************************************************************** Knob Recommendation Report ****************************************************************************************************** +INFO: ++---------------------------------------+------------------------+ +| Metric | Value | ++---------------------------------------+------------------------+ +| workload_type | ap | +| dirty_background_bytes | 0 | +| temp_file_size | 0 | +| current_locks_count | 0.0 | +| current_prepared_xacts_count | 0.0 | +| rollback_commit_ratio | 0.0 | +| write_tup_speed | 0.0 | +| average_connection_age | 0.012783 | +| uptime | 0.101278143888889 | +| fetched_returned_ratio | 0.18264946102405455 | +| checkpoint_proactive_triggering_ratio | 0.195960870937204 | +| cache_hit_rate | 0.9871855024265209 | +| current_connections | 1.0 | +| current_free_mem | 15428064 | +| all_database_size | 1589977.01171875 | +| os_mem_total | 16430884 | +| checkpoint_avg_sync_time | 2.64405175134112 | +| search_modify_ratio | 20120500.0 | +| read_write_ratio | 201205000.0 | +| max_processes | 3010 | +| track_activity_size | 3010.0 | +| used_mem | 3152427664.0 | +| os_cpu_count | 4 | +| read_tup_speed | 419.743636679271 | +| ap_index | 6.7 | +| checkpoint_dirty_writing_time_window | 720.0 | +| shared_buffer_toast_hit_rate | 76.40449438202248 | +| block_size | 8.0 | +| shared_buffer_tidx_hit_rate | 94.7103274559194 | +| shared_buffer_heap_hit_rate | 99.50838310887633 | +| shared_buffer_idx_hit_rate | 99.52046667292409 | +| is_hdd | False | +| enable_autovacuum | True | +| is_64bit | True | +| load_average | [0.255, 0.225, 0.2125] | ++---------------------------------------+------------------------+ +p.s: The unit of storage is kB. +WARN: +[0]. The number of CPU cores is a little small. Please do not run too high concurrency. You are recommended to set max_connections based on the number of CPU cores. If your job does not consume much CPU, you can also increase it. +[1]. The total size of all databases is less than the memory size. Therefore, it is unnecessary to set shared_buffers to a large value. +BAD: +[0]. The value of wal_buffers is too high. Generally, a large value does not bring better performance. 
+****************************************************************************************************** Recommended Knob Settings ****************************************************************************************************** ++---------------------------+-----------+--------+----------+---------+ +| name | recommend | min | max | restart | ++---------------------------+-----------+--------+----------+---------+ +| shared_buffers | 198747 | 198747 | 228559 | True | +| max_connections | 185 | 15 | 370 | True | +| effective_cache_size | 12323163 | 198747 | 12323163 | False | +| effective_io_concurrency | 200 | 150 | 250 | False | +| wal_buffers | 6210 | 2048 | 6210 | True | +| random_page_cost | 1.0 | 1.0 | 2.0 | False | +| default_statistics_target | 1000 | 100 | 1000 | False | ++---------------------------+-----------+--------+----------+---------+ +``` + +训练完成后,会在配置项rl_model_path指定的目录中生成模型文件。 + +### 4. tune 模式 + +``` +[root@ecs-saving-0008 tuner]# gs_xtuner tune -f connection.json -x xtuner.conf +Please input the password of database: +Please input the password of host: +Start to recommend knobs. Just a moment, please. +WARN: The database may restart several times during tuning, continue or not [yes|no]:yes +2021-01-17 15:52:56,212: Recorder is starting. +| iter | target | effect... | random... | +------------------------------------------------- +2021-01-17 15:52:59,473: [0] Current reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:52:59,473: [0] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:52:59,473: [1] Database metrics: [0.22, 0.3400000000000001, 0.9848556082330464, 0.2575]. +2021-01-17 15:52:59,474: [1] Benchmark score: -0.103539, used mem: 3152148192 kB, reward: -3.255687. +| 1 | -3.256 | 0.223 | 0.3392 | +2021-01-17 15:53:01,612: [1] Current reward is -3.256332, knobs: {'effective_io_concurrency': '171', 'random_page_cost': '1.02'}. +2021-01-17 15:53:01,613: [1] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:01,614: [2] Database metrics: [0.21, 0.020000000000000018, 0.9851427720903616, 0.2575]. +2021-01-17 15:53:01,614: [2] Benchmark score: -0.104184, used mem: 3152148192 kB, reward: -3.256332. +| 2 | -3.256 | 0.2139 | 0.01739 | +2021-01-17 15:53:03,664: [2] Current reward is -3.256156, knobs: {'effective_io_concurrency': '226', 'random_page_cost': '1.28'}. +2021-01-17 15:53:03,664: [2] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:03,665: [3] Database metrics: [0.76, 0.28, 0.9854192483562304, 0.2575]. +2021-01-17 15:53:03,665: [3] Benchmark score: -0.104007, used mem: 3152148192 kB, reward: -3.256156. +| 3 | -3.256 | 0.7578 | 0.2831 | +2021-01-17 15:53:05,681: [3] Current reward is -3.256023, knobs: {'effective_io_concurrency': '200', 'random_page_cost': '1.35'}. +2021-01-17 15:53:05,682: [3] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:05,682: [4] Database metrics: [0.5, 0.3500000000000001, 0.9856856227825624, 0.2575]. +2021-01-17 15:53:05,682: [4] Benchmark score: -0.103875, used mem: 3152148192 kB, reward: -3.256023. +| 4 | -3.256 | 0.5007 | 0.345 | +2021-01-17 15:53:07,728: [4] Current reward is -3.255765, knobs: {'effective_io_concurrency': '158', 'random_page_cost': '1.29'}. 
+2021-01-17 15:53:07,728: [4] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:07,728: [5] Database metrics: [0.08, 0.29000000000000004, 0.9859424390850301, 0.2575]. +2021-01-17 15:53:07,729: [5] Benchmark score: -0.103617, used mem: 3152148192 kB, reward: -3.255765. +| 5 | -3.256 | 0.08186 | 0.2852 | +2021-01-17 15:53:09,892: [5] Current reward is -3.255949, knobs: {'effective_io_concurrency': '163', 'random_page_cost': '1.47'}. +2021-01-17 15:53:09,892: [5] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:09,892: [6] Database metrics: [0.13, 0.47, 0.9861902026474977, 0.275]. +2021-01-17 15:53:09,892: [6] Benchmark score: -0.103801, used mem: 3152148192 kB, reward: -3.255949. +| 6 | -3.256 | 0.1274 | 0.4671 | +2021-01-17 15:53:12,043: [6] Current reward is -3.256464, knobs: {'effective_io_concurrency': '181', 'random_page_cost': '1.43'}. +2021-01-17 15:53:12,043: [6] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:12,043: [7] Database metrics: [0.31, 0.42999999999999994, 0.9864293838414936, 0.275]. +2021-01-17 15:53:12,044: [7] Benchmark score: -0.104316, used mem: 3152148192 kB, reward: -3.256464. +| 7 | -3.256 | 0.3105 | 0.4275 | +2021-01-17 15:53:14,118: [7] Current reward is -3.256466, knobs: {'effective_io_concurrency': '164', 'random_page_cost': '1.31'}. +2021-01-17 15:53:14,118: [7] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:14,118: [8] Database metrics: [0.14, 0.31000000000000005, 0.9866604210066019, 0.295]. +2021-01-17 15:53:14,119: [8] Benchmark score: -0.104318, used mem: 3152148192 kB, reward: -3.256466. +| 8 | -3.256 | 0.138 | 0.3143 | +2021-01-17 15:53:16,193: [8] Current reward is -3.255741, knobs: {'effective_io_concurrency': '185', 'random_page_cost': '1.79'}. +2021-01-17 15:53:16,193: [8] Best reward is -3.255687, knobs: {'effective_io_concurrency': '172', 'random_page_cost': '1.34'}. +2021-01-17 15:53:16,194: [9] Database metrics: [0.35, 0.79, 0.9868837231315075, 0.295]. +2021-01-17 15:53:16,194: [9] Benchmark score: -0.103592, used mem: 3152148192 kB, reward: -3.255741. +| 9 | -3.256 | 0.3464 | 0.7892 | +2021-01-17 15:53:18,292: [9] Current reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:18,293: [9] Best reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:18,293: [10] Database metrics: [0.05, 0.8300000000000001, 0.9870996722701926, 0.295]. +2021-01-17 15:53:18,293: [10] Benchmark score: -0.103362, used mem: 3152148192 kB, reward: -3.255511. +| 10 | -3.256 | 0.05099 | 0.8299 | +2021-01-17 15:53:20,388: [10] Current reward is -3.255719, knobs: {'effective_io_concurrency': '183', 'random_page_cost': '1.58'}. +2021-01-17 15:53:20,388: [10] Best reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:20,389: [11] Database metrics: [0.33, 0.5800000000000001, 0.9873086257233148, 0.31]. +2021-01-17 15:53:20,389: [11] Benchmark score: -0.103571, used mem: 3152148192 kB, reward: -3.255719. +| 11 | -3.256 | 0.332 | 0.5833 | +2021-01-17 15:53:22,476: [11] Current reward is -3.255922, knobs: {'effective_io_concurrency': '224', 'random_page_cost': '1.8'}. 
+2021-01-17 15:53:22,477: [11] Best reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:22,478: [12] Database metrics: [0.74, 0.8, 0.9875109180109677, 0.31]. +2021-01-17 15:53:22,478: [12] Benchmark score: -0.103774, used mem: 3152148192 kB, reward: -3.255922. +| 12 | -3.256 | 0.7379 | 0.7979 | +2021-01-17 15:53:24,562: [12] Current reward is -3.256171, knobs: {'effective_io_concurrency': '246', 'random_page_cost': '1.9'}. +2021-01-17 15:53:24,562: [12] Best reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:24,562: [13] Database metrics: [0.96, 0.8999999999999999, 0.9877068626597331, 0.325]. +2021-01-17 15:53:24,562: [13] Benchmark score: -0.104023, used mem: 3152148192 kB, reward: -3.256171. +| 13 | -3.256 | 0.9551 | 0.9002 | +2021-01-17 15:53:26,676: [13] Current reward is -3.255921, knobs: {'effective_io_concurrency': '199', 'random_page_cost': '1.35'}. +2021-01-17 15:53:26,677: [13] Best reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:26,677: [14] Database metrics: [0.49, 0.3500000000000001, 0.9878967538241067, 0.325]. +2021-01-17 15:53:26,677: [14] Benchmark score: -0.103773, used mem: 3152148192 kB, reward: -3.255921. +| 14 | -3.256 | 0.4858 | 0.3522 | +2021-01-17 15:53:28,763: [14] Current reward is -3.255747, knobs: {'effective_io_concurrency': '153', 'random_page_cost': '1.83'}. +2021-01-17 15:53:28,764: [14] Best reward is -3.255511, knobs: {'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. +2021-01-17 15:53:28,764: [15] Database metrics: [0.03, 0.8300000000000001, 0.9880808677599346, 0.325]. +2021-01-17 15:53:28,764: [15] Benchmark score: -0.103598, used mem: 3152148192 kB, reward: -3.255747. +| 15 | -3.256 | 0.02704 | 0.8322 | +================================================= +2021-01-17 15:53:28,765: The tuning process is complete. The best reward is -3.255511, best knobs are: +{'effective_io_concurrency': '155', 'random_page_cost': '1.83'}. 
+****************************************************************************************************** Knob Recommendation Report ****************************************************************************************************** +INFO: ++---------------------------------------+------------------------+ +| Metric | Value | ++---------------------------------------+------------------------+ +| workload_type | ap | +| dirty_background_bytes | 0 | +| temp_file_size | 0 | +| current_locks_count | 0.0 | +| current_prepared_xacts_count | 0.0 | +| rollback_commit_ratio | 0.0 | +| write_tup_speed | 0.0 | +| average_connection_age | 0.012993 | +| uptime | 0.0202690916666667 | +| fetched_returned_ratio | 0.15769242371706166 | +| checkpoint_proactive_triggering_ratio | 0.203501094091904 | +| cache_hit_rate | 0.9780674962419049 | +| current_connections | 1.0 | +| current_free_mem | 15532000 | +| all_database_size | 1593746.21484375 | +| os_mem_total | 16430884 | +| checkpoint_avg_sync_time | 2.64145045326665 | +| max_processes | 3010 | +| track_activity_size | 3010.0 | +| used_mem | 3152148192.0 | +| os_cpu_count | 4 | +| ap_index | 6.7 | +| shared_buffer_toast_hit_rate | 63.41463414634146 | +| checkpoint_dirty_writing_time_window | 720.0 | +| block_size | 8.0 | +| search_modify_ratio | 8616900.0 | +| read_write_ratio | 86169000.0 | +| shared_buffer_tidx_hit_rate | 91.66666666666667 | +| read_tup_speed | 917.793974728196 | +| shared_buffer_idx_hit_rate | 99.49449740875606 | +| shared_buffer_heap_hit_rate | 99.54367629484318 | +| is_hdd | False | +| enable_autovacuum | True | +| is_64bit | True | +| load_average | [0.1125, 0.1625, 0.19] | ++---------------------------------------+------------------------+ +p.s: The unit of storage is kB. +WARN: +[0]. The total size of all databases is less than the memory size. Therefore, it is unnecessary to set shared_buffers to a large value. +[1]. The number of CPU cores is a little small. Please do not run too high concurrency. You are recommended to set max_connections based on the number of CPU cores. If your job does not consume much CPU, you can also increase it. +BAD: +[0]. The value of wal_buffers is too high. Generally, a large value does not bring better performance. 
+****************************************************************************************************** Recommended Knob Settings ******************************************************************************************************
++---------------------------+-----------+--------+----------+---------+
+| name                      | recommend | min    | max      | restart |
++---------------------------+-----------+--------+----------+---------+
+| random_page_cost          | 1.83      | 1.0    | 2.0      | False   |
+| effective_io_concurrency  | 155       | 150    | 250      | False   |
+| shared_buffers            | 199218    | 199218 | 229101   | True    |
+| max_connections           | 184       | 15     | 369      | True    |
+| effective_cache_size      | 12323163  | 199218 | 12323163 | False   |
+| wal_buffers               | 6225      | 2048   | 6225     | True    |
+| default_statistics_target | 1000      | 100    | 1000     | False   |
++---------------------------+-----------+--------+----------+---------+
+
+```
diff --git "a/content/zh/post/lihongda/MogDB-openGauss-\346\211\213\345\212\250\351\203\250\347\275\262(\351\235\236OM\345\267\245\345\205\267)\345\215\225\346\234\272-\344\270\273\345\244\207-\344\270\273\345\244\207\347\272\247\350\201\224\346\236\266\346\236\204.md" "b/content/zh/post/lihongda/MogDB-openGauss-\346\211\213\345\212\250\351\203\250\347\275\262(\351\235\236OM\345\267\245\345\205\267)\345\215\225\346\234\272-\344\270\273\345\244\207-\344\270\273\345\244\207\347\272\247\350\201\224\346\236\266\346\236\204.md"
new file mode 100644
index 0000000000000000000000000000000000000000..c213656fbc77ca2014e52012d2900fc0124d3d92
--- /dev/null
+++ "b/content/zh/post/lihongda/MogDB-openGauss-\346\211\213\345\212\250\351\203\250\347\275\262(\351\235\236OM\345\267\245\345\205\267)\345\215\225\346\234\272-\344\270\273\345\244\207-\344\270\273\345\244\207\347\272\247\350\201\224\346\236\266\346\236\204.md"
@@ -0,0 +1,399 @@
++++
+
+title = "MogDB/openGauss 手动部署(非OM工具)单机,主备,主备级联架构"
+
+date = "2021-06-29"
+
+tags = ["MogDB/openGauss 手动部署(非OM工具)单机,主备,主备级联架构"]
+
+archives = "2021-06"
+
+author = "李宏达"
+
+summary = "MogDB/openGauss 手动部署(非OM工具)单机,主备,主备级联架构"
+
+img = "/zh/post/lihongda/title/title.png"
+
+times = "19:30"
+
++++
+
+# **MogDB/openGauss 手动部署(非OM工具)单机,主备,主备级联架构**
+
+## 一、前期准备
+
+1. 关闭防火墙,selinux
+
+   ```
+   systemctl disable firewalld.service
+   systemctl stop firewalld.service
+   setenforce 0
+   sed -i '/^SELINUX=/c'SELINUX=disabled /etc/selinux/config
+   ```
+
+2. 安装依赖包
+
+   ```
+   yum install libaio-devel -y
+   ```
+
+3. 创建相关目录,用户,组
+
+   ```
+   groupadd dbgrp -g 2000
+   useradd omm -g 2000 -u 2000
+   echo "Enmo@123" | passwd --stdin omm
+   mkdir -p /opt/mogdb/software
+   chown -R omm:dbgrp /opt/
+   ```
+
+4. 上传并解压二进制文件
+
+   ```
+   [root@mogdb-kernel-0001 software]# pwd
+   /opt/mogdb/software
+   [root@mogdb-kernel-0001 software]# ls -lrt
+   total 90236
+   -r-------- 1 root root 92401412 Jun 13 06:14 MogDB-2.0.0-openEuler-64bit.tar.bz2
+   chown omm:dbgrp MogDB-2.0.0-openEuler-64bit.tar.bz2
+   su - omm
+   cd /opt/mogdb/software/
+   tar -xf MogDB-2.0.0-openEuler-64bit.tar.bz2
+   ```
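+
+正式初始化之前,可以先用一段简单的脚本自检上述准备工作是否全部到位。下面是一个示意性的 Python 自检脚本,检查项与路径均按本文约定,仅供参考:
+
+```python
+# 示意:初始化前自检"前期准备"各项是否到位(检查项与路径按本文约定,仅供参考)
+import os
+import pwd
+import subprocess
+
+def check(name, ok):
+    print("[{}] {}".format("OK" if ok else "FAIL", name))
+
+def cmd_output(args):
+    try:
+        return subprocess.check_output(args).decode()
+    except (OSError, subprocess.CalledProcessError):
+        return ""
+
+# 1. selinux 已关闭(Disabled/Permissive 均可接受)
+check("selinux disabled", cmd_output(["getenforce"]).strip() in ("Disabled", "Permissive"))
+# 2. 依赖包 libaio 可用
+check("libaio installed", "libaio.so" in cmd_output(["ldconfig", "-p"]))
+# 3. omm 用户与安装目录已创建
+try:
+    pwd.getpwnam("omm")
+    check("user omm exists", True)
+except KeyError:
+    check("user omm exists", False)
+check("/opt/mogdb/software exists", os.path.isdir("/opt/mogdb/software"))
+```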
+
+## 二、初始化数据库(单机)
+
+1. 配置环境变量
+
+   ```
+   echo "export GAUSSHOME=/opt/mogdb/software" >> /home/omm/.bashrc && \
+   echo "export PATH=\$GAUSSHOME/bin:\$PATH " >> /home/omm/.bashrc && \
+   echo "export LD_LIBRARY_PATH=\$GAUSSHOME/lib:\$LD_LIBRARY_PATH" >> /home/omm/.bashrc
+   source /home/omm/.bashrc
+   ```
+
+2. init数据库
+
+   ```
+   bin/gs_initdb --pgdata=/opt/mogdb/data --nodename=primary --pwpasswd=Enmo@123 --encoding=UTF-8 --locale=en_US.UTF-8
+   ```
+
+3. 修改初始化参数
+
+   ```
+   echo "port=26000" >> /opt/mogdb/data/postgresql.conf
+   echo "listen_addresses = '0.0.0.0'" >> /opt/mogdb/data/postgresql.conf
+   echo "password_encryption_type = 0" >> /opt/mogdb/data/postgresql.conf
+   echo "log_directory = 'pg_log'" >> /opt/mogdb/data/postgresql.conf
+   echo "remote_read_mode=non_authentication" >> /opt/mogdb/data/postgresql.conf
+   echo "host all all 0.0.0.0/0 md5" >> /opt/mogdb/data/pg_hba.conf
+   ```
+
+4. 启动数据库
+
+   ```
+   gs_ctl start -D /opt/mogdb/data
+   ```
+
+**至此单机安装完成**
+
+## 三、主备安装
+
+1. 主库操作
+
+   - 配置连接通道
+
+     ```
+     echo "replconninfo1='localhost=172.16.0.106 localport=26001 localheartbeatport=26005 localservice=26004 remotehost=172.16.0.245 remoteport=26001 remoteheartbeatport=26005 remoteservice=26004' " >> /opt/mogdb/data/postgresql.conf
+     ```
+
+     **localhost为主库IP,remotehost为备库IP**
+
+   - 将主库以primary方式启动
+
+     ```
+     gs_ctl restart -D /opt/mogdb/data/ -M primary
+     ```
+
+2. 备库操作
+
+   - 前期准备工作如上(一)
+   - 配置环境变量
+
+     ```
+     echo "export GAUSSHOME=/opt/mogdb/software" >> /home/omm/.bashrc && \
+     echo "export PATH=\$GAUSSHOME/bin:\$PATH " >> /home/omm/.bashrc && \
+     echo "export LD_LIBRARY_PATH=\$GAUSSHOME/lib:\$LD_LIBRARY_PATH" >> /home/omm/.bashrc
+     source /home/omm/.bashrc
+     ```
+
+   - 将主库的配置文件传到备库
+
+     ```
+     scp /opt/mogdb/data/pg_hba.conf /opt/mogdb/data/postgresql.conf 172.16.0.245:/opt/mogdb/data/
+     ```
+
+   - 配置连接通道,将localhost和remotehost对调
+
+     ```
+     sed -i "/^replconninfo1/creplconninfo1='localhost=172.16.0.245 localport=26001 localheartbeatport=26005 localservice=26004 remotehost=172.16.0.106 remoteport=26001 remoteheartbeatport=26005 remoteservice=26004'" /opt/mogdb/data/postgresql.conf
+     ```
+
+     **localhost为备库IP,remotehost为主库IP**
+
+   - 构建主备关系
+
+     ```
+     gs_ctl build -D /opt/mogdb/data/ -b full -M standby
+     ```
+
+   - 查询主备状态
+     - 主库
+
+       ```
+       [omm@mogdb-kernel-0001 data]$ gs_ctl query -D /opt/mogdb/data/
+       [2021-06-13 07:51:41.119][159054][][gs_ctl]: gs_ctl query ,datadir is /opt/mogdb/data
+        HA state:
+               local_role                     : Primary
+               static_connections             : 1
+               db_state                       : Normal
+               detail_information             : Normal
+
+        Senders info:
+               sender_pid                     : 159041
+               local_role                     : Primary
+               peer_role                      : Standby
+               peer_state                     : Normal
+               state                          : Streaming
+               sender_sent_location           : 0/14000258
+               sender_write_location          : 0/14000258
+               sender_flush_location          : 0/14000258
+               sender_replay_location         : 0/14000258
+               receiver_received_location     : 0/14000258
+               receiver_write_location        : 0/14000258
+               receiver_flush_location        : 0/14000258
+               receiver_replay_location       : 0/14000258
+               sync_percent                   : 100%
+               sync_state                     : Sync
+               sync_priority                  : 1
+               sync_most_available            : Off
+               channel                        : 172.16.0.106:26001-->172.16.0.245:60856
+
+        Receiver info:
+       No information
+       ```
+
+     - 备库
+
+       ```
+       [omm@mogdb-kernel-0002 data]$ gs_ctl query -D /opt/mogdb/data/
+       [2021-06-13 07:51:32.743][123204][][gs_ctl]: gs_ctl query ,datadir is /opt/mogdb/data
+        HA state:
+               local_role                     : Standby
+               static_connections             : 1
+               db_state                       : Normal
+               detail_information             : Normal
+
+        Senders info:
+       No information
+        Receiver info:
+               receiver_pid                   : 123194
+               local_role                     : Standby
+               peer_role                      : Primary
+               peer_state                     : Normal
+               state                          : Normal
+               sender_sent_location           : 0/14000140
+               sender_write_location          : 0/14000140
+               sender_flush_location          : 0/14000140
+               sender_replay_location         : 0/14000140
+               receiver_received_location     : 0/14000140
+               receiver_write_location        : 0/14000140
+               receiver_flush_location        : 0/14000140
+               receiver_replay_location       : 0/14000140
+               sync_percent                   : 100%
+               channel                        : 172.16.0.245:60856<--172.16.0.106:26001
+       ```
+
+**至此主备已安装完成**
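+
+主备与后文级联场景下,replconninfo 字符串需要在每个节点上手工对调 IP,很容易写错。下面给出一个示意性的 Python 小函数,按本文的端口约定(26001/26005/26004)生成两端的 replconninfo,仅作辅助示意,并非官方工具:
+
+```python
+# 示意:按本文端口约定生成 replconninfo 字符串,避免手工对调 IP 时写错(仅供参考)
+def replconninfo(local_ip, remote_ip, cascade=False):
+    info = ("localhost={} localport=26001 localheartbeatport=26005 localservice=26004 "
+            "remotehost={} remoteport=26001 remoteheartbeatport=26005 remoteservice=26004"
+            ).format(local_ip, remote_ip)
+    if cascade:
+        info += " iscascade=true"   # 级联通道需要追加 iscascade=true
+    return info
+
+primary, standby = "172.16.0.106", "172.16.0.245"
+# 主库侧与备库侧的配置正好互为对调:
+print("replconninfo1='{}'".format(replconninfo(primary, standby)))   # 写入主库 postgresql.conf
+print("replconninfo1='{}'".format(replconninfo(standby, primary)))   # 写入备库 postgresql.conf
+```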
+
+## 四、主备级联安装
+
+1. 主备安装如上\(一,二,三\)
+2. 添加复制通道
+   - 主库操作
+
+     ```
+     gsql -d postgres -p26000 -c "alter system set replconninfo2 to 'localhost=172.16.0.106 localport=26001 localheartbeatport=26005 localservice=26004 remotehost=172.16.0.127 remoteport=26001 remoteheartbeatport=26005 remoteservice=26004 iscascade=true';"
+     ```
+
+   - 备库操作
+
+     ```
+     gsql -d postgres -p26000 -c "alter system set replconninfo2 to 'localhost=172.16.0.245 localport=26001 localheartbeatport=26005 localservice=26004 remotehost=172.16.0.127 remoteport=26001 remoteheartbeatport=26005 remoteservice=26004 iscascade=true';"
+     ```
+
+3. 级联库操作
+
+   - 前期准备工作如上(一)
+   - 配置环境变量
+
+     ```
+     echo "export GAUSSHOME=/opt/mogdb/software" >> /home/omm/.bashrc && \
+     echo "export PATH=\$GAUSSHOME/bin:\$PATH " >> /home/omm/.bashrc && \
+     echo "export LD_LIBRARY_PATH=\$GAUSSHOME/lib:\$LD_LIBRARY_PATH" >> /home/omm/.bashrc
+     source /home/omm/.bashrc
+     ```
+
+   - 将备库的配置文件传到级联库
+
+     ```
+     scp /opt/mogdb/data/pg_hba.conf /opt/mogdb/data/postgresql.conf 172.16.0.127:/opt/mogdb/data/
+     ```
+
+   - 配置连接通道
+
+     ```
+     sed -i "/^replconninfo1/creplconninfo1='localhost=172.16.0.127 localport=26001 localheartbeatport=26005 localservice=26004 remotehost=172.16.0.106 remoteport=26001 remoteheartbeatport=26005 remoteservice=26004'" /opt/mogdb/data/postgresql.conf
+     sed -i "/replconninfo2/creplconninfo2='localhost=172.16.0.127 localport=26001 localheartbeatport=26005 localservice=26004 remotehost=172.16.0.245 remoteport=26001 remoteheartbeatport=26005 remoteservice=26004'" /opt/mogdb/data/postgresql.conf
+     ```
+
+     **localhost为级联IP,remotehost为主库IP和备库IP。**
+
+   - 构建主备关系
+
+     ```
+     gs_ctl build -D /opt/mogdb/data/ -b full -M cascade_standby
+     ```
+
+4. 查看主备级联状态
+
+   - 主库
+
+     ```
+     [omm@mogdb-kernel-0001 ~]$ gs_ctl query -D /opt/mogdb/data
+     [2021-06-13 08:37:03.281][207069][][gs_ctl]: gs_ctl query ,datadir is /opt/mogdb/data
+      HA state:
+             local_role                     : Primary
+             static_connections             : 2
+             db_state                       : Normal
+             detail_information             : Normal
+
+      Senders info:
+             sender_pid                     : 206143
+             local_role                     : Primary
+             peer_role                      : Standby
+             peer_state                     : Normal
+             state                          : Streaming
+             sender_sent_location           : 0/1A000140
+             sender_write_location          : 0/1A000140
+             sender_flush_location          : 0/1A000140
+             sender_replay_location         : 0/1A000140
+             receiver_received_location     : 0/1A000140
+             receiver_write_location        : 0/1A000140
+             receiver_flush_location        : 0/1A000140
+             receiver_replay_location       : 0/1A000140
+             sync_percent                   : 100%
+             sync_state                     : Sync
+             sync_priority                  : 1
+             sync_most_available            : Off
+             channel                        : 172.16.0.106:26001-->172.16.0.245:34586
+
+      Receiver info:
+     No information
+     ```
+
+   - 备库
+
+     ```
+     [omm@mogdb-kernel-0002 ~]$ gs_ctl query -D /opt/mogdb/data
+     [2021-06-13 08:37:09.128][147065][][gs_ctl]: gs_ctl query ,datadir is /opt/mogdb/data
+      HA state:
+             local_role                     : Standby
+             static_connections             : 2
+             db_state                       : Normal
+             detail_information             : Normal
+
+      Senders info:
+             sender_pid                     : 147043
+             local_role                     : Standby
+             peer_role                      : Cascade Standby
+             peer_state                     : Normal
+             state                          : Streaming
+             sender_sent_location           : 0/1A000140
+             sender_write_location          : 0/1A000140
+             sender_flush_location          : 0/1A000140
+             sender_replay_location         : 0/1A000140
+             receiver_received_location     : 0/1A000140
+             receiver_write_location        : 0/1A000140
+             receiver_flush_location        : 0/1A000140
+             receiver_replay_location       : 0/1A000140
+             sync_percent                   : 100%
+             sync_state                     : Async
+             sync_priority                  : 0
+             sync_most_available            : Off
+             channel                        : 172.16.0.245:26001-->172.16.0.127:49110
+
+      Receiver info:
+             receiver_pid                   : 146771
+             local_role                     : Standby
+             peer_role                      : Primary
+             peer_state                     : Normal
+             state                          : Normal
+             sender_sent_location           : 0/1A000140
+             sender_write_location          : 0/1A000140
+             sender_flush_location          : 0/1A000140
+             sender_replay_location         : 0/1A000140
+             receiver_received_location     : 0/1A000140
+             receiver_write_location        : 0/1A000140
+             receiver_flush_location        : 0/1A000140
+             receiver_replay_location       : 0/1A000140
+             sync_percent                   : 100%
+             channel                        : 172.16.0.245:34586<--172.16.0.106:26001
+     ```
+
+   - 级联库
+
+     ```
+     [omm@mogdb-kernel-0003 data]$ gs_ctl query -D /opt/mogdb/data
+     [2021-06-13 08:36:56.223][273241][][gs_ctl]: gs_ctl query ,datadir is /opt/mogdb/data
+      HA state:
+             local_role                     : Cascade Standby
+             static_connections             : 2
+             db_state                       : Normal
+             detail_information             : Normal
+
+      Senders info:
+     No information
+      Receiver info:
+             receiver_pid                   : 273237
+             local_role                     : Cascade Standby
+             peer_role                      : Standby
+             peer_state                     : Normal
+             state                          : Normal
+             sender_sent_location           : 0/1A000140
+             sender_write_location          : 0/1A000140
+             sender_flush_location          : 0/1A000140
+             sender_replay_location         : 0/1A000140
+             receiver_received_location     : 0/1A000140
+             receiver_write_location        : 0/1A000140
+             receiver_flush_location        : 0/1A000140
+             receiver_replay_location       : 0/1A000140
+             sync_percent                   : 100%
+             channel                        : 172.16.0.127:49110<--172.16.0.245:26001
+     ```
+
+**至此主备级联安装完成**
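+
+部署完成后,可以定期解析 gs_ctl query 的输出来确认复制链路健康(db_state 为 Normal、sync_percent 为 100%)。下面是一个示意性的 Python 检查脚本,字段名取自上文输出,仅供参考:
+
+```python
+# 示意:解析 gs_ctl query 输出,做一次简单的复制健康检查(仅供参考)
+import re
+import subprocess
+
+def query_ha_fields(data_dir="/opt/mogdb/data"):
+    out = subprocess.check_output(["gs_ctl", "query", "-D", data_dir]).decode()
+    # 提取形如 "local_role : Primary" 的键值对;同名字段以最后一次出现为准,仅作粗略检查
+    return dict(re.findall(r"^\s*(\w+)\s+:\s+(.+?)\s*$", out, flags=re.M))
+
+fields = query_ha_fields()
+assert fields.get("db_state") == "Normal", "db_state = %s" % fields.get("db_state")
+assert fields.get("sync_percent") == "100%", "sync_percent = %s" % fields.get("sync_percent")
+print("local_role =", fields.get("local_role"), ", replication looks healthy")
+```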

一、准备环境

+ +

1. 源库创建(Oracle)

+
    +
  • 创建Oracle 11.2.0.4
  • +
+ +``` +ocker pull registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle_11g_ee_lhr_11.2.0.4:1.0 +docker run -itd --name oracle -h oracle --privileged=true -p 1521:1521 -p 222:22 -p 1158:1158 lhrbest/oracle_11g_ee_lhr_11.2.0.4:1.0 init +``` +

MTK程序迁移Oracle需要安装Oracle客户端

+
    +
  • 安装Oracle客户端
  • +
+ +``` +wget https://download.oracle.com/otn_software/linux/instantclient/211000/oracle-instantclient-basic-21.1.0.0.0-1.x86_64.rpm +wget https://download.oracle.com/otn_software/linux/instantclient/211000/oracle-instantclient-sqlplus-21.1.0.0.0-1.x86_64.rpm +rpm -ivh oracle-instantclient-basic-21.1.0.0.0-1.x86_64.rpm oracle-instantclient-sqlplus-21.1.0.0.0-1.x86_64.rpm +export LD_LIBRARY_PATH=/usr/lib/oracle/21/client64/lib +``` +
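+
+客户端装好后,建议先确认能从 MTK 所在机器连通源库,再开始迁移。下面用 Python 的 cx_Oracle 做一次最小连通性验证。这是示意代码,假设已 pip install cx_Oracle,连接信息与下文配置文件保持一致:
+
+```python
+# 示意:迁移前对 Oracle 源库做最小连通性验证(假设已安装 cx_Oracle,仅供参考)
+import cx_Oracle
+
+# 连接信息与下文 mtk_config.json 的 source 段一致;LHR11G 若为 SID,可改用 makedsn(host, port, sid)
+dsn = cx_Oracle.makedsn("172.16.0.176", 1521, service_name="LHR11G")
+conn = cx_Oracle.connect(user="system", password="system", dsn=dsn)
+
+cur = conn.cursor()
+cur.execute("select count(*) from dba_tables where owner = 'SCOTT'")
+print("SCOTT 待迁移表数量:", cur.fetchone()[0])
+conn.close()
+```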

2. 目标库创建(MogDB)

+ +

二、迁移

+

1. 上传程序,编写配置文件

+

迁移为Oracle下的scott用户

+
    +
  • 编写配置文件
  • +
+ +``` +[root@mogdb-kernel-0005 mtk]# cat mtk_config.json +{ + "source": { + "type": "oracle", + "connect": { + "version": "", + "host": "172.16.0.176", + "user": "system", + "port": 1521, + "password": "system", + "dbName": "LHR11G", + "dsn": "" + }, + "parameter": { + "debugTest": false + } + }, + "target": { + "type": "openGauss", + "connect": { + "version": "", + "host": "172.16.0.106", + "user": "mogdb", + "port": 26000, + "password": "Enmo@123", + "dbName": "mtk", + "dsn": "" + }, + "parameter": { + "dropExistingObject": true, + "truncTable": true, + "ignoreTableDDLCompErr": true, + "parallelInsert": 1 + } + }, + "limit": { + "parallel": 2 + }, + "object": { + "schemas": [ + "SCOTT" + ] + }, + "dataOnly": false , + "schemaOnly": false, + "reportFile": "./report_Oracle2OpenGauss_all.html" +} +``` + + +
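+
+配置文件里的口令是明文,且 dataOnly 与 schemaOnly 不应同时为 true。正式执行前可以先用一小段 Python 做静态检查。以下为示意代码,检查规则按本文配置推断,并非 MTK 自带的校验:
+
+```python
+# 示意:对 mtk_config.json 做简单静态检查(规则按本文配置推断,非 MTK 自带校验)
+import json
+
+with open("mtk_config.json") as f:
+    cfg = json.load(f)
+
+for side in ("source", "target"):
+    conn = cfg[side]["connect"]
+    for key in ("host", "user", "port", "password", "dbName"):
+        assert conn.get(key) not in (None, ""), "%s.connect.%s 不能为空" % (side, key)
+
+assert not (cfg.get("dataOnly") and cfg.get("schemaOnly")), \
+    "dataOnly 与 schemaOnly 不应同时为 true"
+assert cfg["object"].get("schemas"), "object.schemas 至少要指定一个待迁移 schema"
+
+print("config OK: %s -> %s, schemas = %s"
+      % (cfg["source"]["type"], cfg["target"]["type"], cfg["object"]["schemas"]))
+```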

2 迁移

+
    +
  • 执行迁移命令
  • +
+ +``` +[root@mogdb-kernel-0005 mtk]# ./mtk -c mtk_config.json --reportFile mtk_report.html --logfile mtk_report.log --debug +''''' +----------------------- +ObjectName Type Summary +----------------------- + ++--------------+---------------------+---------------------+------------+--------+-------------+------------+ +| Type | StartTime | EndTime | Time | Status | Success Num | Failed Num | ++--------------+---------------------+---------------------+------------+--------+-------------+------------+ +| Schema | 2021-06-15 12:00:46 | 2021-06-15 12:00:46 | 18 ms | finish | 1 | 0 | +| ObjectType | 2021-06-15 12:00:46 | 2021-06-15 12:00:47 | 450 ms | finish | 0 | 0 | +| Domain | 2021-06-15 12:00:47 | 2021-06-15 12:00:47 | 0 ms | finish | 0 | 0 | +| CustomType | 2021-06-15 12:00:47 | 2021-06-15 12:00:47 | 0 ms | finish | 0 | 0 | +| Sequence | 2021-06-15 12:00:47 | 2021-06-15 12:00:47 | 5 ms | finish | 0 | 0 | +| Queue | 2021-06-15 12:00:47 | 2021-06-15 12:00:47 | 0 ms | finish | 0 | 0 | +| Table | 2021-06-15 12:00:47 | 2021-06-15 12:00:47 | 335 ms | finish | 4 | 0 | +| TableDDLCom | 2021-06-15 12:00:47 | 2021-06-15 12:00:47 | 0 ms | finish | 0 | 0 | +| TableData | 2021-06-15 12:00:47 | 2021-06-15 12:00:49 | 2 s 45 ms | finish | 4 | 0 | +| Constraint | 2021-06-15 12:00:49 | 2021-06-15 12:00:49 | 445 ms | finish | 3 | 0 | +| Index | 2021-06-15 12:00:49 | 2021-06-15 12:00:51 | 1 s 894 ms | finish | 0 | 0 | +| Trigger | 2021-06-15 12:00:51 | 2021-06-15 12:00:51 | 0 ms | finish | 0 | 0 | +| View | 2021-06-15 12:00:51 | 2021-06-15 12:00:51 | 86 ms | finish | 0 | 0 | +| TableDataCom | 2021-06-15 12:00:51 | 2021-06-15 12:00:52 | 39 ms | finish | 4 | 0 | ++--------------+---------------------+---------------------+------------+--------+-------------+------------+ + +------------------ +Table Data Summary +------------------ + ++----------------+----------------+---------------------+---------------------+-----------+---------+-------------+-------------+-------------+------+ +| SrcName | TgtName | StartTime | EndTime | Time | Status | Select Rows | Insert Rows | Ignore Rows | Size | ++----------------+----------------+---------------------+---------------------+-----------+---------+-------------+-------------+-------------+------+ +| SCOTT.EMP | SCOTT.EMP | 2021-06-15 12:00:47 | 2021-06-15 12:00:48 | 1 s 18 ms | succeed | 14 | 14 | 0 | 773 | +| SCOTT.SALGRADE | SCOTT.SALGRADE | 2021-06-15 12:00:47 | 2021-06-15 12:00:48 | 1 s 29 ms | succeed | 5 | 5 | 0 | 44 | +| SCOTT.DEPT | SCOTT.DEPT | 2021-06-15 12:00:48 | 2021-06-15 12:00:49 | 1 s 17 ms | succeed | 4 | 4 | 0 | 68 | +| SCOTT.BONUS | SCOTT.BONUS | 2021-06-15 12:00:48 | 2021-06-15 12:00:49 | 1 s 16 ms | succeed | 0 | 0 | 0 | 0 | ++----------------+----------------+---------------------+---------------------+-----------+---------+-------------+-------------+-------------+------+ + +----------------------------- +Table Data Comparison Summary +----------------------------- + ++----------------+----------------+---------------------+-------+---------+------------+------------+-------+ +| SrcName | TgtName | StartTime | Time | Status | SourceRows | TargetRows | Error | ++----------------+----------------+---------------------+-------+---------+------------+------------+-------+ +| SCOTT.EMP | SCOTT.EMP | 2021-06-15 12:00:51 | 1 ms | succeed | 14 | 14 | | +| SCOTT.SALGRADE | SCOTT.SALGRADE | 2021-06-15 12:00:51 | 39 ms | succeed | 5 | 5 | | +| SCOTT.DEPT | SCOTT.DEPT | 2021-06-15 12:00:51 | 1 ms | succeed | 4 | 4 | | +| SCOTT.BONUS | SCOTT.BONUS | 
2021-06-15 12:00:51 | 1 ms | succeed | 0 | 0 | | ++----------------+----------------+---------------------+-------+---------+------------+------------+-------+ +``` + + +
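+
+上面的 Table Data Comparison Summary 本质上就是逐表对比源端与目标端的行数。如果想在迁移后独立复核一遍,可以用类似下面的 Python 脚本。这是示意代码,假设已安装 cx_Oracle 与 psycopg2,连接信息同上:
+
+```python
+# 示意:迁移完成后独立复核各表行数(假设已安装 cx_Oracle 与 psycopg2,仅供参考)
+import cx_Oracle
+import psycopg2
+
+TABLES = ["EMP", "SALGRADE", "DEPT", "BONUS"]   # 本文迁移的 SCOTT 下四张表
+
+ora = cx_Oracle.connect(user="system", password="system",
+                        dsn=cx_Oracle.makedsn("172.16.0.176", 1521, service_name="LHR11G"))
+og = psycopg2.connect(dbname="mtk", user="mogdb", password="Enmo@123",
+                      host="172.16.0.106", port=26000)
+
+for t in TABLES:
+    c1, c2 = ora.cursor(), og.cursor()
+    c1.execute("select count(*) from scott." + t)   # Oracle 端不带引号时不区分大小写
+    c2.execute("select count(*) from scott." + t)   # openGauss 端按小写折叠,大小写按实际迁移结果调整
+    src, tgt = c1.fetchone()[0], c2.fetchone()[0]
+    print("SCOTT.%-8s source=%-4s target=%-4s %s"
+          % (t, src, tgt, "OK" if src == tgt else "MISMATCH"))
+```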
+
+- 日志截图
+  - 主界面 1.png
+  - 对象 2.png
+  - 表数据 3.png
    diff --git a/content/zh/post/lihongda/figures/20210117-95f2c7cc-a440-4184-853f-747d94f85978.png b/content/zh/post/lihongda/figures/20210117-95f2c7cc-a440-4184-853f-747d94f85978.png new file mode 100644 index 0000000000000000000000000000000000000000..b8cbaa2a50255bcff72461eb018cc69896c67c1c Binary files /dev/null and b/content/zh/post/lihongda/figures/20210117-95f2c7cc-a440-4184-853f-747d94f85978.png differ diff --git "a/content/zh/post/lihongda/openGauss\344\277\256\346\224\271\346\234\215\345\212\241\345\231\250IP.md" "b/content/zh/post/lihongda/openGauss\344\277\256\346\224\271\346\234\215\345\212\241\345\231\250IP.md" new file mode 100644 index 0000000000000000000000000000000000000000..1a82761aea3fb6dd89c38dfe39e20412d9a3b3a1 --- /dev/null +++ "b/content/zh/post/lihongda/openGauss\344\277\256\346\224\271\346\234\215\345\212\241\345\231\250IP.md" @@ -0,0 +1,174 @@ ++++ + +title = "openGauss修改服务器IP" + +date = "2022-04-02" + +tags = ["openGauss修改服务器IP"] + +archives = "2022-04" + +author = "李宏达" + +summary = "openGauss修改服务器IP" + +img = "/zh/post/lihongda/title/img39.png" + +times = "10:21" + ++++ + +# openGauss修改服务器IP + +## 一 、测试环境概述 + +### 1. 机器配置 + +- 配置截图 + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-4af715e1-5acd-480e-bb55-ebb6ca39db6b.png) + +- 两台华为云ECS,kc1.xlarge.4,规格4c/16g,openEuler 20.03系统。 + +## 二、安装openGauss + +略 + +## 三 、修改内网地址 + +### 1. 修改ECS IP + +- 修改IP前要解绑NAT,关闭服务器。 + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-a7023299-d549-4938-bb5c-7ad7c1072ec7.png) + +- 修改IP + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-66f736f3-7f49-49b0-bfda-0d1c5f24609e.png) + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-23eab59b-c7cd-4c57-a26f-769891339c0e.png) + +- 另一台同样 + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-c3c39dfb-97db-4ce1-9056-aee9fea6f66b.png) + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-5843c6be-af18-4615-838c-1ffb31ff7d10.png) + +### 2. 开机 + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-5c40c9ab-4c7f-46de-bfc8-f025a4429e4c.png) + +### 3. 恢复NAT + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-a6dc3156-5f15-45a0-ac2b-28c58ecff3d5.png) + +![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220120-e8c9fb70-ed6c-4482-9218-794eb98e40b5.png) + +## 四、数据库端操作 + +### 1. 直接启动数据库 + +- 发现报错 + +```s +[omm@ecs-0001 ~]$ gs_om -t start +Starting cluster. +========================================= +[GAUSS-51400] : Failed to execute the command: scp ecs-0002:/appdata/app/opengauss_f892ccb7/bin/cluster_dynamic_config /appdata/app/opengauss_f892ccb7/bin/cluster_dynamic_config_ecs-0002. Error: +ssh: connect to host ecs-0002 port 22: No route to host +``` + +### 2. 
修改配置文件 + +- postgresql.conf +- pg_hba.conf +- clusterconfig.xml (用于生成static configuration) +- /etc/hosts + +```s +[root@ecs-0001 ~]# sed -i 's/192.168.0.10/192.168.0.30/g' /appdata/data/postgresql.conf /appdata/data/pg_hba.conf /opt/software/opengauss/clusterconfig.xml /etc/hosts +[root@ecs-0001 ~]# sed -i 's/192.168.0.20/192.168.0.40/g' /appdata/data/postgresql.conf /appdata/data/pg_hba.conf /opt/software/opengauss/clusterconfig.xml /etc/hosts +[root@ecs-0002 ~]# sed -i 's/192.168.0.10/192.168.0.30/g' /appdata/data/postgresql.conf /appdata/data/pg_hba.conf /opt/software/opengauss/clusterconfig.xml /etc/hosts +[root@ecs-0002 ~]# sed -i 's/192.168.0.20/192.168.0.40/g' /appdata/data/postgresql.conf /appdata/data/pg_hba.conf /opt/software/opengauss/clusterconfig.xml /etc/hosts +``` + +### 3. 生成集群文件并发送到备库 + +- 自动发送到备库 + +```s +[omm@ecs-0001 ~]$ gs_om -t generateconf -X /opt/software/opengauss/clusterconfig.xml --distribute +Generating static configuration files for all nodes. +Creating temp directory to store static configuration files. +Successfully created the temp directory. +Generating static configuration files. +Successfully generated static configuration files. +Static configuration files for all nodes are saved in /appdata/app/tools/script/static_config_files. +Distributing static configuration files to all nodes. +Successfully distributed static configuration files. +``` + +### 4. 启动数据库验证 + +- 主库启动 + +```s +[omm@ecs-0001 ~]$ gs_om -t start +Starting cluster. +========================================= +[SUCCESS] ecs-0001 +2022-01-20 12:45:15.721 [unknown] [unknown] localhost 281457640472592 0 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (8004 Mbytes) is larger. +[SUCCESS] ecs-0002 +2022-01-20 12:45:18.071 [unknown] [unknown] localhost 281465901482000 0 0 [BACKEND] WARNING: Failed to initialize the memory protect for g_instance.attr.attr_storage.cstore_buffers (16 Mbytes) or shared memory (8004 Mbytes) is larger. +========================================= +Successfully started. 
+``` + +- 备库查看状态 + +```s +[omm@ecs-0002 ~]$ gs_om -t status --all +----------------------------------------------------------------------- + +cluster_state : Normal +redistributing : No + +----------------------------------------------------------------------- + +node : 1 +node_name : ecs-0001 +instance_id : 6001 +node_ip : 192.168.0.30 +data_path : /appdata/data +type : Datanode +instance_state : Normal +az_name : AZ1 +static_connections : 1 +HA_state : Normal +instance_role : Primary + +----------------------------------------------------------------------- + +node : 2 +node_name : ecs-0002 +instance_id : 6002 +node_ip : 192.168.0.40 +data_path : /appdata/data +type : Datanode +instance_state : Normal +az_name : AZ1 +instance_role : Standby +HA_state : Streaming +sender_sent_location : 0/452D3E8 +sender_write_location : 0/452D3E8 +sender_flush_location : 0/452D3E8 +sender_replay_location : 0/452D3E8 +receiver_received_location: 0/452D3E8 +receiver_write_location : 0/452D3E8 +receiver_flush_location : 0/452D3E8 +receiver_replay_location : 0/452D3E8 +sync_percent : 100% +sync_state : Sync + +----------------------------------------------------------------------- +``` \ No newline at end of file diff --git "a/content/zh/post/lizhenxu/MogDB\345\255\246\344\271\240\347\254\224\350\256\260\344\271\213 -- \344\272\206\350\247\243pagewriter\347\272\277\347\250\213.md" "b/content/zh/post/lizhenxu/MogDB\345\255\246\344\271\240\347\254\224\350\256\260\344\271\213 -- \344\272\206\350\247\243pagewriter\347\272\277\347\250\213.md" new file mode 100644 index 0000000000000000000000000000000000000000..f1ed94a0518710b0a0208c8fc805889f71d132f8 --- /dev/null +++ "b/content/zh/post/lizhenxu/MogDB\345\255\246\344\271\240\347\254\224\350\256\260\344\271\213 -- \344\272\206\350\247\243pagewriter\347\272\277\347\250\213.md" @@ -0,0 +1,241 @@ ++++ + +title = "MogDB学习笔记之 -- 了解pagewriter线程" + +date = "2022-04-13" + +tags = ["MogDB学习笔记之 -- 了解pagewriter线程"] + +archives = "2022-04" + +author = "李真旭" + +summary = "MogDB学习笔记之 -- 了解pagewriter线程" + +img = "/zh/post/lizhenxu/title/img6.png" + +times = "10:20" ++++ + +# MogDB学习笔记之 -- 了解pagewriter线程 + +本文出处:https://www.modb.pro/db/183172 + +在前面的MogDB学习系列中,我们了解了核心的bgwriter进程,今天继续来学习另外一个主要的线程,即pagewriter;首先来看下数据库相关的参数设置: + +```sql +postgres=# select name,setting,category,context from pg_settings where name like '%pagewrit%'; + name | setting | category | context +-----------------------+---------+-------------------------------------+------------ + log_pagewriter | off | Reporting and Logging / What to Log | sighup + pagewriter_sleep | 2000 | Write-Ahead Log / Checkpoints | sighup + pagewriter_thread_num | 2 | Write-Ahead Log / Checkpoints | postmaster +(3 rows) +``` + +从上面的参数来看,我们可以知道pagewriter线程的数量由参数 pagewriter_thread_num来控制;默认情况下一共有2个pagewriter线程。 +其中一个是master主线程。从MogDB官方文档来看,pagewriter主要负责从全局脏页队列中获取脏页,然后将其写入double write文件。由于有多个pagewriter线程, +那么是如何工作和协调的呢? 毫无疑问,是主线程扫描到需要写入的脏页后,将其分发个其他pagewriter线程,最终写入文件系统落盘。 + +其次从另外一个参数pagewriter_sleep参数来看,表示pagewriter线程的唤醒睡眠时间,单位是ms。这跟增量检查点有关。也就是说该参数 +设置后,pagewirter线程会间隔2s(默认值)开始扫描脏页并进行刷新,这同时也推进了数据库检查点。 + +不过这里需要注意的是,如果当shared_buffers中的脏页过多,页比例达到dirty_page_percent_max设置时,每次刷新脏页的的数量将会更大; +将会根据max_io_capacity 来进行计算。 + +接下来我们简单做一下测试,跟踪一下pagewriter线程,观察一下相关的操作,是否如上面所讲: + +```sql + +[omm@mogdb ~]$ ps -ef|grep mogdb|grep -v grep +avahi 9129 1 0 01:02 ? 
00:00:03 avahi-daemon: running [mogdb.local]
+omm       14421      1 99 05:05 pts/1    00:05:46 /data/mogdb/bin/mogdb -D /data/mogdb_b75b585a/data/db1
+[omm@mogdb ~]$ ps -T -p 14421
+    PID    SPID TTY          TIME CMD
+  14421   14421 pts/1    00:00:02 mogdb
+  14421   14422 pts/1    00:00:00 jemalloc_bg_thd
+  14421   14425 pts/1    00:00:00 mogdb
+  14421   14426 pts/1    00:00:00 syslogger
+  14421   14427 pts/1    00:00:00 jemalloc_bg_thd
+  14421   14428 pts/1    00:00:00 alarm
+  14421   14429 pts/1    00:00:00 jemalloc_bg_thd
+  14421   14430 pts/1    00:00:00 reaper
+  14421   14431 pts/1    00:00:00 jemalloc_bg_thd
+  14421   14456 pts/1    00:00:00 checkpointer
+  14421   14457 pts/1    00:00:01 pagewriter
+  14421   14460 pts/1    00:00:00 pagewriter
+  14421   14461 pts/1    00:00:00 bgwriter
+  14421   14462 pts/1    00:00:00 bgwriter
+  14421   14463 pts/1    00:00:00 CBMwriter
+  14421   14464 pts/1    00:04:20 WALwriter
+  14421   14465 pts/1    00:00:00 WALwriteraux
+  14421   14466 pts/1    00:00:00 AVClauncher
+  14421   14467 pts/1    00:00:00 Jobscheduler
+  14421   14468 pts/1    00:00:00 statscollector
+  14421   14469 pts/1    00:00:00 snapshotworker
+  14421   14470 pts/1    00:01:24 percentworker
+  14421   14471 pts/1    00:00:02 ashworker
+  14421   14472 pts/1    00:00:00 TrackStmtWorker
+  14421   14473 pts/1    00:00:00 auditor
+  14421   14474 pts/1    00:00:00 2pccleaner
+  14421   14475 pts/1    00:00:00 faultmonitor
+  14421   14487 pts/1    00:00:00 worker
+```
+
+下面创建一些测试表来进行一些探索。
+
+```sql
+enmotech=# create table test1123 as select * from pg_settings;
+INSERT 0 601
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 601
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 1202
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 2404
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 4808
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 9616
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 19232
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 38464
+enmotech=# select pg_relation_filepath('test1123');
+ pg_relation_filepath
+----------------------
+ base/16423/16453
+(1 row)
+
+enmotech=# insert into test1123 select * from test1123;
+INSERT 0 76928
+enmotech=#
+enmotech=# vacuum test1123;
+VACUUM
+enmotech=# vacuum test1123;
+VACUUM
+enmotech=# vacuum test1123;
+VACUUM
+enmotech=# SELECT OID,relname FROM pg_class where OID=16456;
+  oid  |    relname
+-------+----------------
+ 16456 | pg_toast_16453
+(1 row)
+```
+
+这时候我们打开strace对pagewriter线程做一个跟踪,观察一下相关的操作。
+
+```sql
+[omm@mogdb ~]$ strace -fr -o /tmp/14457.log -p 14457
+strace: Process 14457 attached with 28 threads
+strace: Process 14625 attached
+strace: Process 14626 attached
+strace: Process 14627 attached
+strace: Process 14628 attached
+strace: Process 14637 attached
+strace: Process 14638 attached
+strace: Process 14639 attached
+^Cstrace: Process 14457 detached
+strace: Process 14421 detached
+strace: Process 14422 detached
+strace: Process 14425 detached
+strace: Process 14426 detached
+strace: Process 14427 detached
+strace: Process 14428 detached
+strace: Process 14429 detached
+strace: Process 14430 detached
+strace: Process 14431 detached
+strace: Process 14456 detached
+strace: Process 14460 detached
+strace: Process 14461 detached
+strace: Process 14462 detached
+strace: Process 14463 detached
+strace: Process 14464 detached
+strace: Process 14465 detached
+strace: Process 14466 detached
+strace: Process 14467 detached
+strace: Process 14468 detached
+strace: Process 14469 detached
+strace: Process 14470 detached
+strace: Process 14471 detached
+strace: Process
14472 detached
+strace: Process 14473 detached
+strace: Process 14474 detached
+strace: Process 14475 detached
+strace: Process 14487 detached
+```
+
+这里我跟踪了多次,包括在进行vacuum操作时。
+
+获取相关操作文件的句柄信息:
+
+```
+[root@mogdb fd]# ls -ltr
+total 0
+l-wx------. 1 omm dbgrp 64 Nov 23 05:09 2 -> pipe:[130481]
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 97 -> /data/mogdb_b75b585a/data/db1/base/16423/14707
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 96 -> /data/mogdb_b75b585a/data/db1/base/16423/14706
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 95 -> /data/mogdb_b75b585a/data/db1/base/16423/16458
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 94 -> /data/mogdb_b75b585a/data/db1/base/16423/16456
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 93 -> /data/mogdb_b75b585a/data/db1/base/16423/14737
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 92 -> /data/mogdb_b75b585a/data/db1/base/16423/14737_fsm
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 91 -> /data/mogdb_b75b585a/data/db1/base/16423/14692
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 90 -> /data/mogdb_b75b585a/data/db1/base/16423/14692_fsm
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 9 -> socket:[130470]
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 89 -> /data/mogdb_b75b585a/data/db1/base/16423/14703_fsm
+......
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 41 -> pipe:[129754]
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 40 -> pipe:[130495]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 4 -> /var/log/mogdb/omm/bin/gs_obs/gs_obs.interface.log
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 39 -> pipe:[130490]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 38 -> pipe:[129753]
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 37 -> pipe:[129753]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 36 -> pipe:[129752]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 35 -> pipe:[129756]
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 34 -> pipe:[129756]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 33 -> pipe:[130486]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 32 -> pipe:[129751]
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 31 -> pipe:[129751]
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 30 -> pipe:[130486]
+l-wx------. 1 omm dbgrp 64 Nov 23 05:11 3 -> /data/mogdb_b75b585a/data/db1/pg_ctl.lock
+lr-x------. 1 omm dbgrp 64 Nov 23 05:11 29 -> pipe:[129752]
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 28 -> /data/mogdb_b75b585a/data/db1/pg_cbm/pg_xlog_1_0000000009000258_0000000000000000.cbm
+lrwx------. 1 omm dbgrp 64 Nov 23 05:11 27 -> /data/mogdb_b75b585a/data/db1/global/pg_dw_single
+lrwx------.
1 omm dbgrp 64 Nov 23 05:11 26 -> /data/mogdb_b75b585a/data/db1/global/pg_dw
+
+[root@mogdb tmp]# cat 14457_2.log |grep 14457|grep "pwrite64(" |awk '{print $3}'|sort|uniq
+pwrite64(26,
+pwrite64(74,
+pwrite64(77,
+[root@mogdb tmp]#
+[root@mogdb tmp]# cat 14457.log |grep 14457|grep "pwrite64(" |awk '{print $3}'|sort|uniq
+pwrite64(26,
+pwrite64(77,
+[root@mogdb tmp]# cat 14457.log |grep 14460|grep "pwrite64(" |awk '{print $3}'|sort|uniq
+pwrite64(77,
+pwrite64(94,
+[root@mogdb tmp]# cat 14457_2.log |grep 14460|grep "pwrite64(" |awk '{print $3}'|sort|uniq
+pwrite64(77,
+pwrite64(96,
+[root@mogdb tmp]#
+```
+
+其中26号文件是double write文件。另外发现pagewriter线程还会写其他文件,比如94号文件,查了一下发现是如下对象:
+
+```
+enmotech=# SELECT OID,relname FROM pg_class where OID=16456;
+  oid  |    relname
+-------+----------------
+ 16456 | pg_toast_16453
+(1 row)
+```
+
+看到这个pg_toast表还是非常奇怪,查询了相关材料发现,这是PostgreSQL特有的机制之一。对于PostgreSQL而言,页是数据在文件存储中的基本单位,默认大小为8192 byte。同时,PostgreSQL不允许一行数据跨页存储,那么对于超长的行数据,就会启动TOAST,具体就是采用压缩和切片的方式。如果启用了切片,实际数据存储在另一张系统表的多个行中,这就叫TOAST表,这种存储方式叫行外存储。由于MogDB沿用了openGauss内核,而openGauss内核又是基于PostgreSQL 9.2.4 进化而来,因此不难看出,这仍然沿用了原生PostgreSQL的一些机制。
+
+最后简单总结一下pagewriter线程的作用:
+
+1、扫描shared_buffers中的脏页链表,获取脏页,同时将脏页写入到double write文件。
+2、推进检查点(实际上是增量检查点)。
diff --git "a/content/zh/post/lizhenxu/MogDB\345\255\246\344\271\240\347\254\224\350\256\260\347\263\273\345\210\227 -- \344\275\277\347\224\250gs_restore\345\244\207\344\273\275\346\201\242\345\244\215\345\267\245\345\205\267.md" "b/content/zh/post/lizhenxu/MogDB\345\255\246\344\271\240\347\254\224\350\256\260\347\263\273\345\210\227 -- \344\275\277\347\224\250gs_restore\345\244\207\344\273\275\346\201\242\345\244\215\345\267\245\345\205\267.md"
new file mode 100644
index 0000000000000000000000000000000000000000..9f79902a53307644e21391798863b349fecbfb44
--- /dev/null
+++ "b/content/zh/post/lizhenxu/MogDB\345\255\246\344\271\240\347\254\224\350\256\260\347\263\273\345\210\227 -- \344\275\277\347\224\250gs_restore\345\244\207\344\273\275\346\201\242\345\244\215\345\267\245\345\205\267.md"
@@ -0,0 +1,318 @@
++++
+
+title = "MogDB学习笔记系列 -- 使用gs_restore备份恢复工具"
+
+date = "2022-04-18"
+
+tags = ["MogDB学习笔记系列 -- 使用gs_restore备份恢复工具"]
+
+archives = "2022-04"
+
+author = "李真旭"
+
+summary = "MogDB学习笔记系列 -- 使用gs_restore备份恢复工具"
+
+img = "/zh/post/lizhenxu/title/img6.png"
+
+times = "10:20"
++++
+
+# MogDB学习笔记系列 -- 使用gs_restore备份恢复工具
+
+本文出处:https://www.modb.pro/db/183831
+
+前面学习了MogDB的备份工具gs_dump,主要用于逻辑备份,其中还有对应的逻辑恢复工具gs_restore。这里来跟大家一起学习。
+
+```sql
+[omm@mogdb bin]$ ./gs_restore --help
+gs_restore restores a MogDB database from an archive created by gs_dump.
+
+Usage:
+  gs_restore [OPTION]... 
FILE + +General options: + -d, --dbname=NAME connect to database name + -f, --file=FILENAME output file name + -F, --format=c|d|t backup file format (should be automatic) + -l, --list print summarized TOC of the archive + -v, --verbose verbose mode + -V, --version output version information, then exit + -?, --help show this help, then exit + +Options controlling the restore: + -a, --data-only restore only the data, no schema + -c, --clean clean (drop) database objects before recreating + -C, --create create the target database + -e, --exit-on-error exit on error, default is to continue + -I, --index=NAME restore named index(s) + -j, --jobs=NUM use this many parallel jobs to restore + -L, --use-list=FILENAME use table of contents from this file for + selecting/ordering output + -n, --schema=NAME restore only objects in this schema(s) + -O, --no-owner skip restoration of + object ownership + -P, --function=NAME(args) restore named function(s) + -s, --schema-only restore only the schema, no data + -S, --sysadmin=NAME system admin user name to use for disabling triggers + -t, --table=NAME restore named table(s) + -T, --trigger=NAME restore named trigger(s) + -x, --no-privileges/--no-acl skip restoration of access privileges (grant/revoke) + -1, --single-transaction restore as a single transaction + --disable-triggers disable triggers during data-only restore + --no-data-for-failed-tables do not restore data of tables that could not be + created + --no-security-labels do not restore security labels + --no-tablespaces do not restore tablespace assignments + --section=SECTION restore named section (pre-data, data, or post-data) + --use-set-session-authorization use SET SESSION AUTHORIZATION commands instead of + ALTER OWNER commands to set ownership + +Connection options: + -h, --host=HOSTNAME database server host or socket directory + -p, --port=PORT database server port number + -U, --username=NAME connect as specified database user + -w, --no-password never prompt for password + -W, --password=PASSWORD the password of specified database user + --role=ROLENAME do SET ROLE before restore + --rolepassword=ROLEPASSWORD the password for role +[omm@mogdb bin]$ + +``` + +从上面的介绍信息来看,gs_restore 也支持多种粒度的还原操作。这里来进行相关测试。 + +#### 准备测试表 + +```sql +[omm@mogdb ~]$ gsql -d enmotech -p26000 +gsql ((MogDB 2.0.0 build b75b585a) compiled at 2021-05-28 17:20:47 commit 0 last mr ) +NOTICE : The password has been expired, please change the password. +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +enmotech=# select count(1) from db2mogdb; + count +------- + 4 +(1 row) + +enmotech=# + +``` + +#### 备份整个database + +```sql +[omm@mogdb ~]$ gs_dump -p 26000 -U test -W test@1234 enmotech -f enmotech_20211201.tar -F t +gs_dump[port='26000'][enmotech][2021-12-01 16:24:15]: The total objects number is 388. +gs_dump[port='26000'][enmotech][2021-12-01 16:24:15]: [100.00%] 388 objects have been dumped. +gs_dump[port='26000'][enmotech][2021-12-01 16:24:15]: dump database enmotech successfully +gs_dump[port='26000'][enmotech][2021-12-01 16:24:15]: total time: 384 ms +[omm@mogdb ~]$ +``` + +#### 模拟误删除表 + +```sql +[omm@mogdb ~]$ gsql -d enmotech -p26000 +gsql ((MogDB 2.0.0 build b75b585a) compiled at 2021-05-28 17:20:47 commit 0 last mr ) +NOTICE : The password has been expired, please change the password. +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. 
+
+enmotech=# drop table db2mogdb;
+DROP TABLE
+enmotech=# \q
+```
+
+#### Restore with gs_restore
+
+```sql
+[omm@mogdb ~]$ gs_restore enmotech_20211201.tar -d enmotech -p26000 -Utest -W test@1234
+start restore operation ...
+table db2mogdb complete data imported !
+Finish reading 8 SQL statements!
+end restore operation ...
+restore operation successful
+total time: 13 ms
+[omm@mogdb ~]$
+[omm@mogdb ~]$
+```
+
+#### Verify that the data came back
+
+```sql
+[omm@mogdb ~]$ gsql -d enmotech -p26000 -Utest -W test@1234
+gsql ((MogDB 2.0.0 build b75b585a) compiled at 2021-05-28 17:20:47 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+enmotech=> \dt
+                          List of relations
+ Schema |   Name   | Type  | Owner |             Storage
+--------+----------+-------+-------+----------------------------------
+ public | db2mogdb | table | test  | {orientation=row,compression=no}
+(1 row)
+
+enmotech=> select count(1) from db2mogdb;
+ count
+-------
+     4
+(1 row)
+
+enmotech=>
+```
+
+As shown, gs_restore successfully brought back the table we dropped in the simulation above.
+
+What about a truncate table, then? The object still exists and only its data was wiped; in principle the restore works the same way, as follows:
+
+```sql
+[omm@mogdb ~]$ gsql -d enmotech -p26000
+gsql ((MogDB 2.0.0 build b75b585a) compiled at 2021-05-28 17:20:47 commit 0 last mr )
+NOTICE : The password has been expired, please change the password.
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+enmotech=# truncate table db2mogdb;
+TRUNCATE TABLE
+enmotech=# \q
+[omm@mogdb ~]$ gs_restore enmotech_20211201.tar -d enmotech -p26000 -Utest -W test@1234
+start restore operation ...
+Error while PROCESSING TOC:
+Error from TOC entry 468; 1259 16522 TABLE db2mogdb test
+could not execute query: ERROR: relation "db2mogdb" already exists
+    Command was: CREATE TABLE db2mogdb (
+    age integer
+)
+WITH (orientation=row, compression=no);
+
+
+
+table db2mogdb complete data imported !
+Finish reading 8 SQL statements!
+end restore operation ...
+WARNING: errors ignored on restore: 1
+restore operation successful
+total time: 17 ms
+[omm@mogdb ~]$
+[omm@mogdb ~]$ gsql -d enmotech -p26000
+gsql ((MogDB 2.0.0 build b75b585a) compiled at 2021-05-28 17:20:47 commit 0 last mr )
+NOTICE : The password has been expired, please change the password.
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+enmotech=# select count(1) from db2mogdb;
+ count
+-------
+     4
+(1 row)
+
+enmotech=#
+```
+
+Besides database-level backup and restore, gs_restore also supports schema-level and table-level restores. Let's test a bit more:
+
+#### Create a test schema
+
+```sql
+enmotech=# create schema roger;
+CREATE SCHEMA
+enmotech=#
+enmotech=# create table roger.test1201 as select * from db2mogdb;
+INSERT 0 4
+enmotech=# insert into roger.test1201 select * from roger.test1201;
+INSERT 0 4
+......
+enmotech=# insert into roger.test1201 select * from roger.test1201;
+INSERT 0 32768
+enmotech=# insert into roger.test1201 select * from roger.test1201;
+INSERT 0 65536
+enmotech=# insert into roger.test1201 select * from roger.test1201;
+INSERT 0 131072
+enmotech=# select count(1) from roger.test1201;
+ count
+--------
+ 262144
+(1 row)
+```
+
+#### Back up the whole test database enmotech
+
+```sql
+
+[omm@mogdb ~]$ gs_dump -p 26000 -U test -W test@1234 enmotech -f enmotech_all.tar -F t
+gs_dump[port='26000'][enmotech][2021-12-01 16:39:56]: The total objects number is 391.
+gs_dump[port='26000'][enmotech][2021-12-01 16:39:56]: [100.00%] 391 objects have been dumped.
+gs_dump[port='26000'][enmotech][2021-12-01 16:39:56]: dump database enmotech successfully
+gs_dump[port='26000'][enmotech][2021-12-01 16:39:56]: total time: 430 ms
+[omm@mogdb ~]$
+```
+
+#### Drop the schema
+
+```
+enmotech=# drop schema roger CASCADE;
+NOTICE: drop cascades to table roger.test1201
+DROP SCHEMA
+enmotech=# \q
+```
+
+#### Restore a single schema from the full database backup
+
+```sql
+
+[omm@mogdb ~]$ gs_restore enmotech_all.tar -d enmotech -n roger -p26000 -Utest -W test@1234
+start restore operation ...
+table test1201 complete data imported !
+Finish reading 11 SQL statements!
+end restore operation ...
+restore operation successful
+total time: 120 ms
+[omm@mogdb ~]$
+
+
+[omm@mogdb ~]$ gsql -d enmotech -p26000
+gsql ((MogDB 2.0.0 build b75b585a) compiled at 2021-05-28 17:20:47 commit 0 last mr )
+NOTICE : The password has been expired, please change the password.
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+enmotech=# \dn
+    List of schemas
+    Name     | Owner
+-------------+-------
+ cstore      | omm
+ dbe_perf    | omm
+ pkg_service | omm
+ public      | omm
+ roger       | omm
+ snapshot    | omm
+(6 rows)
+
+enmotech=# select count(1) from roger.test1201;
+ count
+--------
+ 262144
+(1 row)
+
+enmotech=#
+```
+
+#### Restore a single table from the full database backup
+
+```
+[omm@mogdb ~]$ gs_restore enmotech_all.tar -d enmotech -n roger -t test1201 -p26000 -Utest -W test@1234
+start restore operation ...
+table test1201 complete data imported !
+Finish reading 11 SQL statements!
+end restore operation ...
+restore operation successful
+total time: 166 ms
+[omm@mogdb ~]$
+
+```
+
+To wrap up, the main capabilities of the gs_restore tool are:
+
+1. Restore at several granularities (database, schema, table, and so on).
+2. Because the backup is logical, a single schema or a single table can be restored from a full backup, which is very flexible.
+3. Parallel restore is supported.
+4. Triggers and many other database object types are supported; if triggers make a data-only restore slow, they can be disabled via the --disable-triggers option.
diff --git "a/content/zh/post/lizhenxu/openGauss\344\270\255\347\232\204sequence\350\267\237Oracle\347\232\204sequence\346\234\211\344\273\200\344\271\210\345\214\272\345\210\253\357\274\237.md" "b/content/zh/post/lizhenxu/openGauss\344\270\255\347\232\204sequence\350\267\237Oracle\347\232\204sequence\346\234\211\344\273\200\344\271\210\345\214\272\345\210\253\357\274\237.md"
new file mode 100644
index 0000000000000000000000000000000000000000..1538bf3e373a2dd8daf43419104300e0f449005a
--- /dev/null
+++ "b/content/zh/post/lizhenxu/openGauss\344\270\255\347\232\204sequence\350\267\237Oracle\347\232\204sequence\346\234\211\344\273\200\344\271\210\345\214\272\345\210\253\357\274\237.md"
@@ -0,0 +1,138 @@
++++
+
+title = "How Do Sequences in openGauss Differ from Oracle Sequences?"
+
+date = "2022-04-06"
+
+tags = ["How Do Sequences in openGauss Differ from Oracle Sequences?"]
+
+archives = "2022-04"
+
+author = "李真旭"
+
+summary = "How Do Sequences in openGauss Differ from Oracle Sequences?"
+
+img = "/zh/post/lizhenxu/title/img.png"
+
+times = "10:30"
+
++++
+
+# How Do Sequences in openGauss Differ from Oracle Sequences?
+
+openGauss also provides the sequence feature, which Oracle users tend to be quite fond of, so when migrating from Oracle to openGauss this feature can serve as a full replacement.
+
+Let's run a quick test:
+
+```sql
+enmotech=> drop table test;
+DROP TABLE
+enmotech=> create table test(id serial,name varchar(20));
+NOTICE: CREATE TABLE will create implicit sequence "test_id_seq" for serial column "test.id"
+CREATE TABLE
+enmotech=> \d+ test
+Table "public.test"
+Column |         Type          |                     Modifiers                      | Storage  | Stats target | Description
+--------+-----------------------+---------------------------------------------------+----------+--------------+-------------
+id     | integer               | not null default nextval('test_id_seq'::regclass) | plain    |              |
+name   | character varying(20) |                                                    | extended |              |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+enmotech=> insert into test values (nextval('test_id_seq'),'enmotech');
+INSERT 0 1
+enmotech=> insert into test values (nextval('test_id_seq'),'killdb.com');
+INSERT 0 1
+enmotech=> insert into test values (nextval('test_id_seq'),'www.killdb.com');
+INSERT 0 1
+enmotech=> select * from test;
+id |      name
+----+----------------
+2  | enmotech
+3  | killdb.com
+4  | www.killdb.com
+(3 rows)
+```
+
+
+We can also create a sequence on its own and then assign it to a table. The CREATE SEQUENCE syntax is:
+
+```sql
+CREATE SEQUENCE name [ INCREMENT [ BY ] increment ]
+[ MINVALUE minvalue | NO MINVALUE | NOMINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE]
+[ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE | NOCYCLE ]
+[ OWNED BY { table_name.column_name | NONE } ];
+```
+
+Next, let's create and use a standalone sequence:
+
+```sql
+enmotech=> create sequence kill_seq cache 1000;
+CREATE SEQUENCE
+enmotech=>
+enmotech=> drop table test;
+DROP TABLE
+enmotech=> create table test(id int not null default nextval('kill_seq'),name varchar(200));
+CREATE TABLE
+enmotech=>
+enmotech=> \d+ test
+                                       Table "public.test"
+ Column |          Type          |                   Modifiers                    | Storage  | Stats target | Description
+--------+------------------------+------------------------------------------------+----------+--------------+-------------
+ id     | integer                | not null default nextval('kill_seq'::regclass) | plain    |              |
+ name   | character varying(200) |                                                | extended |              |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+enmotech=> alter sequence kill_seq increment by 10 NOCYCLE;
+ERROR: ALTER SEQUENCE is not yet supported.
+enmotech=>
+enmotech=> \d+ kill_seq
+       Sequence "public.kill_seq"
+    Column     |  Type   |        Value        | Storage
+---------------+---------+---------------------+---------
+ sequence_name | name    | kill_seq            | plain
+ last_value    | bigint  | 1000                | plain
+ start_value   | bigint  | 1                   | plain
+ increment_by  | bigint  | 1                   | plain
+ max_value     | bigint  | 9223372036854775807 | plain
+ min_value     | bigint  | 1                   | plain
+ cache_value   | bigint  | 1000                | plain
+ log_cnt       | bigint  | 32                  | plain
+ is_cycled     | boolean | f                   | plain
+ is_called     | boolean | t                   | plain
+ uuid          | bigint  | 0                   | plain
+
+enmotech=> alter sequence kill_seq nomaxvalue;
+ALTER SEQUENCE ^
+enmotech=> alter sequence kill_seq cache 10000;
+ERROR: ALTER SEQUENCE is not yet supported.
+enmotech=> alter sequence kill_seq start 888;
+ERROR: ALTER SEQUENCE is not yet supported.
+enmotech=> \d+ kill_seq
+       Sequence "public.kill_seq"
+    Column     |  Type   |        Value        | Storage
+---------------+---------+---------------------+---------
+ sequence_name | name    | kill_seq            | plain
+ last_value    | bigint  | 1000                | plain
+ start_value   | bigint  | 1                   | plain
+ increment_by  | bigint  | 1                   | plain
+ max_value     | bigint  | 9223372036854775807 | plain
+ min_value     | bigint  | 1                   | plain
+ cache_value   | bigint  | 1000                | plain
+ log_cnt       | bigint  | 0                   | plain
+ is_cycled     | boolean | f                   | plain
+ is_called     | boolean | t                   | plain
+ uuid          | bigint  | 0                   | plain
+```
+
+Although the attributes of a sequence look much like Oracle's, we can see that openGauss does not yet support ALTER SEQUENCE for changing the increment step or most other attributes; only the owner (and, per the syntax below, the max value) can be changed.
+Checking the official documentation shows that ALTER SEQUENCE supports only the following syntax:
+
+```sql
+ALTER SEQUENCE [ IF EXISTS ] name
+[MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE]
+[ OWNED BY { table_name.column_name | NONE } ] ;
+```
+
+Note one more difference from Oracle: because of clustering, an Oracle sequence also has an ORDER/NOORDER option; a sequence in openGauss has no such attribute.
diff --git a/content/zh/post/lizhenxu/title/img.png b/content/zh/post/lizhenxu/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/lizhenxu/title/img.png differ
diff --git a/content/zh/post/lizhenxu/title/img6.png b/content/zh/post/lizhenxu/title/img6.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79
Binary files /dev/null and b/content/zh/post/lizhenxu/title/img6.png differ
diff --git "a/content/zh/post/lizhenxu/\345\205\263\344\272\216openGauss\344\270\255\347\232\204\350\231\232\346\213\237\347\264\242\345\274\225.md" "b/content/zh/post/lizhenxu/\345\205\263\344\272\216openGauss\344\270\255\347\232\204\350\231\232\346\213\237\347\264\242\345\274\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..c93b9c504af09e9f2618c48bbc979afcdcd0f828
--- /dev/null
+++ "b/content/zh/post/lizhenxu/\345\205\263\344\272\216openGauss\344\270\255\347\232\204\350\231\232\346\213\237\347\264\242\345\274\225.md"
@@ -0,0 +1,123 @@
++++
+
+title = "On Virtual Indexes in openGauss"
+
+date = "2022-04-06"
+
+tags = ["On Virtual Indexes in openGauss"]
+
+archives = "2022-04"
+
+author = "李真旭"
+
+summary = "On Virtual Indexes in openGauss"
+
+img = "/zh/post/lizhenxu/title/img6.png"
+
+times = "11:37"
+
++++
+
+# On Virtual Indexes in openGauss
+
+As a long-time Oracle user, I have always liked the invisible indexes introduced in Oracle 11gR2, because they are friendly to most tuning work. openGauss 2.0 actually offers a similar capability, so let's give it a quick test. First, create a test table to exercise openGauss's virtual index feature:
+
+```sql
+enmotech=# create table test as select * from pg_settings;
+
+INSERT 0 637
+enmotech=# select count(1) from test;
+count
+-------
+637
+(1 row)
+```
+
+In openGauss a virtual index is created through dedicated functions, as follows:
+
+```sql
+enmotech=# select * from hypopg_create_index('create index on test(name)');
+indexrelid |        indexname
+------------+-------------------------
+24643      | <24643>ubtree_test_name
+(1 row)
+
+
+enmotech=# set enable_hypo_index = on;
+SET
+enmotech=#
+```
+
+Having created the virtual index on test(name) with hypopg_create_index, we switch on the session-level parameter so the optimizer can recognize the index.
+
+Next, verify that the index takes effect:
+
+```sql
+enmotech=# explain select name,setting from test where name='checkpoint_timeout';
+QUERY PLAN
+-------------------------------------------------------------------------------------
+Index Scan using <24643>ubtree_test_name on test  (cost=0.00..8.27 rows=1 width=64)
+Index Cond: (name = 'checkpoint_timeout'::text)
+(2 rows)
+
+enmotech=#
+```
+
+The explain output shows that the query can use an index scan, hitting the virtual index 24643 that we just created.
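+
+To double-check that it really is the hypothetical index driving this plan, we can switch the parameter back off. The session below is a sketch under that assumption (the exact plan text may vary by version); the optimizer should fall back to a sequential scan:
+
+```sql
+-- Assumed follow-up session: with enable_hypo_index off, the optimizer
+-- can no longer consider the hypothetical index.
+enmotech=# set enable_hypo_index = off;
+SET
+enmotech=# explain select name,setting from test where name='checkpoint_timeout';
+-- expected plan: Seq Scan on test, Filter: (name = 'checkpoint_timeout'::text)
+```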
+
+Does a virtual index allocate space and take up room in the file system? This, too, can be checked with a function openGauss provides:
+
+```sql
+enmotech=# select * from hypopg_estimate_size(24643);
+hypopg_estimate_size
+----------------------
+8192
+(1 row)
+
+enmotech=#
+```
+
+Several other functions are provided as well:
+
+hypopg_reset_index: clear all virtual indexes
+
+hypopg_drop_index: drop a specific virtual index
+
+hypopg_display_index: list all virtual indexes created
+
+Once created, a virtual index lives at the instance and session level (other sessions can also share it). If we never drop or reset it manually, openGauss automatically removes all virtual indexes when the database instance restarts.
+
+Let's restart the openGauss cluster, log back in, and see whether that holds:
+
+```sql
+enmotech=# \l
+                         List of databases
+   Name    | Owner | Encoding |   Collate   |    Ctype    | Access privileges
+-----------+-------+----------+-------------+-------------+-------------------
+ enmotech  | roger | UTF8     | en_US.UTF-8 | en_US.UTF-8 |
+ postgres  | omm   | UTF8     | en_US.UTF-8 | en_US.UTF-8 |
+ template0 | omm   | UTF8     | en_US.UTF-8 | en_US.UTF-8 | =c/omm           +
+           |       |          |             |             | omm=CTc/omm
+ template1 | omm   | UTF8     | en_US.UTF-8 | en_US.UTF-8 | =c/omm           +
+           |       |          |             |             | omm=CTc/omm
+(4 rows)
+
+enmotech=# select * from hypopg_display_index();
+ indexname | indexrelid | table | column
+-----------+------------+-------+--------
+(0 rows)
+
+
+```
+
+As shown, after the instance restarts, the virtual indexes created earlier have been cleared automatically. This is in fact also one small piece of openGauss's AI capabilities. Very nice!
diff --git "a/content/zh/post/lmj/openGauss\345\205\263\344\272\216PLSQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md" "b/content/zh/post/lmj/openGauss\345\205\263\344\272\216PLSQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..bf19b5417b9eb280a3ef060c1d31a2357bef7959
--- /dev/null
+++ "b/content/zh/post/lmj/openGauss\345\205\263\344\272\216PLSQL\345\214\277\345\220\215\345\235\227\350\260\203\347\224\250\346\265\213\350\257\225.md"
@@ -0,0 +1,187 @@
++++
+
+title = "Testing PL/SQL Anonymous Block Invocation in openGauss"
+
+date = "2022-04-06"
+
+tags = ["Testing PL/SQL Anonymous Block Invocation in openGauss"]
+
+archives = "2022-04"
+
+author = "云和恩墨-lmj"
+
+summary = "Testing PL/SQL Anonymous Block Invocation in openGauss"
+
+img = "/zh/post/lmj/title/img.png"
+
+times = "11:37"
+
++++
+
+# Testing PL/SQL Anonymous Block Invocation in openGauss
+
+## I. How It Works
+
+PL/SQL (Procedure Language/Structure Query Language) is a programming language that adds procedural capabilities to standard SQL.
+
+A single SQL statement can only manipulate data; with no flow control it cannot support complex applications. PL/SQL is a powerful language that combines structured queries with the database's own procedural control.
+
+### 1. How PL/SQL works
+
+PL/SQL is a block-structured language: it groups a set of statements into one block and sends them to the server in one shot.
+
+The PL/SQL engine parses the content of the received PL/SQL block, executes the procedural-control statements itself, and hands the SQL statements in the block to the server's SQL executor.
+
+After a PL/SQL block is sent to the server it is compiled and then executed. A named PL/SQL block (such as a subprogram) can be compiled separately and stored permanently in the database, ready to run at any time.
+
+A PL/SQL program contains one or more logical blocks. A block may declare variables, and variables must be declared before use.
+
+### 2. PL/SQL characteristics
+
+- Tight integration with SQL
+- Support for object-oriented programming
+- Better performance
+- Portability
+- Security
+
+### 3. Syntax structure
+
+Besides the normal executable section, PL/SQL provides a dedicated exception-handling section:
+
+```sql
+[DECLARE
+ --declaration statements] ①
+BEGIN
+ --executable statements ②
+[EXCEPTION
+ --exception statements] ③
+END;
+```
+
+Reading the syntax:
+① Declaration section: holds the definitions of variables and constants. The variables, types, cursors, and local stored procedures and functions used by the PL/SQL are declared here.
+This section starts with the keyword DECLARE and can be omitted if no variables or constants are declared.
+② Executable section: the instruction part of the PL/SQL block, starting with the keyword BEGIN and ending with END.
+All executable PL/SQL statements go in this section, which runs commands and manipulates variables. Other PL/SQL blocks can be nested here as sub-blocks.
+The executable section of a PL/SQL block is mandatory. Note that the END keyword is followed by a semicolon.
+③ Exception section: optional. The EXCEPTION keyword splits the executable part in two: the statements before it run normally,
+and as soon as an exception occurs, control jumps to the exception part.
+
+### 4. Types of PL/SQL blocks
+
+1. Anonymous blocks
+2. Named blocks:
+① procedure: stored procedures
+② function: functions
+③ package: packages
+④ trigger: triggers
+
+Mentioning PL/SQL probably brings Oracle straight to mind. Oracle's PL/SQL is powerful, and its anonymous-block and named-block invocation solve many problems. openGauss in fact offers the same capability; below are my tests of openGauss anonymous blocks.
+
+## II. Anonymous Block Tests
+
+### 1. Plain anonymous block invocation
+
+```sql
+openGauss=# create table t1(a int ,b text);
+CREATE TABLE
+
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# BEGIN
+openGauss$# raise notice 'Normal anonymous block printing.';
+openGauss$# insert into t1 values(1,'I am lmj!');
+openGauss$# END;
+openGauss$# /
+NOTICE: Normal anonymous block printing.
+
+ANONYMOUS BLOCK EXECUTE
+openGauss=# select * from t1;
+ a |     b
+---+-----------
+ 1 | I am lmj!
+(1 row)
+```
+
+### 2. Anonymous blocks and transactions
+
+Start a transaction, then run an autonomous-transaction anonymous block; if the transaction rolls back, the anonymous block does not.
+
+```
+
+```
+
+### 3. Outer and inner anonymous blocks
+
+Here the outer anonymous block is an ordinary one while the inner block is an autonomous-transaction anonymous block. Compare the following example with the second one to contrast transaction rollback with anonymous-block rollback:
+
+```sql
+openGauss=# truncate table t1;
+TRUNCATE TABLE
+
+openGauss=# START TRANSACTION;
+START TRANSACTION
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# BEGIN
+openGauss$# raise notice 'an autonomous transaction anonymous block.';
+openGauss$# insert into t1 values(1,'it will commit!');
+openGauss$# END;
+openGauss$# /
+NOTICE: an autonomous transaction anonymous block.
+
+ANONYMOUS BLOCK EXECUTE
+openGauss=# insert into t1 values(1,'you will rollback!');
+INSERT 0 1
+openGauss=# rollback;
+ROLLBACK
+openGauss=# select * from t1;
+ a |        b
+---+-----------------
+ 1 | it will commit!
+(1 row)
+```
+
+### 4. An anonymous block executing an autonomous-transaction block that raises an exception
+
+```sql
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# res int := 0;
+openGauss-# res2 int := 1;
+openGauss-# BEGIN
+openGauss$# raise notice 'just use call.';
+openGauss$# res2 = res2/res;
+openGauss$# END;
+openGauss$# /
+NOTICE: just use call.
+
+ERROR: ERROR: division by zero
+CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment
+```
+
+When the anonymous block hits an execution error, the exception is reported.
+
+### 5. Exception catching
+
+An exception raised during execution is caught inside the anonymous block. As shown below, after the execution error the block raises the 'autonomous throw exception' notice:
+
+```plsql
+openGauss=# DECLARE
+openGauss-# PRAGMA AUTONOMOUS_TRANSACTION;
+openGauss-# res int := 0;
+openGauss-# res2 int := 1;
+openGauss-# BEGIN
+openGauss$# raise notice 'error catch.';
+openGauss$# res2 = res2/res;
+openGauss$# EXCEPTION
+openGauss$# WHEN division_by_zero THEN
+openGauss$# raise notice 'autonomous throw exception.';
+openGauss$# END;
+openGauss$# /
+NOTICE: error catch.
+
+NOTICE: autonomous throw exception.
+
+ANONYMOUS BLOCK EXECUTE
+```
diff --git a/content/zh/post/lmj/title/img.png b/content/zh/post/lmj/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/lmj/title/img.png differ
diff --git a/content/zh/post/lmj/title/img6.png b/content/zh/post/lmj/title/img6.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79
Binary files /dev/null and b/content/zh/post/lmj/title/img6.png differ
diff --git "a/content/zh/post/louie/OpenGauss\346\225\260\346\215\256\345\272\223SQL\350\247\243\346\236\220\346\250\241\345\235\227\346\272\220\347\240\201\345\210\206\346\236\220.md" "b/content/zh/post/louie/OpenGauss\346\225\260\346\215\256\345\272\223SQL\350\247\243\346\236\220\346\250\241\345\235\227\346\272\220\347\240\201\345\210\206\346\236\220.md"
new file mode 100644
index 0000000000000000000000000000000000000000..e47c5b6143fe2d37640fd106a5c460d9ab2e3261
--- /dev/null
+++ "b/content/zh/post/louie/OpenGauss\346\225\260\346\215\256\345\272\223SQL\350\247\243\346\236\220\346\250\241\345\235\227\346\272\220\347\240\201\345\210\206\346\236\220.md"
@@ -0,0 +1,142 @@
++++
+
+title = "Source Code Analysis of the openGauss SQL Parsing Module"
+
+date = "2021-11-29"
+
+tags = ["openGauss SQL parsing module"]
+
+archives = "2021-11"
+
+author = "罗宇辰"
+
+summary = "Source code analysis of the openGauss SQL parsing module"
+
+img = "/zh/post/louie/title/1.png"
+
+times = "12:45"
+
++++
+
+# Source Code Analysis of the openGauss SQL Parsing Module
+
+## I. Overview
+
+After installing and using openGauss, I found that it still uses the SQL language for database operations, so I explored how openGauss processes SQL syntax to operate on the database.
+Going through existing blog analyses and reading the source code, I learned that this stage of the SQL engine is known as SQL parsing. Compiling a SQL statement inside a DBMS follows the usual compiler pipeline: lexical analysis, syntax analysis, and semantic analysis.
+(1) Lexical analysis: recognize the keywords, identifiers, operators, terminals, and so on that the system supports in the query text, and determine the inherent lexical category of each word. A common tool is flex.
+(2) Syntax analysis: define grammar rules per the SQL standard and match the tokens produced by lexical analysis against them; when a SQL statement matches a grammar rule, the corresponding abstract syntax tree (AST) is generated. A common tool is bison.
+(3) Semantic analysis: validate the AST, checking that the tables, columns, functions, and expressions it references have matching metadata, then convert the AST into a query tree.
+So personally this felt a lot like a compilers-course lab.
+openGauss uses the two tools flex and bison to do the bulk of lexical and syntax analysis. Every SQL statement a user enters is first handed to flex-generated code for lexical analysis; flex compiles the predefined lexical rules file into the lexer code.
+
+![1.png](../figures/1.png "1")
+
+## II. Implementation of SQL Parsing
+
+### 1. Code layout
+
+![2.png](../figures/2.png "2")
+The lexical and grammatical structures are defined in the files scan.l and gram.y respectively, which flex and bison compile into scan.cpp and gram.cpp.
+
+### 2. Lexical analysis
+
+The lexical rules file in openGauss is scan.l; following the SQL standard it defines and recognizes the keywords, identifiers, operators, constants, and terminals of SQL.
+Lexical analysis splits a SQL statement into multiple distinct tokens, each with its own lexical category.
+
+#### (1) Function declarations
+![3.png](../figures/3.png "3")
+![4.png](../figures/4.png "4")
+![5.png](../figures/5.png "5")
+
+#### (2) Structure of a flex file
+A flex file consists of three parts, or sections, separated by two %% markers:
+definitions
+%%
+rules
+%%
+user code
+
+#### (3) The definitions section (token definitions)
+1) Whitespace, newlines, comments
+![6.png](../figures/6.png "6")
+2) Recognizing whether an op token is a single char or carries a special operation (look at whether more characters follow the op token)
+![7.png](../figures/7.png "7")
+3) Data type definitions
+![8.png](../figures/8.png "8")
+
+4) Others
+There is also bracket recognition for determining where a SQL statement starts or ends (including { }, double quotes, and $...$-style quotes), C-style comment recognition, and so on.
+
+#### (4) The rules section (actions once a token is recognized)
+1) Whitespace alone does nothing
+![9.png](../figures/9.png "9")
+![10.png](../figures/10.png "10")
+2) For single-statement handling, all characters before the semicolon are stored as one SQL statement (whether it counts as a single statement depends on whether we are inside brackets).
+![11.png](../figures/11.png "11")
+![12.png](../figures/12.png "12")
+
+#### (5) The user code section
+
+1) Report a lexer or grammar error cursor position (error handling for the cursor position)
+![13.png](../figures/13.png "13")
+
+### 3. Syntax analysis
+openGauss defines gram.y, a grammar file that the bison tool can process; likewise, the Makefile invokes bison to compile gram.y into gram.cpp.
+openGauss defines a family of statement structs (stmt), one per kind of SQL statement, to hold the parse results (e.g., SELECT, DELETE, CREATE).
+
+Source analyzed: gram.y, openGauss/openGauss-server - Gitee IDE
+
+#### (1) Layout of a bison grammar file
+The content of a bison grammar file is laid out as follows (four parts):
+%{
+prologue
+%}
+bison declarations
+%%
+grammar rules
+%%
+epilogue
+
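+To make the four-part layout concrete, here is a minimal, generic .y skeleton (an illustrative sketch only, not an excerpt from gram.y):
+
+```
+/* prologue: C code copied verbatim into the generated parser */
+%{
+#include <stdio.h>
+int yylex(void);
+void yyerror(const char *s) { fprintf(stderr, "%s\n", s); }
+%}
+
+/* bison declarations: tokens, value types, operator precedence */
+%token NUMBER
+%left '+' '-'
+
+%%
+/* grammar rules with semantic actions */
+expr: expr '+' expr   { /* build an AST node here */ }
+    | expr '-' expr   { /* ... */ }
+    | NUMBER          { /* leaf token */ }
+    ;
+%%
+
+/* epilogue: user code, e.g. main() or a hand-written yylex() */
+```
+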
+#### (2) The prologue
+
+#### (3) Bison declarations
+1) %pure_parser states that the parser should be reentrant. (By default the yyparse() function takes no arguments; %parse-param {param} passes a parameter, and the call then takes the form yyparse(param). %lex-param likewise adds parameters to the yylex() function.)
+![14.png](../figures/14.png "14")
+
+2) By default bison defines every semantic value as type int; the value type can be changed by defining the macro YYSTYPE. With multiple value types, all of them must be enumerated with %union in the bison declarations.
+![15.png](../figures/15.png "15")
+
+3) Nonterminals are defined with %type
+![16.png](../figures/16.png "16")
+
+4) Terminals use %token
+![17.png](../figures/17.png "17")
+
+5) Operator precedence (left and right associativity)
+![18.png](../figures/18.png "18")
+
+#### (4) Grammar rules
+1) The compilation target
+![19.png](../figures/19.png "19")
+2) The structs for all grammar rules
+![20.png](../figures/20.png "20")
+
+3) Concrete implementations
+![21.png](../figures/21.png "21")
+
+#### (5) Epilogue
+![22.png](../figures/22.png "22")
+
+### 4. How flex and bison fit together
+To do syntax analysis with bison, first study the input carefully; the foremost task is to distinguish terminals from nonterminals.
+Terminals are atomic words, each expressing an indivisible token of the grammar. Concretely, a terminal may be a string, an integer, a space, a newline, and so on. bison only names each terminal; it does not define them, and it assigns each terminal name a unique numeric code.
+Recognizing terminals is done by a dedicated function, yylex(). This function returns the code of the recognized terminal; the recognized text is reachable through the global pointer yytext, and its length is stored in the global variable yyleng. Such terminal recognition is best generated with flex by scanning the lexical rules file. Some terminals have several concrete spellings.
+A nonterminal is the name of an intermediate expression formed from a sequence of terminals; no such atomic token actually exists. How nonterminals are composed is what bison expresses: grammar rules are composition rules stated in terms of terminals and nonterminals together.
+bison itself is also an automated grammar-analysis tool: it takes the token IDs returned by the lexical function yylex() and runs the action defined after each grammar rule. bison cannot generate the lexical function automatically; in simple programs its definition is usually appended at the end of the grammar file, while larger, more complex programs use the automatic lexer generator flex to produce the definition of yylex().
+When bison and flex are used together, bison alone defines the token IDs, and flex needs to know those IDs so it can return the right one to bison whenever it recognizes a token. bison hands the IDs to flex via the -d option of the bison command: with it, bison emits a standalone header file named in the form name.tab.h, and the flex rules file simply includes this header in its definitions section.
+yylex() only needs to return a token's ID as soon as it recognizes one; in the example above the returned token ID would be TOK_NUMBER. In addition, a token's semantic value can be computed by yylex() and placed in the global variable yylval.
+
+## III. Summary
+For this openGauss kernel-analysis assignment I picked the SQL parsing module, which looked the most familiar. Guided by blog posts I located the module and then started reading through the experts' code. Having done compilers-course labs with flex and bison before, I roughly knew the overall steps. The code is genuinely well written, with plenty of comments; my main trouble was that the comments are all in English, which made for rough reading. With so much jargon, Google Translate made a mess of it; for much of the code I got the gist but was unsure whether my understanding was right. Reading it comfortably probably takes more exposure to the terminology than I currently have.
+
+## IV. References
+1. https://zhuanlan.zhihu.com/p/389174538
+2. https://gitee.com/opengauss/openGauss-server/tree/master/src/common/backend/parser
+3. https://www.cnblogs.com/me115/archive/2010/10/27/1862180.html
diff --git a/content/zh/post/louie/figures/1.png b/content/zh/post/louie/figures/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..bf45b4342f71e8abd01958c995109a2170d3eed8
Binary files /dev/null and b/content/zh/post/louie/figures/1.png differ
diff --git a/content/zh/post/louie/figures/10.png b/content/zh/post/louie/figures/10.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0e0501c85c3b6ce98a3cd4d8fc2c709799711f8
Binary files /dev/null and b/content/zh/post/louie/figures/10.png differ
diff --git a/content/zh/post/louie/figures/11.png b/content/zh/post/louie/figures/11.png
new file mode 100644
index 0000000000000000000000000000000000000000..b74e9b7bc50bccd0288c2644992a588b8175a341
Binary files /dev/null and b/content/zh/post/louie/figures/11.png differ
diff --git a/content/zh/post/louie/figures/12.png b/content/zh/post/louie/figures/12.png
new file mode 100644
index 0000000000000000000000000000000000000000..a57c5bd277127408ad9ff115739f1f53c8be5b5f
Binary files /dev/null and b/content/zh/post/louie/figures/12.png differ
diff --git a/content/zh/post/louie/figures/13.png b/content/zh/post/louie/figures/13.png
new file mode 100644
index 0000000000000000000000000000000000000000..e985b85e47d2cfaf28601bd6504fa4103f235186
Binary files /dev/null and b/content/zh/post/louie/figures/13.png differ
diff --git a/content/zh/post/louie/figures/14.png b/content/zh/post/louie/figures/14.png
new file mode
100644 index 0000000000000000000000000000000000000000..6c6567498388dd7078e439a6d0c9df3eebb5c2a0 Binary files /dev/null and b/content/zh/post/louie/figures/14.png differ diff --git a/content/zh/post/louie/figures/15.png b/content/zh/post/louie/figures/15.png new file mode 100644 index 0000000000000000000000000000000000000000..55d93407dbad33097e8146c431f0f7a2df0070d5 Binary files /dev/null and b/content/zh/post/louie/figures/15.png differ diff --git a/content/zh/post/louie/figures/16.png b/content/zh/post/louie/figures/16.png new file mode 100644 index 0000000000000000000000000000000000000000..c635d57437daa3fac013f6c526e4102678f0422d Binary files /dev/null and b/content/zh/post/louie/figures/16.png differ diff --git a/content/zh/post/louie/figures/17.png b/content/zh/post/louie/figures/17.png new file mode 100644 index 0000000000000000000000000000000000000000..b3c6636e1050a4c4db958010b10e57a5838478bd Binary files /dev/null and b/content/zh/post/louie/figures/17.png differ diff --git a/content/zh/post/louie/figures/18.png b/content/zh/post/louie/figures/18.png new file mode 100644 index 0000000000000000000000000000000000000000..70bd9ec55905ff6fe2fd25977f3f396445430cf6 Binary files /dev/null and b/content/zh/post/louie/figures/18.png differ diff --git a/content/zh/post/louie/figures/19.png b/content/zh/post/louie/figures/19.png new file mode 100644 index 0000000000000000000000000000000000000000..9be53c274f647225a58ae3ed3dc2b1f17293ac89 Binary files /dev/null and b/content/zh/post/louie/figures/19.png differ diff --git a/content/zh/post/louie/figures/2.png b/content/zh/post/louie/figures/2.png new file mode 100644 index 0000000000000000000000000000000000000000..1cd0940b5cc0e18572d3c53af96a042e38bc3246 Binary files /dev/null and b/content/zh/post/louie/figures/2.png differ diff --git a/content/zh/post/louie/figures/20.png b/content/zh/post/louie/figures/20.png new file mode 100644 index 0000000000000000000000000000000000000000..21b47174c1563cf4ea2b22238931fb2163184582 Binary files /dev/null and b/content/zh/post/louie/figures/20.png differ diff --git a/content/zh/post/louie/figures/21.png b/content/zh/post/louie/figures/21.png new file mode 100644 index 0000000000000000000000000000000000000000..2bb8b7a94d96313fa0436dec9a0bb00975a76a1d Binary files /dev/null and b/content/zh/post/louie/figures/21.png differ diff --git a/content/zh/post/louie/figures/22.png b/content/zh/post/louie/figures/22.png new file mode 100644 index 0000000000000000000000000000000000000000..7c39a4a28b893bd03c4160c5107d951ec0d82e1a Binary files /dev/null and b/content/zh/post/louie/figures/22.png differ diff --git a/content/zh/post/louie/figures/3.png b/content/zh/post/louie/figures/3.png new file mode 100644 index 0000000000000000000000000000000000000000..f731f4467dff0b3976ee4f17c2afe75542e337b8 Binary files /dev/null and b/content/zh/post/louie/figures/3.png differ diff --git a/content/zh/post/louie/figures/4.png b/content/zh/post/louie/figures/4.png new file mode 100644 index 0000000000000000000000000000000000000000..aa9bc589472f231fddc330b410716b53d25b04cb Binary files /dev/null and b/content/zh/post/louie/figures/4.png differ diff --git a/content/zh/post/louie/figures/5.png b/content/zh/post/louie/figures/5.png new file mode 100644 index 0000000000000000000000000000000000000000..b5411a34d7e2cfbb5518e14656323a840f119573 Binary files /dev/null and b/content/zh/post/louie/figures/5.png differ diff --git a/content/zh/post/louie/figures/6.png b/content/zh/post/louie/figures/6.png new file mode 100644 index 
0000000000000000000000000000000000000000..f5086748664a205202d8a9584a24eb193b4ca4eb
Binary files /dev/null and b/content/zh/post/louie/figures/6.png differ
diff --git a/content/zh/post/louie/figures/7.png b/content/zh/post/louie/figures/7.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e46dc91ae6b084e7da74b2713fab8c34382451e
Binary files /dev/null and b/content/zh/post/louie/figures/7.png differ
diff --git a/content/zh/post/louie/figures/8.png b/content/zh/post/louie/figures/8.png
new file mode 100644
index 0000000000000000000000000000000000000000..39b031aa543b7268a046dcf319cc9d9afb69c54a
Binary files /dev/null and b/content/zh/post/louie/figures/8.png differ
diff --git a/content/zh/post/louie/figures/9.png b/content/zh/post/louie/figures/9.png
new file mode 100644
index 0000000000000000000000000000000000000000..ec25a066f054e2a6e5e6bfe7382f200336c571b9
Binary files /dev/null and b/content/zh/post/louie/figures/9.png differ
diff --git a/content/zh/post/louie/title/1.png b/content/zh/post/louie/title/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..bf45b4342f71e8abd01958c995109a2170d3eed8
Binary files /dev/null and b/content/zh/post/louie/title/1.png differ
diff --git "a/content/zh/post/luohaixiong/Mogdb\344\270\255merge\350\257\255\345\217\245\345\207\272\347\216\260\344\270\273\351\224\256\345\206\262\347\252\201\347\232\204\345\210\206\346\236\220.md" "b/content/zh/post/luohaixiong/Mogdb\344\270\255merge\350\257\255\345\217\245\345\207\272\347\216\260\344\270\273\351\224\256\345\206\262\347\252\201\347\232\204\345\210\206\346\236\220.md"
new file mode 100644
index 0000000000000000000000000000000000000000..bbff846c07d7b448bbff7b9fd44a9967324f7b47
--- /dev/null
+++ "b/content/zh/post/luohaixiong/Mogdb\344\270\255merge\350\257\255\345\217\245\345\207\272\347\216\260\344\270\273\351\224\256\345\206\262\347\252\201\347\232\204\345\210\206\346\236\220.md"
@@ -0,0 +1,583 @@
++++
+
+title = "Analyzing a Primary Key Conflict in a Batched ODBC merge"
+
+date = "2022-04-20"
+
+tags = ["Analyzing a Primary Key Conflict in a Batched ODBC merge"]
+
+archives = "2022-04"
+
+author = "云和恩墨-罗海雄"
+
+summary = "Analyzing a Primary Key Conflict in a Batched ODBC merge"
+
+img = "/zh/post/luohaixiong/title/img.png"
+
+times = "10:20"
++++
+
+# Analyzing a Primary Key Conflict in a Batched ODBC merge
+
+## I. Summary
+
+A customer's merge statement failed at runtime with a primary key conflict.
+
+Analysis points to the following cause:
+
+The ON condition (predicate) of the merge compares against a numeric column declared with a precision. During bind-parameter transfer, the driver sends the value at its full, high precision, while the data stored in the database has already been precision-limited. So even though a matching row exists, the ON condition wrongly evaluates to false; the database then attempts an insert, automatically converting the value down to the declared precision on the way in, and at that point it collides with the data already in the database.
+
+Besides merge, any ODBC program that meets all of the following conditions at once can run into the same mis-evaluated predicate:
+
+- The numeric column is declared with a precision in the database
+- The program compares a bound parameter against that column
+- The parameter's fractional part is nonzero
+- The parameter is not bound as SQL_NUMERIC_STRUCT or SQL_CHAR
+
+This includes:
+
+```sql
+Select … from … where col_with_precision = ?
+Update … set … where col_with_precision = ?
+Delete from … where col_with_precision = ?
+Merge into .. on (col_with_precision = ?) …
+```
+
+For inserts/updates where the parameter is the data being written, the database side trims the precision, so no problem arises.
+
+For example, `insert into .. (col_with_precision)values(?);`
+
+However, the kernel cannot reasonably force the value after `col_with_precision = ?` down to a lower precision before comparing; that would defy normal comparison semantics.
+
+This problem therefore should not be classified as a kernel bug; rather, it is a usage caveat for ODBC programs that deal with precision-limited decimal numbers.
+
+Solutions:
+
+- Standard usage A
+
+  Adjust the SQL to state the exact data type at the SQL level, e.g.:
+
+  ```
+  merge INTO Parts using dual on partid = $1::numeric(10,2)
+  when matched then update set Price = $2
+  when not matched then insert values($3,$4)
+  ```
+
+- Standard usage B
+
+  When binding the parameter in ODBC, use SQL_NUMERIC_STRUCT so the precision information reaches the database correctly, using either the precision the database declares or the number's own precision.
+
+- Workaround A:
+
+  When binding the parameter in ODBC, use SQL_CHAR, converting to SQL_CHAR with the precision the database declares or the number's own precision.
+
+- Workaround B:
+
+  Add a round function around the bound-parameter part of the SQL, something like round( ?, <precision> ), where the precision is the one the database declares or the number's own precision.
+
+  ```
+  merge INTO Parts using dual on partid = round ($1,2)
+  when matched then update set Price = $2
+  when not matched then insert values ($3, $4)
+  ```
+
+## II. Problem Description
+
+A merge statement failed at runtime with a primary key conflict.
+
+## III. Analysis
+
+For confidentiality, all SQL statements, table structures, and data have been replaced:
+
+### 3.1 Error message
+
+The customer's C program connects to the database through the ODBC driver, and its merge operation reported the following error:
+
+```
+Duplicate key violates unique constraint PK_tab
+```
+
+This error means a primary key conflict occurred.
+
+### 3.2 SQL and table structure
+
+The SQL is:
+
+```
+Merge Into test_tab using dual
+On (id = $1 and price = $2)
+when matched then update set qty = $3
+When not matched then insert (id,price, qty) values($4,$5,$6);
+```
+
+The incoming parameters $1/$4, $2/$5, and $3/$6 are pairwise identical.
+
+Id and qty are integers; price is a number with a fractional part.
+
+The corresponding table structure:
+
+Columns: Id int, price(numeric(10,2)), qty (int)
+
+Primary key: (id,price)
+
+### 3.3 Logic of the merge statement
+
+A merge statement combines an update and an insert: when the match condition is true, i.e. the row exists, the update part runs; when the condition is false, the insert part runs.
+
+In this statement, the match condition happens to coincide exactly with the primary key. Normally, if the keyed row exists it gets updated, and only if it does not exist is a row inserted; so under normal circumstances a merge never hits a primary key conflict.
+
+### 3.4 Precision of fractional numbers in computers
+
+Computers represent numbers internally in binary, while in daily life we write them in decimal. Converting the fractional part of a decimal number into binary may not divide out evenly, so the floating-point value entered and the floating-point value actually stored (types like double/real/float) sometimes disagree.
+
+We therefore suspected that the price parameter passed to the database suffered a similar problem, so that although the row actually exists in the database, the merge condition fails to match.
+
+### 3.5 Hypothesis
+
+Take this statement:
+
+```
+Merge Into test_tab using dual
+On (id = $1 and price = $2)
+when matched then update set qty = $3
+When not matched then insert (id,price, qty) values($4,$5,$6);
+```
+
+Suppose:
+
+the table contains one row:
+
+```
+Id=1, price=4.30, cnt=90
+```
+
+When the program tries to merge (Id=1, price=4.30, cnt=100), it hands the driver these parameters:
+
+```
+$1/$4=1, $2/$5=4.30, $3/$6=100
+```
+
+Internally, the 4.30 the program keeps for $2/$5 is not exactly 4.30 but possibly something like 4.300000001.
+
+The statement the database receives is then roughly:
+
+```
+Merge Into test_tab using dual
+On (id = 1 and price = 4.300000001)
+when matched then update set qty = 100
+When not matched then insert (id, price, qty) values (1, 4.300000001, 100);
+```
+
+The database engine first evaluates the ON condition
+
+```
+On (id = 1 and price = 4.300000001)
+```
+
+Since the table stores 1 and 4.30, this expression returns false.
+
+The engine then, per the definition of merge, decides to perform the insert
+
+```
+Insert into test_tab (id, price, qty) values (1, 4.300000001, 100);
+```
+
+And because price is declared with precision 2 in the database, the insert truncates the value, equivalent to:
+
+```
+Insert into test_tab (id, price, qty) values (1, 4.30,100);
+```
+
+Finally, because a record with (id, price) = (1, 4.30) already exists, the primary key conflict is reported.
+
+### 3.6 Reproducing with plain SQL
+
+Let's verify the scenario with plain SQL.
+
+Initialize the table structure and data:
+
+ ```
+ Create table test_tab (Id int, price numeric (10,2), qty int);
+ Alter table test_tab add constraint pk_test_tab primary key (id, price);
+ Insert into test_tab values (1,4.3,100);
+ ```
+
+Now try a merge in which the price value exceeds the precision declared in the table definition:
+
+```
+Merge Into test_tab using dual
+On (id = 1 and price = 4.300000001)
+when matched then
+update set qty = 100
+When not matched then
+insert (id, price, qty) values (1, 4.300000001,100);
+
+ERROR: duplicate key value violates unique constraint "pk_test_tab"
+DETAIL: Key (id, price) = (1, 4.30) already exists.
+```
+
+It does indeed fail.
+
+### 3.7 Checking the official ODBC documentation
+
+The ODBC documentation confirms that when a bound parameter uses a type such as double/real/float rather than SQL_NUMERIC or SQL_DECIMAL, the precision information is not used.
+
+![img](../images/1.png)
+
+So the problem hypothesized above can indeed occur:
+
+1. The program binds the parameter (4.30 as a double/float/real, with precision set to 2)
+
+2. The driver receives the parameter (the precision information is lost, and the binary double type is inexact: 4.30 => 4.30000001)
+
+3. The driver converts the parameter to a string (carrying the changed precision: 4.30000001)
+
+4. The driver sends the string to the database process (the database receives 4.30000001)
+
+### 3.8 Reproducing with a program
+
+Based on the hypothesis above, I wrote a C/ODBC program to reproduce the issue. The result shows that when a double/real/float-typed value with a nonzero fractional part participates in the primary key, a merge on an already-existing value does indeed have a certain chance of causing a primary key conflict.
+
+```
+// NOTE: the header names below are assumed; the #include lines in the
+// original post were blank (likely eaten by rendering).
+#include <iostream>
+#include <cstring>
+#include <pthread.h>
+#include <sql.h>
+#include <sqlext.h>
+
+
+
+#define DESC_LEN 51
+#define RC_SUCCESSFUL(rc) ((rc)==SQL_SUCCESS||(rc)==SQL_SUCCESS_WITH_INFO)
+    SQLRETURN rc;
+
+
+    SQLCHAR err_info[100];
+    SQLCHAR state[6];
+    SQLINTEGER NativeError;
+    SQLSMALLINT err_len;
+
+
+    SQLHENV henv;
+    SQLHDBC hdbc;
+    SQLHSTMT hstmt;
+
+
+void get_dmesg(SQLSMALLINT HandleType, SQLHANDLE Handle, const char *sourceSQL)
+{
+    std::cerr << "*** get_dmesg from pid:" << pthread_self() << " handle: " << Handle << " point:" << sourceSQL << std::endl;
+
+    SQLRETURN rc3 = SQLGetDiagRec(HandleType, Handle, 1, state, &NativeError, err_info, sizeof(err_info), &err_len);
+    //add log
+    std::cerr << "*** get dmesg return " << pthread_self() << " " << rc3 << std::endl;
+
+    if (RC_SUCCESSFUL(rc3))
+    {
+        std::cerr << "*** get dmesg ErrInfo:" << pthread_self() << " " << err_info << std::endl;
+        std::cerr << "*** get dmesg SQLState:" << pthread_self() << " " << state << std::endl;
+    }
+    else
+    {
+        std::cerr << "*** get dmesg SQLGetDiagRec failed, SQLSTATE:" << pthread_self() << " " << state << std::endl;
+    }
+    // if (HandleType == SQL_HANDLE_STMT)
+    // {
+    //     cerr << "*** get dmesg Free handle, handle " << pthread_self() << " " << std::endl;
+    //     for (unsigned int i = 0; i <= SQL_MAX_LEN; i++)
+    //     {
+    //         if (STMT_ARRAY[i])
+    //         {
+    //             SQLFreeHandle(SQL_HANDLE_STMT, STMT_ARRAY[i]);
+    //             STMT_ARRAY[i] = nullptr;
+    //         }
+    //     }
+    //     reconnect();
+
+}
+
+int main(int argc, char* argv[]){
+    SQLAllocHandle(SQL_HANDLE_ENV, NULL, &henv);
+    if (henv == NULL)
+    {
+        std::cerr << "ERROR: ODBC ENV Handle Alloc failed " << std::endl;
+        return -1;
+    }
+    SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, SQL_IS_UINTEGER);
+
+
+    SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc);
+    if (hdbc == NULL)
+    {
+        std::cerr << "ERROR: ODBC DBC Handle Alloc failed " << std::endl;
+        return -1;
+    }
+    char szConnectStr[100]="pg";
+
+    rc = SQLConnect(hdbc, (SQLCHAR *)szConnectStr, SQL_NTS, NULL, SQL_NTS, NULL, SQL_NTS);
+    if (RC_SUCCESSFUL(rc))
+    {
+        std::cerr << "DBConnection sucess" << std::endl;
+        SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmt);
+        if (hstmt == NULL)
+        {
+            std::cerr << "ERROR: ODBC STMT Handle Alloc failed " << std::endl;
+        }
+    }
+    else
+    {
+        get_dmesg(SQL_HANDLE_DBC, hdbc, "CDBConnection::connect");
+    }
+
+
+#define ARRAY_SIZE 1
+SQLCHAR * Statement = (SQLCHAR*) "merge INTO Parts using dual on partid = ? when matched then update set Price = ? when not matched then insert values(?,?) ";
+//SQLCHAR * Statement = (SQLCHAR*) "merge INTO Parts using dual on partid = round(?,2) when matched then update set Price = ? when not matched then insert values(?,?) ";
+//SQLCHAR * Statement = (SQLCHAR*) "INSERT INTO Parts (PartID, Price) VALUES (?, ?)";
+SQLREAL PartIDArray[ARRAY_SIZE];
+SQLREAL DescArray[ARRAY_SIZE][DESC_LEN];
+SQLREAL PriceArray[ARRAY_SIZE];
+SQLLEN PartIDIndArray[ARRAY_SIZE], PartIDIndArray2[ARRAY_SIZE], PriceIndArray[ARRAY_SIZE] , PriceIndArray2[ARRAY_SIZE];
+SQLUSMALLINT i, ParamStatusArray[ARRAY_SIZE];
+SQLULEN ParamsProcessed;
+
+
+std::memset(PartIDIndArray, 0, sizeof(PartIDIndArray));
+std::memset(PriceIndArray, 0, sizeof(PriceIndArray));
+
+// Set the SQL_ATTR_PARAM_BIND_TYPE statement attribute to use
+// column-wise binding.
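+// (Descriptive note: each parameter below is bound as an array of
+// ARRAY_SIZE elements; with SQL_PARAM_BIND_BY_COLUMN the driver walks
+// the arrays in parallel, one element per executed parameter set.)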
+SQLSetStmtAttr(hstmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_PARAM_BIND_BY_COLUMN, (SQLINTEGER)0);
+
+// Specify the number of elements in each parameter array.
+SQLSetStmtAttr(hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)ARRAY_SIZE, SQL_NTS);
+
+// Specify an array in which to return the status of each set of
+// parameters.
+SQLSetStmtAttr(hstmt, SQL_ATTR_PARAM_STATUS_PTR, ParamStatusArray, (SQLINTEGER)0);
+
+// Specify an SQLUINTEGER value in which to return the number of sets of
+// parameters processed.
+SQLSetStmtAttr(hstmt, SQL_ATTR_PARAMS_PROCESSED_PTR, &ParamsProcessed, (SQLINTEGER)0);
+
+// Bind the parameters in column-wise fashion.
+SQLBindParameter(hstmt, 1, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, 12, 2, PartIDArray, 0, PartIDIndArray);
+SQLBindParameter(hstmt, 2, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, 12, 2, PriceArray, 0, PriceIndArray);
+SQLBindParameter(hstmt, 3, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, 12, 2, PartIDArray, 0, PartIDIndArray);
+SQLBindParameter(hstmt, 4, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, 12, 2, PriceArray, 0, PriceIndArray);
+
+
+
+
+
+// Set part ID, description, and price.
+for (i = 0; i < ARRAY_SIZE; i++) {
+    //GetNewValues(&PartIDArray[i], DescArray[i], &PriceArray[i]);
+    PartIDArray[i]=4.30 ;
+    PriceArray[i]=4.30 ;
+    PartIDIndArray[i] = 0;
+    PriceIndArray[i] = 0;
+std::cerr << PartIDArray[i] <sign = 1;
+        if (*wv == '-')
+        {
+            ns->sign = 0;
+            wv++;
+        }
+        else if (*wv == '+')
+            wv++;
+
+        /* skip leading zeros */
+        while (*wv == '0')
+            wv++;
+
+        /* read the digits into calv */
+        ns->precision = 0;
+        ns->scale = 0;
+        for (nlen = 0, dot_exist = FALSE;; wv++)
+        {
+            if (*wv == '.')
+            {
+                if (dot_exist)
+                    break;
+                dot_exist = TRUE;
+            }
+            else if (*wv == '\0' || !isdigit((unsigned char) *wv))
+                break;
+            else
+            {
+                if (nlen >= sizeof(calv))
+                {
+                    if (dot_exist)
+                        break;
+                    else
+                    {
+                        ns->scale--;
+                        *overflow = TRUE;
+                        continue;
+                    }
+                }
+                if (dot_exist)
+                    ns->scale++;
+                calv[nlen++] = *wv;
+            }
+        }
+        ns->precision = nlen;
+
+        /* Convert the decimal digits to binary */
+        memset(ns->val, 0, sizeof(ns->val));
+        for (dig = 0; dig < nlen; dig++)
+        {
+            UInt4 carry;
+            /* multiply the current value by 10, and add the next digit */
+            carry = calv[dig] - '0';
+            for (i = 0; i < sizeof(ns->val); i++)
+            {
+                UInt4 t;
+                t = ((UInt4) ns->val[i]) * 10 + carry;
+                ns->val[i] = (unsigned char) (t & 0xFF);
+                carry = (t >> 8);
+            }
+            if (carry != 0)
+                *overflow = TRUE;
+        }
+    }
+ ```
+
+- Workaround A:
+
+  When binding the parameter in ODBC, use SQL_CHAR, converting to SQL_CHAR with the precision the database declares or the number's own precision.
+
+- Workaround B
+
+  Add a round function around the bound-parameter part of the SQL, something like round( ?, <precision> ), where the precision is the one the database declares or the number's own precision.
+
+  ```
+  merge INTO Parts using dual on partid = round($1,2)
+  when matched then update set Price = $2
+  when not matched then insert values($3,$4)
+  ```
\ No newline at end of file
diff --git a/content/zh/post/luohaixiong/images/1.png b/content/zh/post/luohaixiong/images/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..f2c315e388d1182a2bd2b863ff6aa8127aa5616b
Binary files /dev/null and b/content/zh/post/luohaixiong/images/1.png differ
diff --git a/content/zh/post/luohaixiong/images/2.png b/content/zh/post/luohaixiong/images/2.png
new file mode 100644
index 0000000000000000000000000000000000000000..0df4d728308e69624886295e39df9827fe44248b
Binary files /dev/null and b/content/zh/post/luohaixiong/images/2.png differ
diff --git a/content/zh/post/luohaixiong/images/3.png b/content/zh/post/luohaixiong/images/3.png
new file mode 100644
index 0000000000000000000000000000000000000000..22ecc988dd0cb4afa8b4b6fbf87b72a0ea142b33
Binary files /dev/null and b/content/zh/post/luohaixiong/images/3.png differ
diff --git a/content/zh/post/luohaixiong/title/img.png b/content/zh/post/luohaixiong/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/luohaixiong/title/img.png differ
diff --git a/content/zh/post/luohaixiong/title/img6.png b/content/zh/post/luohaixiong/title/img6.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79
Binary files /dev/null and b/content/zh/post/luohaixiong/title/img6.png differ
diff --git "a/content/zh/post/luohaixiong/\345\234\250openGauss MogDB\344\270\255\345\256\236\347\216\260months_between\345\207\275\346\225\260.md" "b/content/zh/post/luohaixiong/\345\234\250openGauss MogDB\344\270\255\345\256\236\347\216\260months_between\345\207\275\346\225\260.md"
new file mode 100644
index 0000000000000000000000000000000000000000..2a77ee4b5ffe894f27bcd5145b0d546390ac7010
--- /dev/null
+++ "b/content/zh/post/luohaixiong/\345\234\250openGauss MogDB\344\270\255\345\256\236\347\216\260months_between\345\207\275\346\225\260.md"
@@ -0,0 +1,181 @@
++++
+
+title = "Implementing the months_between Function in openGauss/MogDB"
+
+date = "2022-04-11"
+
+tags = ["Implementing the months_between Function in openGauss/MogDB"]
+
+archives = "2022-04"
+
+author = "罗海雄"
+
+summary = "Implementing the months_between Function in openGauss/MogDB"
+
+img = "/zh/post/luohaixiong/title/img.png"
+
+times = "10:20"
+
++++
+
+# Implementing the months_between Function in openGauss/MogDB
+
+Oracle has a function, months_between, that openGauss does not implement.
+
+## Oracle's Logic
+
+Some investigation shows Oracle's logic is as follows:
+1. Extract the year/month/day of the two dates; (year difference) * 12 + (month difference) forms the base part of the result.
+
+2. If both dates happen to be the last day of their months, the result of the previous step is returned directly. If not, the result is (year difference) * 12 + (month difference) + (day difference) / 31, and note that the day-difference part includes hours, minutes, and seconds.
+
+Under this special month-end rule, some rather strange effects appear, especially around the end of February.
+A few examples give the flavor:
+
+- months_between('20210301','20210228') returns not the 1/31 you might expect, but 4/31 (base part 1 month, plus (1-28)/31 days = 1 - 27/31 = 4/31).
+- months_between('20210331','20210228') returns exactly 1, since both dates are month ends.
+- Yet months_between('20210330','20210228') returns 1+2/31, and months_between('20210329','20210228') returns 1+1/31.
+- months_between('20210531','20210430')
+  and months_between('20210530','20210430') both return 1.
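+
+Once the openGauss implementation below is in place, a quick sanity check against these rules might look like this (a sketch only; the exact decimal formatting of the result may differ):
+
+```sql
+-- 1 + (1-28)/31 = 4/31 ≈ 0.1290, not 1/31
+select months_between(to_date('20210301','yyyymmdd'), to_date('20210228','yyyymmdd'));
+-- both dates are month ends, so exactly 1
+select months_between(to_date('20210331','yyyymmdd'), to_date('20210228','yyyymmdd'));
+```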
+
+## Implementing It in openGauss (MogDB)
+
+Knowing the rules, we can achieve compatibility by creating a user-defined function in openGauss (MogDB).
+
+### Extracting year, month, and day
+
+First, pull out the year, month, and day with the date_part function (or the Oracle-compatible extract function). Below, t1 denotes the input date parameter.
+
+```sql
+y1 := date_part('year',t1);
+m1 := date_part('month',t1);
+d1 := date_part('day',t1);
+```
+
+or
+
+```sql
+ y1 := extract(year from t1);
+ m1 := extract(month from t1);
+ d1 := extract(day from t1);
+```
+
+Note that because the return value of the Oracle function incorporates hours, minutes, and seconds, the d1/d2 extracted here cannot be fed directly into the result. What we need is a day value that carries the time-of-day information, which can be obtained by subtracting the start of the month from the input:
+
+```sql
+ (t1-to_date(to_char(t1,'yyyymm'),'yyyymm'))
+```
+
+Regrettably, openGauss does not implement trunc() on a time column; otherwise the following would be the simpler way to write it:
+
+```sql
+ t1-trunc(t1,'MM')
+```
+
+### Detecting month end
+
+The awkward part here is February in leap years, which needs a special check.
+
+#### Month-end check outside of February/leap years
+
+The simplest way is to concatenate the month and day and test membership in 1/31, 3/31, 4/30 … 12/31:
+
+```sql
+ m1||d1 in ('131','331','430','531','630','731','831','930','1031','1130','1231')
+```
+
+This code may look slightly problematic: you might think, wouldn't January 11 and November 1 concatenate to the same string? It actually does not matter, because for January we only care about 1/31, so no confusion can arise.
+Of course, if you want to be rigorous, you can test this way instead:
+
+```sql
+ m1*100 + d1 in (131,331,430,531,630,731,831,930,1031,1130,1231)
+```
+
+#### Month-end check for February
+
+February is wordier because it requires the leap-year rule, which goes:
+a year is a leap year if it is divisible by 4 and not by 100, unless it is also divisible by 400.
+A mouthful to say, but not actually complicated:
+
+```sql
+ (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0)
+```
+
+Since this goes together with the month-end test, the February 28th/29th check can be folded in at the same time:
+
+```sql
+/*leap year*/
+( m1 = 2 and d1=29 and (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0) )
+or /*non-leap year*/
+( m1 = 2 and d1=28 and not (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0) )
+```
+
+#### The complete check, February and non-February combined
+
+```sql
+ (m1||d1 in ('131','331','430','531','630','731','831','930','1031','1130','1231')
+  or /*leap year*/
+  ( m1 = 2 and d1=29 and (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0) )
+  or /*non-leap year*/
+  ( m1 = 2 and d1=28 and not (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0) )
+  )
+  and
+  (m2||d2 in ('131','331','430','531','630','731','831','930','1031','1130','1231')
+  or /*leap year*/
+  ( m2 = 2 and d2=29 and (mod(y2,4)=0 and mod(y2,100)!=0 or mod(y2,400)=0) )
+  or /*non-leap year*/
+  ( m2 = 2 and d2=28 and not (mod(y2,4)=0 and mod(y2,100)!=0 or mod(y2,400)=0) )
+  )
+
+```
+
+## The Complete openGauss (MogDB) Implementation
+
+Combining the logic above yields the final code:
+
+```plsql
+create or replace function months_between(t1 date ,t2 date)
+returns number
+-- months_between implementation in MogDB, by LuoHaixiong@Enmotech
+as $$
+declare
+    y1 int;
+    y2 int;
+    m1 int;
+    m2 int;
+    d1 int;
+    d2 int;
+begin
+y1 := date_part('year', t1);
+y2 := date_part('year', t2);
+m1 := date_part('month', t1);
+m2 := date_part('month', t2);
+d1 := date_part('day', t1);
+d2 := date_part('day', t2);
+if --Both dates are end of month
+  ( (m1||d1 in ('131','331','430','531','630','731','831','930','1031','1130','1231')
+  or /*leap year*/
+  ( m1 = 2 and d1=29 and (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0) )
+  or /*non-leap year*/
+  ( m1 = 2 and d1=28 and not (mod(y1,4)=0 and mod(y1,100)!=0 or mod(y1,400)=0) )
+  )
+  and
+  (m2||d2 in ('131','331','430','531','630','731','831','930','1031','1130','1231')
+  or /*leap year*/
+  ( m2 = 2 and d2=29 and (mod(y2,4)=0 and mod(y2,100)!=0 or mod(y2,400)=0) )
+  or /*non-leap year*/
+  ( m2 = 2 and d2=28 and not (mod(y2,4)=0 and mod(y2,100)!=0 or mod(y2,400)=0) )
+  )
+) then
+    return (y1-y2)*12+(m1-m2);
+else --Normal days
+    return (y1-y2)*12+(m1-m2) +
+    (
+      (t1-to_date(to_char(t1,'yyyymm'),'yyyymm'))
+      - (t2-to_date(to_char(t2,'yyyymm'),'yyyymm'))
+    )/31;
+end if;
+end;
+$$
+LANGUAGE plpgsql;
+```
diff --git "a/content/zh/post/luooofan/VMware+CentOS7.6+OpenGauss2.0.1\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md" "b/content/zh/post/luooofan/VMware+CentOS7.6+OpenGauss2.0.1\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md"
"b/content/zh/post/luooofan/VMware+CentOS7.6+OpenGauss2.0.1\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md" new file mode 100644 index 0000000000000000000000000000000000000000..7a578706e2911b9c6592559f0091e9f788f1e100 --- /dev/null +++ "b/content/zh/post/luooofan/VMware+CentOS7.6+OpenGauss2.0.1\346\236\201\347\256\200\347\211\210\345\256\211\350\243\205.md" @@ -0,0 +1,318 @@ ++++ +title = "openGauss数据库的安装与使用" +date = "2021-11-27" +tags = ["openGauss安装部署"] +archives = "2021-11" +author = "luooofan" +summary = "VMware+CentOS7.6+OpenGauss2.0.1极简版安装过程记录,包括环境准备,安装以及JDBC连接使用" +img = "/zh/post/luooofan/title/openGauss.png" ++++ + +# VMware+CentOS7.6+OpenGauss2.0.1极简版安装 + +## 环境准备 + +Arch:x86-64 + +OS:Win11 + +VMware:16.1.2 + + + +### 安装CentOS 7.6 + +CentOS镜像:[CentOS-7.6.1810](https://archive.kernel.org/centos-vault/7.6.1810/isos/x86_64/) + +VMware中创建新的虚拟机时:1CPU 2Core 4GB NAT 其他选择默认配置即可 + +CentOS引导安装: + +- 语言选择英语 + + image-20210916203132127 + +- 打开网络 + + image-20210916202936433 + +- 设置时区 + + image-20210916203102725 + +- 安装位置和分区 + + image-20210916203224112 + +- 软件选择最小安装(也可以选带GUI安装) + + image-20210916203248690 + +- 开始安装 + +- 设置ROOT密码和新建用户 + +- 安装完成后重启 + + + +### 配置CentOS7.6 + +- 简单配置 + + 设置字体 `setfont lat4-19` + + [重新安装VMware Tools(Optional)](https://segmentfault.com/a/1190000038633985)(对于无GUI的应该没啥用,可以在本地ssh到vmware中就可以复制粘贴了) + + ![image-20210916204547163](http://img.luooofan.site/20210917-125044-image-20210916204547163.png) + + ![image-20210916204516274](http://img.luooofan.site/20210917-125044-image-20210916204516274.png) + +- 使用MobaXterm SSH连接 + +- yum源更新 `yum update` + +- 安装Python3.6 `yum install -y python36` + +- 安装其他依赖`yum install -y bzip2 libaio-devel flex bison ncurses-devel glibc-devel patch` + +- 关闭防火墙和SeLinux,然后重启,重连ssh + + ![image-20210916205823744](http://img.luooofan.site/20210917-125045-image-20210916205823744.png) + +- 设置字符集参数:在/etc/profile文件中添加”export LANG=XXX”(XXX为Unicode编码) + + ![image-20210916210201985](http://img.luooofan.site/20210917-125045-image-20210916210201985.png) + +- 检查设置时区 + + ![image-20210916210325940](http://img.luooofan.site/20210917-125046-image-20210916210325940.png) + +- 关闭swap交换内存(Optional) `swapoff -a` + +- 关闭RemovelPC,CentOS操作系统默认为关闭 + + + + +## 开始安装 + +- 创建用户组dbgrp、用户omm,将该用户添加至root组,并修改用户omm的密码 + + ![image-20210916212750036](http://img.luooofan.site/20210917-125046-image-20210916212750036.png) + +- 配置共享文件夹,把openGauss的安装包(2.0.1 极简版)放进去 + +- 以root用户创建软件目录,以omm用户登录解压 + + ![image-20210916213043287](http://img.luooofan.site/20210917-125046-image-20210916213043287.png) + +- 进入simpleInstall目录,执行安装脚本 `sh install.sh -w xxxx` + + 出现如下错误: + + ![image-20210916212452036](http://img.luooofan.site/20210917-125047-image-20210916212452036.png) + + 解决办法: + + 在/etc/sysctl.conf中加入语句`kernel.sem = 250 32000 100 999`,然后执行`sysctl -p` + + ![image-20210916213623749](http://img.luooofan.site/20210917-125047-image-20210916213623749.png) + + ![image-20210916213918471](http://img.luooofan.site/20210917-125048-image-20210916213918471.png) + +- 执行完安装脚本之后: + + openGauss端口号默认为5432 + + 默认生成名称为postgres的数据库 + + 数据库目录安装路径/opt/software/openGauss/data/single_node,其中/opt/software/openGauss为解压包路径,data/single_node为新创建的数据库节点目录。 + +- 使用ps和gs_ctl查看进程是否正常(如果没有找到gs_ctl,配置环境变量即可 `export PATH=/opt/software/openGauss/bin:$PATH`,如果缺失lib则配置`LD_LIBRARY_PATH`) + + ![image-20210916220723579](http://img.luooofan.site/20210917-125048-image-20210916220723579.png) + + 如果ps里没有的话可以先重启一下: + + `gs_ctl restart -D /opt/software/openGauss/data/single_node -Z single_node` + + + +## 连接与使用 + +### Gsql方式 + 
+After installation, a database named postgres is generated by default, and the default port is 5432.
+
+Because omm is an administrator account, the prompt shows "DBNAME=#". Logging in and connecting as an ordinary user shows "DBNAME=>".
+
+"Non-SSL connection" means the connection to the database is not using SSL. Use an SSL connection when high security is required.
+
+![image-20210916224215117](http://img.luooofan.site/20210917-125049-image-20210916224215117.png)
+
+Inspecting objects:
+
+- Show help: \\?
+- List databases: \\l
+- List tables: \\dt
+- Describe a table: \\d tablename
+- Switch database: \\c dbname
+- List schemas: \\dn
+- List indexes: \\di
+
+![image-20210916225721959](http://img.luooofan.site/20210917-125049-image-20210916225721959.png)
+
+
+
+### Preparing for remote connections
+
+- Configure a database user and privileges (omm cannot be used for remote connections):
+
+  ![image-20210917011415027](http://img.luooofan.site/20210917-125050-image-20210917011415027.png)
+
+- Edit the database's pg_hba.conf to **allow the host/user that will connect remotely** (this apparently cannot be done with the gs_guc command; editing the file directly works):
+
+  ![image-20210917014949550](http://img.luooofan.site/20210917-125050-image-20210917014949550.png)
+
+- Make the policy take effect with gs_ctl: `gs_ctl reload -D ./data/single_node`
+
+- **Change the listen address in postgresql.conf**: `listen_addresses = '*'`; the default listens only on local, and any value that includes the IP address used for the remote connection will do
+
+- Restart the database for it to take effect: `gs_ctl restart -D ./data/single_node`
+
+If the later JDBC connection fails with a connection-refused exception against the target address, as in the figure below, this step is the likely cause.
+
+![image-20210917015658388](http://img.luooofan.site/20210917-125050-image-20210917015658388.png)
+
+
+
+### ODBC/JDBC
+
+According to the documentation, JDBC should be fully supported, while ODBC support is limited.
+
+image-20210917001738561
+
+Since openGauss is installed on CentOS 7.6, JDBC is the choice.
+
+The JDBC connector does not need to be built from source; it can be downloaded directly from the official download page.
+
+Set up the Java environment in VS Code on Windows (Java 11 and Java 8 are needed), and configure the project's JDK Runtime and Referenced Libraries (adding the postgresql.jar obtained by unpacking the download).
+
+Code:
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.util.Properties;
+
+class openGaussTest {
+    public static void main(String[] args) {
+        System.out.println("=====START TEST=====");
+
+        Connection conn = getConnect("luooofan", "Luooofan1");
+
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            ResultSet rs = null;
+            try {
+                rs = stmt.executeQuery(
+                        "select pro_c_id, pro_id, pro_status, pro_quantity, pro_income, pro_purchase_time from property;");
+                while (rs.next()) {
+                    int pro_c_id = rs.getInt(1);
+                    int pro_id = rs.getInt(2);
+                    String pro_status = rs.getString(3);
+                    int pro_quantity = rs.getInt(4);
+                    int pro_income = rs.getInt(5);
+                    Timestamp pro_pruchase_time = rs.getTimestamp("pro_purchase_time");
+                    System.out.printf("%4d %4d %4s %4d %4d ", pro_c_id, pro_id, pro_status, pro_quantity, pro_income);
+                    System.out.println(pro_pruchase_time);
+                }
+            } catch (SQLException e) {
+                if (rs != null) {
+                    try {
+                        rs.close();
+                    } catch (SQLException e1) {
+                        e1.printStackTrace();
+                    }
+                }
+                e.printStackTrace();
+            }
+
+            stmt.close();
+        } catch (SQLException e) {
+            if (stmt != null) {
+                try {
+                    stmt.close();
+                } catch (SQLException e1) {
+                    e1.printStackTrace();
+                }
+            }
+            e.printStackTrace();
+        }
+
+        try {
+            conn.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public static Connection getConnect(String username, String passwd) {
+        // driver class
+        String driver = "org.postgresql.Driver";
+        // database connection descriptor
+        String sourceURL = "jdbc:postgresql://192.168.193.129:5432/finance";
+        Connection conn = null;
+
+        try {
+            // load the driver
+            Class.forName(driver);
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        try {
+            // create the connection
+            conn = DriverManager.getConnection(sourceURL, username, passwd);
+            System.out.println("Connection succeed!");
+        } catch (Exception e) {
+            e.printStackTrace();
+            return null;
+        }
+
+        return conn;
+    }
+}
+```
+
+NOTE:
+
+- The user must be granted whatever privilege the operation requires; the code queries the property table, hence: `grant select on table property to luooofan;`
+
+- Because the CentOS box has no Chinese locale configured, querying directly on CentOS should not be able to display Chinese; this does not affect connecting remotely from Windows and then querying and displaying the results.
+
+  ![image-20210917121925894](http://img.luooofan.site/20210917-125051-image-20210917121925894.png)
+
+
+
+
+
+## References
+
+[openGauss Server&Connectors Official Download](https://opengauss.org/zh/download.html)
+
+[Official docs - Quickstart](https://opengauss.org/zh/docs/2.0.1/docs/Quickstart/Quickstart.html)
+
+[Official docs - Developer Guide](https://opengauss.org/zh/docs/2.0.1/docs/Developerguide/Developerguide.html)
+
+[Problems encountered deploying openGauss on Linux](https://blog.csdn.net/RANGO_CSDN/article/details/118761322)
+
+[Database principles lab (openGauss): connecting with JDBC](https://blog.csdn.net/BlacKingZ/article/details/117915010)
+
diff --git "a/content/zh/post/luooofan/openGauss\344\272\213\345\212\241\346\234\272\345\210\266-MVCC\346\212\200\346\234\257\347\232\204\345\256\236\347\216\260.md" "b/content/zh/post/luooofan/openGauss\344\272\213\345\212\241\346\234\272\345\210\266-MVCC\346\212\200\346\234\257\347\232\204\345\256\236\347\216\260.md"
new file mode 100644
index 0000000000000000000000000000000000000000..4ae8ddce50b0761fa50943aa845ea4e7329e7fcc
--- /dev/null
+++ "b/content/zh/post/luooofan/openGauss\344\272\213\345\212\241\346\234\272\345\210\266-MVCC\346\212\200\346\234\257\347\232\204\345\256\236\347\216\260.md"
@@ -0,0 +1,519 @@
++++
+title = "An Implementation Analysis of MVCC in the openGauss Transaction Mechanism"
+date = "2021-11-27"
+tags = ["openGauss transaction mechanism", "openGauss MVCC", "openGauss source code analysis"]
+archives = "2021-11"
+author = "luooofan"
+summary = "An analysis of the MVCC implementation in openGauss 2.0.1"
+img = "/zh/post/luooofan/title/openGauss.png"
++++
+
+# An Implementation Analysis of MVCC in the openGauss Transaction Mechanism
+
+## Overview
+
+### Transactions
+
+A **transaction** is one of the most essential and attractive database features offered to users. Simply put, a transaction is a user-defined **set** of **database operations** (queries, inserts, updates, deletes, and so on); from inside the database it is guaranteed that the operation set as a whole has Atomicity, Consistency, Isolation, and Durability, collectively known as the **ACID properties** of transactions.
+
+
+
+### Concurrency control in a DBMS
+
+Concurrency control targets scenarios where **transactions run in parallel** in the database, preserving the **Consistency** and **Isolation** of ACID. The three mainstream concurrency-control techniques in database technology are Multi-version Concurrency Control (MVCC), Strict Two-Phase Locking (S2PL), and Optimistic Concurrency Control (OCC), each of which has many variants.
+
+
+
+### MVCC
+
+The basic mechanism of MVCC: a writing transaction never modifies tuple contents in place; every write creates a new version on top of the old one, and the old version is retained. When some transaction needs to read data, the database system selects, from all the versions, the one that satisfies that transaction's isolation level.
+
+The main advantage of MVCC is that lock requests for reading data do not conflict with lock requests for writing data, thereby achieving **reads that do not block writes, and writes that do not block reads**.
+
+
+
+### Overall transaction architecture of openGauss
+
+![img](http://img.luooofan.site/20211017-210839-v2-58a3a0df18e1a92b9cc209036fb149ab_b.jpg)
+
+
+
+In openGauss the transaction implementation is strongly tied to the storage engine implementation. The code is concentrated under src/gausskernel/storage/access/transam and src/gausskernel/storage/lmgr, with the key files shown in the figure.
+
+(1) Transaction manager: the hub of the transaction system. It is implemented as a finite state machine loop that accepts commands from the outside and, based on the state the current transaction is in, decides the transaction's next step of execution.
+
+(2) Log manager: records the state of transaction execution and the history of data changes, comprising the commit log (CLOG), the commit sequence number log (CSNLOG), and the transaction log (XLOG). CLOG records only the final status of transaction execution; CSNLOG records the order of commits, used for visibility judgments; XLOG is the data redo log, used for recovery and persistence.
+
+(3) Thread management: a region of memory records the transaction information of all threads, so any thread can obtain the state of other transactions by reading that region.
+
+(4) MVCC: in openGauss, the transaction read path combines the CSN sequence numbers of committed transactions with multi-version concurrency control, so that tuple reads and writes do not block each other.
+
+(5) Lock manager: implements the system's write concurrency control, using locks to guarantee isolation of the transaction write path.
+
+
+
+
+
+## The MVCC Implementation
+
+We need to look at:
+
+- how tuple version numbers are implemented
+- how snapshots are implemented
+- how the algorithms that judge data validity, visibility, and updatability are implemented
+- how the different isolation levels are implemented
+
+### Multi-version tuple storage structure
+
+*src/include/access/htup.h*
+
+To define the different versions of data in MVCC, openGauss adds the following fields to HeapTupleHeaderData, the header of every tuple:
+
+```c++
+typedef struct HeapTupleHeaderData {
+    union {
+        HeapTupleFields t_heap; /* descriptive information about this tuple */
+        DatumTupleFields t_datum;
+    } t_choice;
+
+    ItemPointerData t_ctid; /* (block number, offset within block): records the physical location of this tuple or of its newer version */
+
+    /* Fields below here must match MinimalTupleData! */
*/ + + uint16 t_infomask2; + + uint16 t_infomask; /* various flag bits, see below */ + + uint8 t_hoff; + + /* ^ - 23 bytes - ^ */ + + bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; + + /* MORE DATA FOLLOWS AT END OF STRUCT */ +} HeapTupleHeaderData; +typedef HeapTupleHeaderData* HeapTupleHeader +``` + +HeapTupleFields + +```c++ +typedef struct HeapTupleFields { + ShortTransactionId t_xmin; /* 存放插入该 Tuple 时的 txid */ + ShortTransactionId t_xmax; /* 存放删除或者更新该 Tuple 时的 txid,如果还没更新或者删除,那么置 0,表示无效 */ + + union { + CommandId t_cid; /* 创建或更新/删除该 Tuple 的命令在该事务内执行的所有 SQL 命令中的编号 */ + ShortTransactionId t_xvac; /* old-style VACUUM FULL xact ID */ + } t_field3; +} HeapTupleFields; +``` + +t_infomask + +```c++ +#define HEAP_HASNULL 0x0001 /* has null attribute(s) */ +#define HEAP_HASVARWIDTH 0x0002 /* has variable-width attribute(s) */ +#define HEAP_HASEXTERNAL 0x0004 /* has external stored attribute(s) */ +#define HEAP_HASOID 0x0008 /* has an object-id field */ +#define HEAP_COMPRESSED 0x0010 /* has compressed data */ +#define HEAP_COMBOCID 0x0020 /* t_cid is a combo cid */ +#define HEAP_XMAX_EXCL_LOCK 0x0040 /* xmax is exclusive locker */ +#define HEAP_XMAX_SHARED_LOCK 0x0080 /* xmax is shared locker */ +/* if either LOCK bit is set, xmax hasn't deleted the tuple, only locked it */ +#define HEAP_IS_LOCKED (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_SHARED_LOCK) +#define HEAP_XMIN_COMMITTED 0x0100 /* t_xmin committed */ +#define HEAP_XMIN_INVALID 0x0200 /* t_xmin invalid/aborted */ +#define HEAP_XMIN_FROZEN (HEAP_XMIN_INVALID | HEAP_XMIN_COMMITTED) +#define HEAP_XMAX_COMMITTED 0x0400 /* t_xmax committed */ +#define HEAP_XMAX_INVALID 0x0800 /* t_xmax invalid/aborted */ +... +``` + + + +### 插入、删除、更新元组 + +#### 元组在页中是如何存放的 + +![Fig. 5.3. Representation of tuples.](https://img.luooofan.site/20211015-225510-fig-5-03.png) + +![img](https://img.luooofan.site/20211015-225127-update.png) + +#### 插入 + +假设一个txid为99的事务插入一个元组 + +![Fig. 5.4. Tuple insertion.](https://img.luooofan.site/20211015-225511-fig-5-04.png) + +#### 删除 + +假设一个txid为111的事务删除一个元组 + +![Fig. 5.5. Tuple deletion.](https://img.luooofan.site/20211015-225511-fig-5-05.png) + +#### 更新 + +假设99号事务插入的元组被100号事务更新了两次 + +![Fig. 5.6. 
Update the row twice.](https://img.luooofan.site/20211015-225511-fig-5-06.png) + +openGauss通过HeapTupleHeaderData 的几个特殊的字段,给元组设置了不同的版本号,元组的每次更新操作都会产生一条新版本的元组,版本之间从旧到新形成了一条版本链(旧的ctid指向新的元组)。 + + + +### 事务快照的实现 + +为了实现元组对事务的可见性判断,openGauss引入了事务快照SnapshotData + +在openGauss中,有两种方式来实现快照。 + +#### (1)活跃事务数组方法 + +在数据库进程中,维护一个**全局的数组**,其中的成员为**正在执行的事务信息**,包括事务的事务号,该数组即活跃事务数组。 + +在每个事务开始的时候,复制一份该数组内容。 + +当事务执行过程中扫描到某个元组时,需要通过判断元组xmin和xmax这两个事务对于查询事务的可见性,来决定该元组是否对查询事务可见。 + + + +![img](https://img.luooofan.site/20211015-225512-d34f1a911a8804c0b1f8d791a65f175e.png) + + + +#### (2)时间戳方法 + +![img](https://img.luooofan.site/20211015-225512-72285f7db5051f38a7940e7f235f49df.png) + +在openGauss内部,使用一个全局自增的长整数作为逻辑的时间戳,模拟数据库内部的时序,该逻辑时间戳被称为提交顺序号(Commit Sequence Number,简称CSN)。 + +每当一个事务提交的时候,在CSN日志中会记录该事务号 XID对应的逻辑时间戳 CSN 值。 + +![img](https://img.luooofan.site/20211015-225513-64eaedd1d1501b104652b104bd3152b2.png) + +```c++ +#define COMMITSEQNO_INPROGRESS UINT64CONST(0x0) // 表示该事务还未提交或回滚 +#define COMMITSEQNO_ABORTED UINT64CONST(0x1) // 表示该事务已经回滚 +#define COMMITSEQNO_FROZEN UINT64CONST(0x2) // 表示该事务已提交,且对任何快照可见 +#define COMMITSEQNO_FIRST_NORMAL UINT64CONST(0x3) // 事务正常的CSN号起始值 +#define COMMITSEQNO_COMMIT_INPROGRESS (UINT64CONST(1) << 62) // 事务正在提交中 +``` + + + +#### 事务快照数据结构SnapshotData + +*src/include/utils/snapshot.h* + +获取快照时会记录当前活跃的最小的xid,记为snapshot.xmin。当前最新提交的“事务id(latestCompleteXid) + 1”,记为snapshot.xmax。当前最新提交的“CSN号 + 1”(NextCommitSeqNo),记为snapshot.csn。 + +```c++ +typedef struct SnapshotData { + SnapshotSatisfiesFunc satisfies; /* 判断可见性的函数;通常使用MVCC,即HeapTupleSatisfiesMVCC */ + TransactionId xmin; /*当前活跃事务最小值,小于该值的事务说明已结束 */ + TransactionId xmax; /*最新提交事务id(latestCompeleteXid)+1,大于等于改值说明事务还未开始,该事务id不可见 */ + TransactionId* xip; /*记录当前活跃事务链表,在CSN版本中该值无用 */ + TransactionId* subxip; /* 记录缓存子事务活跃链表,在CSN版本中该值无用 */ + uint32 xcnt; /* 记录活跃事务的个数(xip中元组数)在CSN版本中该值无用 */ + ... + + CommitSeqNo snapshotcsn; /* 快照的CSN号,一般为最新提交事务的CSN号+1(NextCommitSeqNo),CSN号严格小于该值的事务可见。 */ + ... 
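+    /* 补充注释:可见性判断的核心就是比较事务的提交CSN与snapshotcsn,提交CSN严格小于snapshotcsn的事务对本快照可见 */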
+ + CommandId curcid; /*事务块中的命令序列号,即同一事务中,前面插入的数据,后面可见。 */ + uint32 active_count; /* ActiveSnapshot stack的refcount */ + uint32 regd_count; /* RegisteredSnapshotList 的refcount*/ + void* user_data; /* 本地多版本快照使用,标记该快照还有线程使用,不能直接释放 */ + SnapshotType snapshot_type; /* openGauss单机无用 */ +} SnapshotData; +``` + +satisfies是openGauss提供的对于事务可见性判断的统一操作接口。 + +*src/gausskernel/storage/access/heap/heapam_visibility.c* + +- HeapTupleSatisfiesMVCC:判断元组对某一快照版本是否有效 +- HeapTupleSatisfiesUpdate:判断元组是否可更新 +- HeapTupleSatisfiesDirty:判断当前元组是否已脏 +- HeapTupleSatisfiesSelf:判断tuple对自身信息是否有效 +- HeapTupleSatisfiesToast:用于TOAST表(参考[文档](httpss://www.postgresql.org/docs/10/static/storage-toast.html))的判断 +- HeapTupleSatisfiesVacuum:用在VACUUM,判断某个元组是否对任何正在运行的事务可见,如果是,则该元组不能被VACUUM删除 +- HeapTupleSatisfiesAny:所有元组都可见 +- HeapTupleSatisfiesHistoricMVCC:用于CATALOG 表 +- …… + + + +#### MVCC可见性判断机制 + +| 状态 | xmax对于查询可见 | xmax对于查询不可见 | +| :----------------: | :--------------------------: | :--------------------------: | +| xmin对于查询可见 | 记录不可见(先插入,后删除) | 记录可见(先插入,未删除) | +| xmin对于查询不可见 | 不可能发生 | 记录不可见(未插入,未删除) | + +##### XidVisibleInSnapshot + +*src/gausskernel/storage/access/heap/heapam_visibility.c* + +```c++ +bool XidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdStatus* hintstatus, Buffer buffer, bool* sync) +{ + bool looped = false; + *hintstatus = XID_INPROGRESS; + if (GTM_MODE && TransactionIdFollowsOrEquals(xid, snapshot->xmax)) { + return false; + } +loop: + csn = TransactionIdGetCommitSeqNo(xid, false, true, false); + if (COMMITSEQNO_IS_COMMITTED(csn)) { + *hintstatus = XID_COMMITTED; + if (csn < snapshot->snapshotcsn) + return true; + else + return false; + } else if (COMMITSEQNO_IS_COMMITTING(csn)) { + ... + } else { + if (csn == COMMITSEQNO_ABORTED) + *hintstatus = XID_ABORTED; + return false; + } +} +``` + +如果xid事务正在执行: + +```c++ +if (looped) { + ereport(DEBUG1, (errmsg("transaction id %lu's csn %ld is changed to ABORT after lockwait.", xid, csn))); + RecheckXidFinish(xid, csn); + CSNLogSetCommitSeqNo(xid, 0, NULL, COMMITSEQNO_ABORTED); + SetLatestFetchState(xid, COMMITSEQNO_ABORTED); + *hintstatus = XID_ABORTED; + return false; +} else { + if (!COMMITSEQNO_IS_SUBTRANS(csn)) { + ... + CommitSeqNo latestCSN = GET_COMMITSEQNO(csn); + if (latestCSN >= snapshot->snapshotcsn) { + ... + return false; + } + } else { + parentXid = (TransactionId)GET_PARENTXID(csn); + } + ... + if (TransactionIdIsValid(parentXid)) + SyncWaitXidEnd(parentXid, buffer); + else + SyncWaitXidEnd(xid, buffer); + looped = true; + parentXid = InvalidTransactionId; + goto loop; +} +``` + +##### HeapTupleSatisfiesMVCC + + +```c++ +static bool HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot, Buffer buffer) +{ + // 取元组头 + HeapTupleHeader tuple = htup->t_data; + ... 
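+    // 补充注释:下面的判断分两大步:先看插入事务xmin是否对快照可见,再看删除事务xmax是否可见;
+    // 过程中利用元组头上的hint bit缓存事务的最终提交/回滚状态,避免反复查询CSN日志(CSNLOG)。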
+ // 根据hint bit,若xmin没有被标记为已提交:可能被标记为回滚,或者还未标记 + if (!HeapTupleHeaderXminCommitted(tuple)) { + // 如果xmin已经被标记为invalid,说明插入该元组的事务已经回滚,直接返回不可见 + if (HeapTupleHeaderXminInvalid(tuple)) + return false; + // xmin还未标记,并且xmin为当前事务,说明是在同一个事务内的插入命令和扫描命令,则需要去判断CID + // 同一个事务内,后面的查询可以查到当前事务之前命令插入的并且未删除的结果 + if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tuple))) { + if ((tuple->t_infomask & HEAP_COMBOCID) && CheckStreamCombocid(tuple, snapshot->curcid, page)) + return true; /* delete after stream producer thread scan started */ + + // 当前扫描命令之后的某条命令才插入 + if (HeapTupleHeaderGetCmin(tuple, page) >= snapshot->curcid) + return false; /* inserted after scan started */ + // 到这里说明当前扫描命令之前已经插入 + // 根据hint bit,xmax被标记为invalid + if (tuple->t_infomask & HEAP_XMAX_INVALID) + return true; + + ... + + // 当前扫描命令之后的某条命令删除了该元组 + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } + // xmin还没打标记,并且不是当前事务 + else { + // 通过csnlog判断事务是否可见,并且返回该事务的最终提交状态 + visible = XidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, &hintstatus, buffer, NULL); + // 如果该事务提交,则打上提交的hint bit用于加速判断 + if (hintstatus == XID_COMMITTED) + SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED, HeapTupleHeaderGetXmin(page, tuple)); + // 如果事务回滚,则打上回滚标记 + if (hintstatus == XID_ABORTED) { + ... + SetHintBits(tuple, buffer, HEAP_XMIN_INVALID, InvalidTransactionId); + } + // 如果xmin不可见,则该元组不可见 + if (!visible) { + ... + return false; + } + } + } + // 根据hint bit,若xmin已经被标记为已提交,则通过函数接口CommittedXidVisibleInSnapshot判断是否对本次快照可见 + else { + /* xmin is committed, but maybe not according to our snapshot */ + if (!HeapTupleHeaderXminFrozen(tuple) && + !CommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, buffer)) { + if (...) { + return false; /* treat as still in progress */ + } + } + } + // 到此为止认为xmin visible,继续判断xmax的可见性 + +recheck_xmax: + // 根据hint bit,xmax已经被标记为invalid,即已经回滚 + if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */ + return true; + + ... // 还有一些其他状态判断 + + // 根据hint bit,xmax没有被标记为commited + if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED)) { + bool sync = false; + TransactionId xmax = HeapTupleHeaderGetXmax(page, tuple); + + // 如果xmax为当前事务 + if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(page, tuple))) { + // 如果删除该元组的命令后发生于快照扫描时刻 + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } + + visible = XidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, &hintstatus, buffer, &sync); + /* + * If sync wait, xmax may be modified by others. So we need to check xmax again after acquiring the page lock. + */ + if (sync && (xmax != HeapTupleHeaderGetXmax(page, tuple))) { + goto recheck_xmax; + } + // 根据hintstatus在元组头部打标记 hint bit + if (hintstatus == XID_COMMITTED) { + SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED, HeapTupleHeaderGetXmax(page, tuple)); + } + if (hintstatus == XID_ABORTED) { + ... + SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId); + } + if (!visible) { + if (...) 
{ + if (sync && (xmax != HeapTupleHeaderGetXmax(page, tuple))) { + goto recheck_xmax; + } + return true; /* treat as still in progress */ + } + } + } + // 根据hint bit,xmax被标记为commited + else { + /* xmax is committed, but maybe not according to our snapshot */ + if (!CommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, buffer)) { + if (...) { + return true; /* treat as still in progress */ + } + } + } + return false; +} +``` + + + + + + + +### 隔离级别的实现 + +| 隔离级别 | P0:脏写 | P1:脏读 | P4:更新丢失 | P2:不可重复读 | P3:幻读 | A5A:读偏斜 | A5B:写偏斜 | +| ------------ | -------- | -------- | ----------- | -------------- | ------- | ---------- | ---------- | +| 读未提交 | 不可能 | 可能 | 可能 | 可能 | 可能 | 可能 | 可能 | +| 读已提交 | 不可能 | 不可能 | 可能 | 可能 | 可能 | 可能 | 可能 | +| 可重复读 | 不可能 | 不可能 | 不可能 | 不可能 | 可能 | 不可能 | 不可能 | +| 快照一致性读 | 不可能 | 不可能 | 不可能 | 不可能 | 偶尔 | 不可能 | 可能 | +| 可串行化 | 不可能 | 不可能 | 不可能 | 不可能 | 不可能 | 不可能 | 不可能 | + +(1)**脏写(dirty write)**:两个事务分别写入,两个事务分别提交或回滚,则事务的结果无法确定,即一个事务可以回滚另一个事务的提交。 + +(2)**脏读(dirty read)**:一个事务可以读取另一个事务未提交的修改数据。 + +(3)**不可重复读(fuzzy read)**:一个事务重复读取前面读取过的数据,数据的结果被另外的事务修改。 + +(4)**幻读(phantom)**:一个事务重复执行范围查询,返回一组符合条件的数据,每次查询的结果集因为其他事务的修改发生改变(条数)。 + +(5)**更新丢失(lost update)**:一个事务在读取元组并更新该元组的过程中,有另一个事务修改了该元组的值,导致最终这次修改丢失。 + +(6)**读偏斜(read skew)**:假设数据x,y有隐式的约束x+y=100;事务一读取x=50;事务二写x=25并更新y=75保证约束成立,事务二提交,事务一再读取y=75,导致事务一中读取x+y=125,不满足约束。 + +(7)**写偏斜(write skew)**:假设数据x,y有隐式的约束x+y<=100;事务一读取x=50,并写入y=50;事务二读取y=30并写入x=70,并提交;事务一再提交;最终导致x=70,y=50不满足x+y<=100的约束。 + + + +隔离级别越高,在一个事务执行过程中,它能“感知”到的并发事务的影响越小。在最高的可串行化隔离级别下,任意一个事务的执行,均“感知”不到有任何其他并发事务执行的影响,并且所有事务执行的效果就和一个个顺序执行的效果完全相同。 + +在openGauss中,隔离级别的实现基于MVCC和快照机制,因此这种隔离方式被称为快照隔离(`Snapshot Isolation`,SI)。目前,openGauss支持读已提交和可重复读这两种隔离级别。两者实现上的差别在于在一个事务中获取快照的次数。(在实现上可重复读隔离级别无幻读问题,有A5B写偏斜问题) + + +如果采用读已提交的隔离级别,那么在一个事务块中每条语句的执行开始阶段,都会去获取一次最新的快照,从而可以看到那些在本事务块开始以后、在前面语句执行过程中提交的并发事务的效果。 + +如果采用可重复读的隔离级别,那么在一个事务块中,只会在第一条语句的执行开始阶段,获取一次快照,后面执行的所有语句都会采用这个快照,整个事务块中的所有语句均不会看到该快照之后提交的并发事务的效果。 + +![img](http://img.luooofan.site/20211017-204222-dc83a9cc72803e849caa49dae027369f.png) + + + + + +## 总结 +- **元组版本号的实现**:使用元组头部信息的字段来标示元组的版本号 +- **快照的实现**:活跃事务数组方法和时间戳方法 +- **判断数据有效性、可见性、可更新性的算法的实现**: `XidVisibleInSnapshot`和`HeapTupleSatisfiesMVCC` +- **不同隔离级别的实现**:在一个事务中获取快照的次数 + + + +## 参考资料 + +[知乎:openGauss数据库源码解析系列文章](https://www.zhihu.com/column/c_1358363246349635584) + +[CSDN:openGauss事务机制](https://blog.csdn.net/GaussDB/article/details/116058155) + +[The Internals of PostgreSQL](https://www.interdb.jp/pg/pgsql05.html) + +[PgSQL · 特性分析 · MVCC机制浅析](http://mysql.taobao.org/monthly/2017/10/01) + +[PosrgreSQL 学习计划——Vacuum 清理机制](https://blog.yasking.org/a/postgresql-vacuum.html) + +[PostgreSql pageinspect 更深的理解PG](https://www.modb.pro/db/24577) + +[openGauss的MVCC以及vacuum机制源码解析—CSN LOG](https://blog.opengauss.org/zh/post/minshengyunwei/opengauss%E7%9A%84mvcc%E4%BB%A5%E5%8F%8Avacuum%E6%9C%BA%E5%88%B6%E6%BA%90%E7%A0%81%E8%A7%A3%E6%9E%90csn-log/) + +[浅谈数据库并发控制 - 锁和 MVCC](https://draveness.me/database-concurrency-control/) + +[PostgreSQL MVCC可见性判断](https://cloud.tencent.com/developer/article/1598195) + + + +​ diff --git a/content/zh/post/luooofan/title/openGauss.png b/content/zh/post/luooofan/title/openGauss.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/luooofan/title/openGauss.png differ diff --git a/content/zh/post/luooofan/title/title1.png b/content/zh/post/luooofan/title/title1.png new file mode 100644 index 
0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/zh/post/luooofan/title/title1.png differ diff --git "a/content/zh/post/luoyuchen/OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" "b/content/zh/post/luoyuchen/OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..6d3397d9c001bfe0427f35d6dd84d63e78b1b14c --- /dev/null +++ "b/content/zh/post/luoyuchen/OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" @@ -0,0 +1,224 @@ ++++ + +title = "OpenGauss数据库安装与使用" + +date = "2021-11-27" + +tags = ["OpenGauss数据库安装与使用"] + +archives = "2021-11" + +author = "罗宇辰" + +summary = "OpenGauss数据库安装与使用" + +img = "/zh/post/luoyuchen/title/1.png" + +times = "13:50" + ++++ + +# OpenGauss数据库安装与使用报告 + +### 1.获取openGauss安装包 + +在 https://opengauss.org/zh/download.html 官网上下载安装包 + +![1.png](../figures/1.png "1") + +这里我安装的是极简版。 + +![2.png](../figures/2.png "2") + +### 2.CentOS设置 + +####1)CentOS版本 +CentOS7 +![3.png](../figures/3.png "3") + +####2)centos网络连接设置 +>cd /etc/sysconfig/network-scripts #进入网络配置文件目录 +ls #找到目录下第一个文件 +vi 文件名 #用vi修改配置 + +![4.png](../figures/4.png "4") +最后一行改为yes。 +>service network restart #重启网卡,使配置生效 + +####3)yum安装图形化界面(非必须) +>yum groupinstall -y "GNOME Desktop" + +安装完重启。在命令行界面输入 +>startx + +进入图形化界面。 +![5.png](../figures/5.png "5") + +####4)配置yum源 +![6.png](../figures/6.png "6") +####5)yum安装依赖的软件包 +以下环境配置均参考 https://zhuanlan.zhihu.com/p/402928515 +>yum install libaio-devel flex bison ncurses-devel glibc-devel patch redhat-lsb-core readline-devel -y + +####6)关闭防火墙 + +>systemctl status firewalld +systemctl disable firewalld.service +systemctl stop firewalld.service + +![7.png](../figures/7.png "7") +####7)关闭SELinux + +>sed -i '/SELINUX=/d' /etc/selinux/config +echo "SELINUX=disabled" >> /etc/selinux/config +cat /etc/selinux/config|grep -v ^#|grep -v '^$' + +![8.png](../figures/8.png "8") + +####8)设置时区 + +>rm -fr /etc/localtime +ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime +ll /etc/localtime + +![9.png](../figures/9.png "9") + +####9)关闭swap +修改分区表文件,删除SWAP挂载信息 +>cp /etc/fstab /etc/fstab.bak +sed -i '/swap/s/^/#/' /etc/fstab +cat /etc/fstab|grep -v ^#|grep -v '^$' + +关闭swap +>swapoff -a + +####10)创建dbgrp的组,创建新用户omm并授权 + +>groupadd -g 1001 dbgrp +useradd -u 2001 -g dbgrp omm +chown -R omm:dbgrp /opt + +####11)配置操作系统内核参数 + +>cat >> /etc/sysctl.conf << EOF +net.ipv4.tcp_max_tw_buckets = 10000 +net.ipv4.tcp_tw_reuse = 1 +net.ipv4.tcp_tw_recycle = 1 +net.ipv4.tcp_keepalive_time = 30 +net.ipv4.tcp_keepalive_probes = 9 +net.ipv4.tcp_keepalive_intvl = 30 +net.ipv4.tcp_retries1 = 5 +net.ipv4.tcp_syn_retries = 5 +net.ipv4.tcp_synack_retries = 5 +net.ipv4.tcp_retries2 = 12 +vm.overcommit_memory = 0 +net.ipv4.tcp_rmem = 8192 250000 16777216 +net.ipv4.tcp_wmem = 8192 250000 16777216 +net.core.wmem_max = 21299200 +net.core.rmem_max = 21299200 +net.core.wmem_default = 21299200 +net.core.rmem_default = 21299200 +net.ipv4.ip_local_port_range = 26000 65535 +kernel.sem = 250 6400000 1000 25600 +vm.min_free_kbytes = 102400 ##suggest to set as physical memory * 5% +net.core.somaxconn = 65535 +net.ipv4.tcp_syncookies = 1 +net.core.netdev_max_backlog = 65535 +net.ipv4.tcp_max_syn_backlog = 65535 +net.ipv4.tcp_fin_timeout = 60 +kernel.shmall = 1152921504606846720 +kernel.shmmax = 18446744073709551615 
+net.ipv4.tcp_sack = 1 +net.ipv4.tcp_timestamps = 1 +vm.extfrag_threshold = 500 +vm.overcommit_ratio = 90 +EOF +sysctl -p + +###3.安装OpenGauss +####1)创建用于安装openGauss的文件夹 +>mkdir -p /opt/software/openGauss + +将本机上下载的xxx.tar.bz2拖动到图形化界面中的任意文件夹 +右键打开properties找到压缩包所在位置 +![10.png](../figures/10.png "10") + +![11.png](../figures/11.png "11")) + +解压到创建的文件夹 +>cd /home/louie +tar -jxf openGauss-2.0.1-CentOS-64bit.tar.bz2 -C /opt/software/openGauss/ + +####2)安装OpenGauss +进入解压好的文件夹中 +>cd /opt/software/openGauss/simpleInstall/ + +根据文件夹中readme文件中的指导开始安装 +>sh install.sh -w gauss@123 + +极简版采用安装脚本 +![12.png](../figures/12.png "12") + +####3)安装成功 +![13.png](../figures/13.png "13") + +###4.OpenGauss使用测试 +####1)加入PATH +![14.png](../figures/14.png "14") + +####2)连接默认数据库测试 +![15.png](../figures/15.png "15") + +####3)创建新数据库test(指令后要加分号) +![16.png](../figures/16.png "16") +####4)测试直接建表 +![17.png](../figures/17.png "17") +####5)测试使用JDBC连接数据库 +Ifconfig命令查找安装opengauss的虚拟机所在IP地址(inet后面的地址) +![18.png](../figures/18.png "18") +根据找到的ip地址修改pg_hba.conf文件 +![19.png](../figures/19.png "19") +![20.png](../figures/20.png "20") +在原电脑编写测试连接的java文件,例如openGaussDemo.java +![21.png](../figures/21.png "21") +配置好jdk,解压下载好的jdbc,获得postgresql.jar文件。 +将postgresql.jar openGaussDemo.java放在同一文件夹中,在cmd中进入该目录,执行: +>javac -encoding utf-8 -cp d:\Download\postgresql.jar openGaussDemo.java + +编译java文件。 +再执行: +>java -cp .;D:/Download/postgresql.jar openGaussDemo + +运行测试代码。 +![22.png](../figures/22.png "22") +连接成功。 + +###5.过程中遇到的问题 +####1)安装step1报错 +![23.png](../figures/23.png "23") +解决:不能在root下进行安装。 + +####2)安装step2报错 +![24.png](../figures/24.png "24") +解决:在/etc/sysctl.conf中加入语句kernel.sem = 250 32000 100 999,然后执行sysctl -p。 +实际上这个报错是在没有进行系统内核参数修改时出现的,执行完上面centos的环境配置后可以一步到位。 + +####3)step3报错 +解决:需要用chown -R 用户名:dbgrp /opt授权。 + +####4)创建用户组dbgrp时报错 +![25.png](../figures/25.png "25") +解决:在root下操作 + +####5)测试新建数据库,发现没完成指令。 +解决:sql指令后加分号。 + +####6)虚拟机卡死 +原因:原先设置的分配虚拟机内存为2G,估计是安装了centos的图形化界面,导致卡死。 +解决:虚拟机分配4G内存。 + +####7)jdbc连接虚拟机上的数据库报错:FATAL: Forbid remote connection with trust method! 
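+出现该报错,说明pg_hba.conf中匹配到的认证方式仍为trust。除了按下文直接编辑文件外,也可以尝试用gs_guc修改认证方式并重载配置(示意:客户端网段为假设值,gs_guc的-h用法请以官方文档为准):
+>gs_guc reload -D /opt/software/openGauss/data/single_node -h "host all all 192.168.0.0/16 sha256"
+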
+![26.png](../figures/26.png "26")
+解决:参考 https://bbs.huaweicloud.com/forum/thread-102401-1-1.html ,进入安装目录/opt/software/openGauss/data/single_node,修改pg_hba.conf,将其中的trust改为sha256。
+
diff --git a/content/zh/post/luoyuchen/figures/1.png b/content/zh/post/luoyuchen/figures/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..c0766c878d2df76297a0e599c3ab07e75a417d07
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/1.png differ
diff --git a/content/zh/post/luoyuchen/figures/10.png b/content/zh/post/luoyuchen/figures/10.png
new file mode 100644
index 0000000000000000000000000000000000000000..69e422b7b8214227b629a82e035162bc14e64a8b
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/10.png differ
diff --git a/content/zh/post/luoyuchen/figures/11.png b/content/zh/post/luoyuchen/figures/11.png
new file mode 100644
index 0000000000000000000000000000000000000000..5945694fed68c166e904226a943b9fd35edd29cd
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/11.png differ
diff --git a/content/zh/post/luoyuchen/figures/12.png b/content/zh/post/luoyuchen/figures/12.png
new file mode 100644
index 0000000000000000000000000000000000000000..eb8899c31a96abba83da4970853e9545a3fb6b55
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/12.png differ
diff --git a/content/zh/post/luoyuchen/figures/13.png b/content/zh/post/luoyuchen/figures/13.png
new file mode 100644
index 0000000000000000000000000000000000000000..805876c5700b4be674aa04e21d3576b4c795ee06
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/13.png differ
diff --git a/content/zh/post/luoyuchen/figures/14.png b/content/zh/post/luoyuchen/figures/14.png
new file mode 100644
index 0000000000000000000000000000000000000000..5eb07059426f68c0a9d4f3d1145fb8f6cd6dbbdb
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/14.png differ
diff --git a/content/zh/post/luoyuchen/figures/15.png b/content/zh/post/luoyuchen/figures/15.png
new file mode 100644
index 0000000000000000000000000000000000000000..17c54ce53e7b79b4123f6419f0224b37055f82f6
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/15.png differ
diff --git a/content/zh/post/luoyuchen/figures/16.png b/content/zh/post/luoyuchen/figures/16.png
new file mode 100644
index 0000000000000000000000000000000000000000..66d6879f5ce8d2c83e25ac7151fdaed56a38de9f
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/16.png differ
diff --git a/content/zh/post/luoyuchen/figures/17.png b/content/zh/post/luoyuchen/figures/17.png
new file mode 100644
index 0000000000000000000000000000000000000000..4a1b24758f9544c97781b3c3503ed0c16e45f914
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/17.png differ
diff --git a/content/zh/post/luoyuchen/figures/18.png b/content/zh/post/luoyuchen/figures/18.png
new file mode 100644
index 0000000000000000000000000000000000000000..b27712a4a64a44c8379feaff8a52cc69bb7c998f
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/18.png differ
diff --git a/content/zh/post/luoyuchen/figures/19.png b/content/zh/post/luoyuchen/figures/19.png
new file mode 100644
index 0000000000000000000000000000000000000000..a078bbb82adcd494f07b7009f2f4a75fdf556163
Binary files /dev/null and b/content/zh/post/luoyuchen/figures/19.png differ
diff --git a/content/zh/post/luoyuchen/figures/2.png b/content/zh/post/luoyuchen/figures/2.png
new file mode 100644
index 0000000000000000000000000000000000000000..10d6d32ae5b14ad677bdd217885016a5dc7c2916
Binary files /dev/null and 
b/content/zh/post/luoyuchen/figures/2.png differ diff --git a/content/zh/post/luoyuchen/figures/20.png b/content/zh/post/luoyuchen/figures/20.png new file mode 100644 index 0000000000000000000000000000000000000000..e002e0d1f13249320275e92412a3af39b0924629 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/20.png differ diff --git a/content/zh/post/luoyuchen/figures/21.png b/content/zh/post/luoyuchen/figures/21.png new file mode 100644 index 0000000000000000000000000000000000000000..b3d07f93b0d98b1b6cb243c58edb291271ff1bdd Binary files /dev/null and b/content/zh/post/luoyuchen/figures/21.png differ diff --git a/content/zh/post/luoyuchen/figures/22.png b/content/zh/post/luoyuchen/figures/22.png new file mode 100644 index 0000000000000000000000000000000000000000..96dd498496327339586b2e4594895a1edfefb85e Binary files /dev/null and b/content/zh/post/luoyuchen/figures/22.png differ diff --git a/content/zh/post/luoyuchen/figures/23.png b/content/zh/post/luoyuchen/figures/23.png new file mode 100644 index 0000000000000000000000000000000000000000..682fdee0c0b6cc2c89657cf3a1f21545cc56c9de Binary files /dev/null and b/content/zh/post/luoyuchen/figures/23.png differ diff --git a/content/zh/post/luoyuchen/figures/24.png b/content/zh/post/luoyuchen/figures/24.png new file mode 100644 index 0000000000000000000000000000000000000000..42f2e9faebb879437ebf58590f28b5edb198cad7 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/24.png differ diff --git a/content/zh/post/luoyuchen/figures/25.png b/content/zh/post/luoyuchen/figures/25.png new file mode 100644 index 0000000000000000000000000000000000000000..11deb6e9af9c641d7cc70347ddb34a09ea78eafe Binary files /dev/null and b/content/zh/post/luoyuchen/figures/25.png differ diff --git a/content/zh/post/luoyuchen/figures/26.png b/content/zh/post/luoyuchen/figures/26.png new file mode 100644 index 0000000000000000000000000000000000000000..14aba54c7481132a8cda88ba7b3394ca3fc94a11 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/26.png differ diff --git a/content/zh/post/luoyuchen/figures/3.png b/content/zh/post/luoyuchen/figures/3.png new file mode 100644 index 0000000000000000000000000000000000000000..af1466ae5403bbc6518a7cb055c264a520480e5c Binary files /dev/null and b/content/zh/post/luoyuchen/figures/3.png differ diff --git a/content/zh/post/luoyuchen/figures/4.png b/content/zh/post/luoyuchen/figures/4.png new file mode 100644 index 0000000000000000000000000000000000000000..f4484aba9a466b5e791e3321e6ac1c8c1a824822 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/4.png differ diff --git a/content/zh/post/luoyuchen/figures/5.png b/content/zh/post/luoyuchen/figures/5.png new file mode 100644 index 0000000000000000000000000000000000000000..f3081daf3d83a2651860ed40ccd30273d15a357d Binary files /dev/null and b/content/zh/post/luoyuchen/figures/5.png differ diff --git a/content/zh/post/luoyuchen/figures/6.png b/content/zh/post/luoyuchen/figures/6.png new file mode 100644 index 0000000000000000000000000000000000000000..f8da3a74ec63049cc77041c8a6e98897f6c2b930 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/6.png differ diff --git a/content/zh/post/luoyuchen/figures/7.png b/content/zh/post/luoyuchen/figures/7.png new file mode 100644 index 0000000000000000000000000000000000000000..b83661e7e305e5c5d46d34442a0ab16e6459a411 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/7.png differ diff --git a/content/zh/post/luoyuchen/figures/8.png b/content/zh/post/luoyuchen/figures/8.png new file mode 
100644 index 0000000000000000000000000000000000000000..d8c8b63c703e5594927717fe996ef7b6cb13ea10 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/8.png differ diff --git a/content/zh/post/luoyuchen/figures/9.png b/content/zh/post/luoyuchen/figures/9.png new file mode 100644 index 0000000000000000000000000000000000000000..247beb379fe0380a01392b7620dc52f5aaa88714 Binary files /dev/null and b/content/zh/post/luoyuchen/figures/9.png differ diff --git a/content/zh/post/luoyuchen/title/1.png b/content/zh/post/luoyuchen/title/1.png new file mode 100644 index 0000000000000000000000000000000000000000..c0766c878d2df76297a0e599c3ab07e75a417d07 Binary files /dev/null and b/content/zh/post/luoyuchen/title/1.png differ diff --git a/content/zh/post/mingruifu/image/1.jpg b/content/zh/post/mingruifu/image/1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b7fac59c9955d78d6b0d8b9a8dc99e37adafbed Binary files /dev/null and b/content/zh/post/mingruifu/image/1.jpg differ diff --git a/content/zh/post/mingruifu/image/2.jpg b/content/zh/post/mingruifu/image/2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7870fdeea0c60f5f7b6f996cecaec49017d101cf Binary files /dev/null and b/content/zh/post/mingruifu/image/2.jpg differ diff --git "a/content/zh/post/mingruifu/openGauss\345\256\211\350\243\205\345\271\266\344\275\277\347\224\250DataStudio\345\267\245\345\205\267\350\277\236\346\216\245\344\275\277\347\224\250.md" "b/content/zh/post/mingruifu/openGauss\345\256\211\350\243\205\345\271\266\344\275\277\347\224\250DataStudio\345\267\245\345\205\267\350\277\236\346\216\245\344\275\277\347\224\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..ebace71b4dc5843b45bd8d33a7740dd983e6ae3e --- /dev/null +++ "b/content/zh/post/mingruifu/openGauss\345\256\211\350\243\205\345\271\266\344\275\277\347\224\250DataStudio\345\267\245\345\205\267\350\277\236\346\216\245\344\275\277\347\224\250.md" @@ -0,0 +1,177 @@ ++++ +title = "本地安装单机版openGauss并使用DataStudio工具连接使用" +date = "2021-12-20" +tags = ["openGauss安装及DataStudio工具简单使用"] +archives = "2021-12" +author = "mingruifu" +summary = "openGauss安装及DataStudio工具简单使用" +img="/zh/post/mingruifu/title/title1.png" +times = "11:30" ++++ + +### 1.虚拟机的安装 + +#### 1.1安装virtualbox + +1. 访问[https://www.virtualbox.org/wiki/Downloads](https://www.virtualbox.org/wiki/Downloads) 安装对应平台的版本 +2. 按照提示步骤安装好virtualbox环境 + +#### 1.2安装vagrant + +1. 访问[https://www.vagrantup.com/downloads](https://www.vagrantup.com/downloads) 安装对应平台的vagrant + +2. 按照提示步骤安装好vagrant环境 + +3. 执行`vagrant -v`,显示如下信息,则安装成功 + + ```shell + Vagrant 2.2.19 + ``` + +#### 1.3安装centos7 + +1. 建立一个文件夹创建虚拟机环境 + +2. 使用国内镜像源初始化Vagrantfile文件 + + ```shell + vagrant init centos7 https://mirrors.ustc.edu.cn/centos-cloud/centos/7/vagrant/x86_64/images/CentOS-7.box + ``` + +3. 
修改Vagrantfile文件,配置虚拟机
+
+```ruby
+Vagrant.configure("2") do |config|
+  # 镜像名称
+  config.vm.box = "centos7"
+  # 国内镜像源
+  config.vm.box_url = "https://mirrors.ustc.edu.cn/centos-cloud/centos/7/vagrant/x86_64/images/CentOS-7.box"
+  # 使用公共网络
+  config.vm.network "public_network"
+  config.vm.provider "virtualbox" do |vb|
+    # 配置虚拟机名称
+    vb.name = "openGauss"
+    # 配置虚拟机cpu个数
+    vb.cpus = 2
+    # 配置虚拟机内存
+    vb.memory = 1024
+  end
+end
+```
+
+4. 启动并进入虚拟机
+
+```shell
+# 启动虚拟机
+vagrant up
+# 进入虚拟机
+vagrant ssh
+# 关闭防火墙
+systemctl disable firewalld.service
+systemctl stop firewalld.service
+# 关闭SELinux
+sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
+# 将各数据库节点的字符集设置为相同的字符集,可以在/etc/profile文件中添加“export LANG=XXX”(XXX为Unicode编码)
+vim /etc/profile
+# 关闭交换内存
+swapoff -a
+# 更新yum
+yum -y update
+# 清除缓存
+yum clean all
+# 安装openjdk
+yum install -y java-1.8.0-openjdk-1.8.0.212.b04-0.el7_6.x86_64
+yum install -y java-1.8.0-openjdk-devel-1.8.0.212.b04-0.el7_6.x86_64
+# 配置jdk环境变量:在/etc/profile末尾加入以下三行
+vi /etc/profile
+export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.212.b04-0.el7_6.x86_64
+export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
+export PATH=$PATH:$JAVA_HOME/bin
+# 生效环境变量
+source /etc/profile
+# 重启虚拟机
+vagrant reload
+```
+
+### 2.安装单机版openGauss
+
+1. 访问`https://opengauss.org/zh/download.html`下载`openGauss_2.1.0 极简版`
+2. 使用vagrant用户安装openGauss
+
+```shell
+# 将安装包放在Vagrantfile同级目录,并重启虚拟机
+vagrant reload
+# 进入安装包所在目录
+cd /vagrant
+# 创建安装目录,并解压openGauss压缩包到该目录
+mkdir -p /opt/openGauss
+tar -jxf openGauss-2.1.0-CentOS-64bit.tar.bz2 -C /opt/openGauss
+# 进入解压后目录下的simpleInstall
+cd /opt/openGauss/simpleInstall
+# 执行install.sh脚本安装openGauss(-w 指定初始化密码,-p 指定端口,默认5432)
+sh install.sh -w test@123 -p 40200
+# 使用如下命令启动/停止/重启数据库(任选其一)
+gs_ctl start|stop|restart -D $GAUSSHOME/data/single_node -Z single_node
+```
+
+> 执行时,如果出现报错“the maximum number of SEMMNI is not correct, the current SEMMNI is xxx. Please check it.”,请使用有root权限的用户执行如下命令。
+
+```shell
+sysctl -w kernel.sem="250 85000 250 330"
+```
+
+3. 修改`/opt/openGauss/data/single_node/pg_hba.conf`文件
+
+```shell
+# 在文件末尾追加客户端host认证方式
+host all all 0.0.0.0/0 sha256
+```
+
+4. 修改`/opt/openGauss/data/single_node/postgresql.conf`文件
+
+```shell
+# 将监听地址设置为*或者指定的客户端ip地址
+listen_addresses = '*'
+```
+
+5. 重启数据库
+
+```shell
+cd /opt/openGauss/bin
+# 重启数据库
+./gs_ctl restart -D /opt/openGauss/data/single_node/ -Z single_node
+```
+
+6. 连接数据库,创建用户
+
+```shell
+# 连接数据库
+/opt/openGauss/bin/gsql -d postgres -p 40200 -W 'test@123' -r
+# 创建一个用户
+create role test with createdb password "test@1234";
+# 创建数据库
+create database testdb owner test;
+# 授予用户对数据库的全部权限
+grant all privileges on database testdb to test;
+# 授予该用户管理员权限
+grant all privileges to test;
+```
+
+7. 使用新用户连接数据库
+
+```shell
+/opt/openGauss/bin/gsql -d postgres -p 40200 -U test -W 'test@1234' -r
+```
+
+### 3.下载安装DataStudio
+
+1. 访问[https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html) 下载DataStudio_2.1.0
+2. 打开DataStudio连接数据库
+![连接数据库](../image/1.jpg)
+3. 
连接上之后可以看见DataStudio的功能操作界面 +![连接数据库](../image/2.jpg) +- 1:工具栏菜单 +- 2:数据库对象浏览器 +- 3:数据库操作终端,书写业务sql +- 4: sql助手 diff --git a/content/zh/post/mingruifu/title/title1.png b/content/zh/post/mingruifu/title/title1.png new file mode 100644 index 0000000000000000000000000000000000000000..70953c80a88ba62ea617a60b8527e6e739a8f9ea Binary files /dev/null and b/content/zh/post/mingruifu/title/title1.png differ diff --git "a/content/zh/post/mqq/OpenGauss SQL\350\247\243\346\236\220\346\272\220\347\240\201\345\210\206\346\236\220.md" "b/content/zh/post/mqq/OpenGauss SQL\350\247\243\346\236\220\346\272\220\347\240\201\345\210\206\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..1d1b4fb6d393f5e15e72dead837b0ba6fbaae43b --- /dev/null +++ "b/content/zh/post/mqq/OpenGauss SQL\350\247\243\346\236\220\346\272\220\347\240\201\345\210\206\346\236\220.md" @@ -0,0 +1,186 @@ ++++ +title = "OpenGauss SQL解析源码分析" +date = "2021-11-27" +tags = ["OpenGauss SQL解析源码分析"] +archives = "2021-11" +author = "mqq" +summary = "OpenGauss SQL解析源码分析" +img = "/zh/post/mqq/title/title.png" ++++ + +# OpenGauss SQL解析源码分析 + + +## SQL 引擎简介: + +SQL引擎整个编译的过程如下图所示,在编译的过程中需要对输入的SQL语言进行词法分析、语法分析、语义分析,从而生成逻辑执行计划,逻辑执行计划经过代数优化和代价优化之后,产生物理执行计划。 + +SQL解析通常包含词法分析、语法分析、语义分析几个子模块。SQL是介于关系演算和关系代数之间的一种描述性语言,它吸取了关系代数中一部分逻辑算子的描述,而放弃了关系代数中"过程化"的部分,SQL解析主要的作用就是将一个SQL语句编译成为一个由关系算子组成的逻辑执行计划。![](https://img-blog.csdnimg.cn/20c9730d6b754a57b2e145a25fc8b47d.png?x-oss-process,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_15,color_FFFFFF,t_70,g_se,x_16) + +数据库的SQL引擎是数据库重要的子系统之一,它对上负责承接应用程序发送过来的SQL语句,对下则负责指挥执行器运行执行计划。其中优化器作为SQL引擎中最重要、最复杂的模块,被称为数据库的"大脑",优化器产生的执行计划的优劣直接决定数据库的性能。右图为SQL引擎的各个模块的响应过程。下图的绿色部分代表解析树的生成,;蓝色部分代表查询树的生成部分。 + +![](https://img-blog.csdnimg.cn/c5e36efb9b22452c9aed5f746586897d.png?x-oss-process,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_19,color_FFFFFF,t_70,g_se,x_16) + + +## 理论分析 +**SQL解析各模块功能介绍:** + + 假设要在student表里找到查找序号为1的学生姓名,其SQL语句如下: + +> Select name +> +>from student +>where no=1 + +1. 
**词法分析:**
+
+> 从查询语句中识别出系统支持的关键字、标识符、运算符、终结符等,确定每个词固有的词性。分析结果如下表所示,可以看到SQL语句被逐个切分成字词和符号,形成可以被解读语义的记号。
+
+ | **词性** | **内容** |
+ | ------------ | --------- |
+ | **关键字** | **Select、from、where** |
+ | **标识符** | **name、student、no** |
+ | **操作符** | **=** |
+ | **常量** | **1** |
+
+###### **(2)语法分析:**
+
+根据SQL的标准定义语法规则,使用词法分析中产生的词去匹配语法规则,如果一个SQL语句能够匹配一个语法规则,则生成对应的抽象语法树(AST)。
+
+下图中的\<targetList\>代表投影,\<fromClause\>代表关系,即查询来源于哪些表,\<whereClause\>代表条件,一般是一些表达式。
+
+![](https://img-blog.csdnimg.cn/74294d0fa82b455fa7dea8305ca0e55b.png)
+
+
+###### **(3)语义分析:**
+
+在SQL标准中确定了SQL的关键字以及语法规则信息。语义分析对语法树进行有效性检查,检查语法树中对应的表、列、函数、表达式是否有对应的元数据,并将抽象语法树转换为逻辑执行计划(关系代数表达式),即查询树,可由下图的关系表达式来呈现:![在这里插入图片描述](https://img-blog.csdnimg.cn/8efad4ba77db41bc8572776fff2dbd62.png)
+
+
+## 代码分析
+
+### 3.1 总体流程
+![在这里插入图片描述](https://img-blog.csdnimg.cn/8c67c66f177142ba98ac57849f6db27a.png)
+
+
+
+exec_simple_query函数是整个过程的主函数,负责调度解析的所有过程:它先调用pg_parse_query进入词法分析和语法分析的主过程,pg_parse_query再调用词法分析和语法分析的入口函数raw_parser生成分析树,并把分析树(raw_parsetree_list)返回给exec_simple_query;随后exec_simple_query调用查询分析与重写函数,由其调用parse_analyze函数完成语义分析,得到查询树链表query,查询树链表再被传递给查询重写模块,重写结果最终返回给exec_simple_query主函数。
+
+### 3.2 词法分析
+
+(1)openGauss中的词法文件是scan.l,它根据SQL语言标准对SQL语言中的关键字、标识符、操作符、常量、终结符进行了定义和识别。代码如下图:
+
+![](https://img-blog.csdnimg.cn/fb71b61d3b3e4bc8bf293e5a51294327.png)
+
+
+(2)下图是词法分析涉及到的关键字原型,由三部分组成,分别是名字、Token值、类别。名字是字符串原型,Token值是一个int型的数。openGauss在kwlist.h中定义了大量的关键字,按照字母顺序排列,便于在查找关键字时使用二分法。
+
+![](https://img-blog.csdnimg.cn/103d9351b5e14e66a682fb587e161983.png)
+
+
+### 3.3 语法分析
+
+解析树的节点定义如下:
+
+(1)仅在叶节点出现的一些基本属性(一些常见的属性会在后面介绍)
+
+![](https://img-blog.csdnimg.cn/1358e54e68784a7e947bbc51b52f85f4.png)
+
+
+(2)下图的属性不仅用于叶节点,还能用于更上层的非叶子节点
+
+![](https://img-blog.csdnimg.cn/3a89a6715dc542a9aaf07d8202672c75.png)
+
+
+(3)下图部分的属性仅用于非叶子节点
+
+![](https://img-blog.csdnimg.cn/e45863803bd04af6ae637b2e3e7a5ed3.png)
+
+
+#### 原始解析树的生成(完成词法和语法分析后生成的多叉树):
+
+对于下面的SQL查询语句:
+![在这里插入图片描述](https://img-blog.csdnimg.cn/cc1bd07c4b1f4766a6fee3101fd20f63.png)
+
+完成语法分析后生成的原始解析树如下(蓝色部分分别与上述语句各部分相对应):
+
+![](https://img-blog.csdnimg.cn/39b455c8c19d407699a0bc98f50743c1.png)
+
+
+
+- targetList:最后查询完成时要显示的目标列
+
+- fromClause:from子句,标识该查询来自某个或某些表。
+
+- whereClause:where子句,一般接一些条件表达式,表示查询条件
+
+- sortClause:order by子句节点,需要按照某一列排序时会用到。
+
+*需要注意的是,由于解析器仅在生成解析树时检查输入的语法,因此只有在查询中出现语法错误时才会返回错误。解析器不检查输入查询的语义。例如,即使查询包含不存在的表名,解析器也不会返回错误。语义检查由分析器(analyze.cpp)完成。*
+
+### 3.4 语义分析
+
+(1)总体流程:进入到analyze.cpp进行语义分析,将所有语句转换为查询树供重写器和计划器进一步处理。
+
+输入:原始语法解析树parseTree和源语句sourceText
+
+输出:查询树query
+
+![](https://img-blog.csdnimg.cn/758de674b1804aebb7564fd82128fb32.png)
+
+
+(2)查询树节点的定义:
+
+![](https://img-blog.csdnimg.cn/169dff7e9d574e28b47707007096237a.png)
+
+
+上图是查询树节点的基础属性部分,也是大部分语句都共有的部分,表示SQL语句的类型(增删改查)、来源和标识号等;
+
+![](https://img-blog.csdnimg.cn/43792a6e6cbe4ff4b260afd4af827785.png)
+
+
+上图为查询树节点的第二部分,定义bool类型的变量,表示该查询的相关属性,例如有无子语句、是否需要去重等。
+![](https://img-blog.csdnimg.cn/b24c8403347c498a9f27761098084946.png)
+
+上图为查询树节点的第三部分定义,主要用于表示目标列表、from子句、where子句等的节点或链表。
+
+### 3.5 查询树的生成和可视化
+
+第一步,修改/opt/software/openGauss/data/single_node目录下的配置文件postgresql.conf中的配置项debug_print_parse,即可在日志文件中打印查询树。
+
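+上述debug_print_parse(以及下文将要修改的client_min_messages)也可以在gsql会话级临时设置,便于快速验证(示意:端口、库名与示例表均为本文环境下的假设值,请按实际环境调整):
+
+```shell
+# 以管理员用户连接数据库,在当前会话打开查询树打印,并让LOG级别消息回显到客户端
+gsql -d postgres -p 5432 <<'SQL'
+SET debug_print_parse = on;
+SET client_min_messages = log;
+SELECT name FROM student WHERE no = 1;
+SQL
+```
+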
+![在这里插入图片描述](https://img-blog.csdnimg.cn/deef654c2a2b4bf4a7fe86aabb36c694.png) + + +第二步,修改client_min_messages为log(如下图),以便可以在客户端输出语法树。 + +![\[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-eVdcTUmO-1638013803031)(./images/media/image18.png)\]](https://img-blog.csdnimg.cn/56c210614e96424693ab82cb4d200222.png) + + +之后我们可以看到客户端生成的输出语法树如下: + +![\[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-ZqbDJRxd-1638013803032)(./images/media/image19.png)\]](https://img-blog.csdnimg.cn/d93be651f52940bfbb93d8ac19d274a6.png) + + +最后,利用开源工具(https://github.com/shenyuflying/pgNodeGraph)可以把客户端输出的查询树可视化,如下图: +![在这里插入图片描述](https://img-blog.csdnimg.cn/ca0457a13f034c42a633ea581203a33d.png) + + +对比语法分析后所生成的解析树(下图1)和语义分析后生成的查询树(下图2,已简化)不难看出其节点结构的变化, + +- 红框部分是目标列表,对应查询语句中的select子句,即查询完成后需要显示的某一或某几属性列。 + +- 绿框部分代表查询来源的表,主要对应的是查询语句中的from子句,在查询树生成时候还会附带表的别名和其他信息。 + +- 黄框部分在解析树时候主要对应where子句的节点,其中A_Expr代表运算符号,ColumnRef是该属性(id)源自哪张表,A_CONST代表的比较子句的常量;而在查询树时from子句和where子句会合并为一个jointree节点。 + +- 蓝框部分主要是一些其他的sort,having等子句的节点主要做排序等作用。 + +**图一** + +![在这里插入图片描述](https://img-blog.csdnimg.cn/0d44f584d75c4e728a68662a53b5250b.png) + +**图二** + +![在这里插入图片描述](https://img-blog.csdnimg.cn/5a49c04825784806b7ff29f7fbdfbfe2.png) + diff --git "a/content/zh/post/mqq/OpenGauss\346\225\260\346\215\256\345\272\223\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214\357\274\210OpenGauss2.0.1\357\274\211.md" "b/content/zh/post/mqq/OpenGauss\346\225\260\346\215\256\345\272\223\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214\357\274\210OpenGauss2.0.1\357\274\211.md" new file mode 100644 index 0000000000000000000000000000000000000000..5e8ab45190fc408a891ab9c2ec17b879d2828e27 --- /dev/null +++ "b/content/zh/post/mqq/OpenGauss\346\225\260\346\215\256\345\272\223\347\232\204\345\256\211\350\243\205\350\277\220\350\241\214\357\274\210OpenGauss2.0.1\357\274\211.md" @@ -0,0 +1,196 @@ ++++ + +title = "openGauss数据库的安装运行(openGauss2.0.1)" +date = "2021-12-06" +tags = ["openGauss数据库的安装运行"] +archives = "2021-12" +author = "mqq" +summary = "openGauss数据库的安装运行" +img = "/zh/post/mqq/title/title.png" + ++++ + +# 环境准备 +
    + + +## 硬件要求 +- 内存 >= 32GB + +- CPU >= 8核 2.0GHZ + +- 硬盘 >= 40GB + +## 软件要求 +- 操作系统:CentOS 7.6 x86_64 + +- Python:Python 3.6.X + +- 虚拟机:VMware:16.1.2 + + + +# 配置CentOS7.6 +![](https://img-blog.csdnimg.cn/d2d3fe5be58b46a5952b06a0b25133e4.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +安装完成CentOS7.6后进入系统,开始配置。 + +## 设置语言,时区: +![在这里插入图片描述](https://img-blog.csdnimg.cn/8740d1fb5ed64b3bba4855c531784e90.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 设置网络连接: + +![在这里插入图片描述](https://img-blog.csdnimg.cn/b11f73c4ac074eeea847d46389ca9963.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) + +设置完成后,来到设备选项界面按照如下配置即可。 +![在这里插入图片描述](https://img-blog.csdnimg.cn/a58114e33f1642c7918e897222ae1bc0.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 设置用户名和密码 +![在这里插入图片描述](https://img-blog.csdnimg.cn/e25dfce8d3ef47b19b8945b59c8e5c7e.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +###安装net-tools工具 + + +![在这里插入图片描述](https://img-blog.csdnimg.cn/ff4b631155754420b539f442bcb3afe4.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +安装完成后可以看到 “complete!” +![在这里插入图片描述](https://img-blog.csdnimg.cn/4b7c5e21866649048b9cac39e195a3bc.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 使用SSH连接并设置相关 +![在这里插入图片描述](https://img-blog.csdnimg.cn/efbb700cf55d4f90819df39b48eca339.png) + + +_可能遇到的问题:过程试图写入的管道不存在_ +![在这里插入图片描述](https://img-blog.csdnimg.cn/8e01856f87264150a96042e9a6e106de.png) + +_解决方法:重新检查Host地址是否变更,每次输入的Host可能会发生变化_ + +## yum源更新 +![在这里插入图片描述](https://img-blog.csdnimg.cn/b931bb98ec5a4bf3990207f0d567ecec.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 安装python3.6 +![在这里插入图片描述](https://img-blog.csdnimg.cn/41aa77d1bbf74b99a933880df7f65952.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 安装其他相关文件 +![在这里插入图片描述](https://img-blog.csdnimg.cn/900ea56cf36049df862dba1b154aab78.png) +# 开始安装 +
    + +## 关闭防火墙和Selinux +命令如下: +``` +systemctl disable firewalld.service +systemctl stop firewalld.service +sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config +setenforce 0 +``` +设置字符集参数并检查设置时区: +![在这里插入图片描述](https://img-blog.csdnimg.cn/57aa4acc8cfa4df0af61deee08fd9922.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 创建用户组与用户 +创建用户组dbgrp、用户omm,将该用户添加至root组,并修改用户omm的密码 +![在这里插入图片描述](https://img-blog.csdnimg.cn/66d95285e77b450dab0d48a9d0727d59.png) +## 解压 +![在这里插入图片描述](https://img-blog.csdnimg.cn/7cabd2989f9749fa99479fccab82daa5.png) +_可能遇到的问题:解压不成功_ +![在这里插入图片描述](https://img-blog.csdnimg.cn/9de721393c0d4431bb8c7109e66a8f27.png) +_解决方案:尝试重新安装VMware Tools_ +![在这里插入图片描述](https://img-blog.csdnimg.cn/f4aa88d607de4efb9084ec13b9d8d922.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) + +## 安装脚本: +命令格式:`sh install.sh -w xxxx` +![在这里插入图片描述](https://img-blog.csdnimg.cn/ff8c2cefb38a45f29e67d5a4d86e6816.png) +由于openGauss端口号默认为5432默认生成名称为postgres的数据库: +![在这里插入图片描述](https://img-blog.csdnimg.cn/4d3f3788652c4628bc1319a4d46447b3.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +使用ps和gs_ctl查看进程是否正常: +![在这里插入图片描述](https://img-blog.csdnimg.cn/18e941ec7eea4345bb9269d587e79270.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +## 命令行访问数据库 +*以下以默认数据库里的school数据库为例* + +查看数据库school的class表结构: +![在这里插入图片描述](https://img-blog.csdnimg.cn/0327a25726284e2aa1a2dc0651686145.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +school数据库相关信息: +![在这里插入图片描述](https://img-blog.csdnimg.cn/c6f52563079a46ffa568c1736ff3ca57.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) +# 以JDBC的方式访问数据库并查找 +*以查找school数据库中的class表为例,查找其中的 cla_id, cla_name, cla_teacher并输出:* + +```java +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +class openGaussTest { + public static void main(String[] args) { + Connection conn = getConnect("mqq", "Mqq123123"); + + Statement stmt = null; + try { + stmt = conn.createStatement(); + ResultSet rs = null; + try { + rs = stmt.executeQuery( + "select cla_id, cla_name, cla_teacher from class;"); + while (rs.next()) { + int cla_id = rs.getInt(1); + String cla_name = rs.getString(2); + int cla_teacher = rs.getInt(3); + System.out.println(cla_id +" "+ cla_name + " "+cla_teacher); + } + } catch (SQLException e) { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + + stmt.close(); + } catch (SQLException e) { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + public static Connection getConnect(String username, String passwd) { + // 驱动类 + String driver = "org.postgresql.Driver"; + // 数据库连接描述符 + String sourceURL = "jdbc:postgresql://192.168.195.129:5432/school"; + Connection conn = null; + + try { + // 加载驱动 + Class.forName(driver); + } 
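+        // 补充注释:驱动类加载失败(例如classpath中缺少postgresql.jar)时,
+        // 下面的catch会打印堆栈并返回null,调用方需要对返回值做null判断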
catch (Exception e) { + e.printStackTrace(); + return null; + } + + try { + // 创建连接 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + return conn; + } + +} +``` + +*查询结果如下:* +![在这里插入图片描述](https://img-blog.csdnimg.cn/ac49acca2f72474abe7c997e8b79fc83.png?x-oss-process=image/watermark,type_ZHJvaWRzYW5zZmFsbGJhY2s,shadow_50,text_Q1NETiBAcXFfNDQzNjExMzY=,size_20,color_FFFFFF,t_70,g_se,x_16) + + + diff --git a/content/zh/post/mqq/title/title.png b/content/zh/post/mqq/title/title.png new file mode 100644 index 0000000000000000000000000000000000000000..557eeb0f0228faee274149d39dc2bd6885ce7159 Binary files /dev/null and b/content/zh/post/mqq/title/title.png differ diff --git "a/content/zh/post/opengauss_deploy/ECS\344\270\212\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223.md" "b/content/zh/post/opengauss_deploy/ECS\344\270\212\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223.md" new file mode 100644 index 0000000000000000000000000000000000000000..fe372152a96dc8da792455f1d7aea4962e68c1a8 --- /dev/null +++ "b/content/zh/post/opengauss_deploy/ECS\344\270\212\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223.md" @@ -0,0 +1,609 @@ ++++ + +title = "在ECS上安装部署openGauss数据库指导手册" +date = "2021-11-13" +tags = ["在ECS上安装部署openGauss数据库指导手册"] +archives = "2021-11-13" +author = "opengauss_deploy" +summary = "在ECS上安装部署openGauss数据库指导手册" +times = "19:30" + ++++ + +## 在ECS上安装部署openGauss数据库指导手册 + +文档下载:[在ECS上安装部署openGauss数据库指导手册.docx](../docs/在ECS上安装部署openGauss数据库指导手册.docx) + + + +## 前 言 +### 简介 +openGauss是关系型数据库,采用客户端/服务器,单进程多线程架构,支持单机和一主多备部署方式,备机可读,支持双机高可用和读扩展。 +本实验主要描述openGauss数据库在弹性云服务器(openEuler)上的安装部署。 +#### 内容描述 +本实验主要内容为弹性云服务器(openEuler)上安装部署openGauss数据库,并进行简单的数据库相关操作。 +#### 前置条件 +由于本实验主要是在openEuler操作系统上进行openGauss数据库的部署,需要掌握Linux系统的基本操作和系统命令,详细请参见附录一。 +#### 实验环境说明 + 1. 组网说明 +本实验环境为华为云环境,需要购买弹性云服务器。 + 2. 
设备介绍
+为了满足openGauss安装部署实验需要,建议每套实验环境采用以下软件配置:
+
+| 设备名称 | 设备型号 |
+| --- | --- |
+| Linux操作系统 | openEuler 20.03 LTS |
+| Python | Python 3.7.X |
+
+### 实验概览
+
+本实验概览图
+![](../images/ecscloud/001.png)
+
+
+## 1 openGauss数据库安装
+
+### 1.1 实验介绍
+
+1.1.1 关于本实验
+
+本实验主要描述openGauss数据库在openEuler弹性云服务器上的安装部署。
+
+1.1.2 实验目的
+
+> 了解openGauss数据库部署方式;\
+> 掌握openGauss数据库安装部署方法。
+
+### 1.2 购买弹性云服务器ECS(openEuler ARM 操作系统)
+1.2.1 登录华为云 \
+步骤 1 进入华为云官网。\
+华为云官网:https://www.huaweicloud.com/,进入华为云官网,点击登录。\
+![](../images/ecscloud/002.png)
+
+步骤 2 输入账号名和密码,点击登录。
+![](../images/ecscloud/003.png)
+
+如果还没有注册,点击免费注册,按步骤进行注册后进行登录。
+
+1.2.2 购买弹性云服务器ECS \
+步骤 1 在华为云主页(https://www.huaweicloud.com/)点击产品,选择基础服务,再选择弹性云服务器ECS。
+![](../images/ecscloud/004.png)
+
+步骤 2 进入弹性云服务器ECS购买界面。
+![](../images/ecscloud/005.png)
+
+步骤 3 自定义购买进行基础配置。
+表1-1 ECS基础配置
+
+| 配置选项 | 配置值 |
+| --- | --- |
+| 区域 | 华北-北京四(推荐,其他区域可能会导致无法选择openEuler公共镜像)|
+| CPU架构 | 鲲鹏计算 |
+| 规格 | 最新系列 2vCPUs \| 4GB |
+| 镜像 | 公共镜像:openEuler 20.03 64bit with ARM(40GB) |
+
+![](../images/ecscloud/006.png)
+![](../images/ecscloud/007.png)
+
+
+其余默认即可,点击下一步进行网络配置。
+
+步骤 4 自定义购买进行网络配置。
+表1-2 ECS网络配置
+
+| 配置选项 | 配置值 |
+| --- | --- |
+| 网络 | Vpc-default(192.168.0.0/16)(选现有默认网络即可) |
+| 弹性公网IP | 现在购买 |
+| 公网带宽 | 按流量计费 |
+| 带宽大小 | 5 Mbit/s |
+![](../images/ecscloud/008.png)
+![](../images/ecscloud/009.png)
+
+
+
+其余默认即可,点击下一步进行高级配置。
+
+步骤 5 自定义购买进行高级配置。\
+![](../images/ecscloud/010.png)
+记住用户名为root,然后输入自定义密码和确认密码,其余默认即可。点击下一步确认设置。
+
+
+步骤 6 确认配置,完成购买。
+
+![](../images/ecscloud/011.png)
+确认设置信息,尤其是配置费用,然后勾选协议“我已经阅读并同意《华为镜像免责声明》”,点击立即购买。
+![](../images/ecscloud/012.png)
+
+查看云服务器列表
+
+![](../images/ecscloud/013.png)
+
+购买成功!
+注意:本次购买鲲鹏服务器价格为公测价格,具体价格以华为云官网为准。
+
+### 1.3 修改操作系统配置
+
+1.3.1 登录弹性云服务器 \
+为了操作方便,可以使用SSH工具(比如:PuTTY等)从本地电脑通过配置弹性云服务器的弹性公网IP地址(如:124.70.36.251)来连接ECS,并使用root用户来登录。
+![](../images/ecscloud/014.png)
+
+1.3.2 设置字符集参数
+
+将各数据库节点的字符集设置为相同的字符集,可以在/etc/profile文件中添加“export LANG=XXX”(XXX为Unicode编码)。
+步骤 1 在/etc/profile文件中添加“export LANG=en_US.UTF-8”。
+```
+[root@ecs-c9bf ~]# cat >>/etc/profile<<EOF
+export LANG=en_US.UTF-8
+EOF
+[root@ecs-c9bf ~]# source /etc/profile
+```
+步骤 2 使用vi创建openGauss配置文件clusterconfig.xml。
+```
+[root@ecs-c9bf ~]# mkdir -p /opt/software/openGauss
+[root@ecs-c9bf ~]# vi /opt/software/openGauss/clusterconfig.xml
+```
+步骤 3 输入“i”进入INSERT模式,粘贴如下单机部署参考配置(其中主机名ecs-c9bf与内网IP 192.168.0.x请替换为实际的弹性云服务器名称及私有IP,各参数含义见表1-3):
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<ROOT>
+  <!-- openGauss整体信息 -->
+  <CLUSTER>
+    <PARAM name="clusterName" value="dbCluster" />
+    <PARAM name="nodeNames" value="ecs-c9bf" />
+    <PARAM name="backIp1s" value="192.168.0.x" />
+    <PARAM name="gaussdbAppPath" value="/opt/gaussdb/app" />
+    <PARAM name="gaussdbLogPath" value="/var/log/gaussdb" />
+    <PARAM name="gaussdbToolPath" value="/opt/huawei/wisequery" />
+    <PARAM name="corePath" value="/opt/gaussdb/corefile" />
+  </CLUSTER>
+  <!-- 每台服务器上的节点部署信息 -->
+  <DEVICELIST>
+    <DEVICE sn="1000001">
+      <PARAM name="name" value="ecs-c9bf" />
+      <PARAM name="azName" value="AZ1" />
+      <PARAM name="azPriority" value="1" />
+      <PARAM name="backIp1" value="192.168.0.x" />
+      <PARAM name="sshIp1" value="192.168.0.x" />
+      <PARAM name="dataNum" value="1" />
+      <PARAM name="dataPortBase" value="26000" />
+      <PARAM name="dataNode1" value="/gaussdb/data/db1" />
+    </DEVICE>
+  </DEVICELIST>
+</ROOT>
+```
+弹性云服务器名称及私有IP查看:
+![](../images/ecscloud/015.png)
+
+步骤 4 点击“ESC”退出INSERT模式,然后输入“:wq”后回车退出编辑并保存文本。\
+表1-3 配置文件参数附加说明
+
+| 参数 | 说明 |
+| --- | --- |
+| clusterName | openGauss名称 |
+| nodeNames | openGauss中主机名称。|
+| backIp1s | 主机在后端存储网络中的IP地址(内网IP)。所有openGauss主机使用后端存储网络通讯。|
+| gaussdbAppPath | openGauss程序安装目录。此目录应满足如下要求:• 磁盘空间>1GB • 与数据库所需其它路径相互独立,没有包含关系。|
+| gaussdbLogPath | openGauss运行日志和操作日志存储目录。此目录应满足如下要求: •磁盘空间建议根据主机上的数据库节点数规划。数据库节点预留1GB空间的基础上,再适当预留冗余空间。 •与openGauss所需其它路径相互独立,没有包含关系。 此路径可选。不指定的情况下,openGauss安装时会默认指定“$GAUSSLOG/安装用户名”作为日志目录。|
+| tmpdbPath | 数据库临时文件存放目录。若不配置tmpdbPath,默认存放在/opt/huawei/wisequery/perfadm_db目录下。|
+| gaussdbToolPath | openGauss系统工具目录,主要用于存放互信工具等。此目录应满足如下要求: • 磁盘空间>100MB • 固定目录,与数据库所需其它目录相互独立,没有包含关系。此目录可选。不指定的情况下,openGauss安装时会默认指定“/opt/huawei/wisequery”作为数据库系统工具目录。|
+| corePath | openGauss core文件的指定目录。|
+
+***须知:***
+* “/opt/huawei/newsql/tools”存放互信等工具,避免权限问题,不要把实例数据目录放在此目录下。
+* 安装目录和数据目录需为空或者不存在,否则可能导致安装失败。
+* 在对数据库节点的实例进行具体配置时,需确保配置的目录之间不相互耦合。即各个配置目录不关联,删除其中任意一个目录,不会级联删除其它目录。如gaussdbAppPath为"/opt/gaussdb/app",gaussdbLogPath为"/opt/gaussdb/app/omm",当gaussdbAppPath目录被删除时,会级联删除gaussdbLogPath目录,从而引起其它问题。
+* 若需要安装脚本自动创建安装用户时,各配置的目录需保证不与系统创建的默认用户目录耦合关联。
+* 配置openGauss路径和实例路径时,路径中不能包含"|",";","&","$","<",">","`","\\","'","\"","{","}","(",")","[","]","~","*","?"特殊字符。
+
+1.4.3 初始化安装环境
+
+为了保证openGauss的正确安装,请首先对主机环境进行配置。
+
+1.4.3.1 准备安装用户及环境 \
+创建完openGauss配置文件后,在执行安装前,为了后续能以最小权限进行安装及openGauss管理操作,保证系统安全性,需要运行安装前置脚本gs_preinstall准备好安装用户及环境。
+
+1.4.3.2 前提条件\
+已按上文创建openGauss配置文件clusterconfig.xml,并已将安装包上传至/opt/software/openGauss目录。
+
前提条件\ + +1.4.3.3 注意事项\ +* 用户需要检查上层目录权限,保证安装用户对安装包和配置文件目录读写执行的权限; +* xml文件中主机的名称与IP映射配置正确; +* 只能使用root用户执行gs_preinstall命令。 +* +1.4.3.4 操作步骤\ + +步骤 1修改performance.sh文件。\ +使用vi打开文件“/etc/profile.d/performance.sh”,具体如下:\ +[root@ecs-c9bf openGauss]# vi /etc/profile.d/performance.sh \ +输入”i”,进入INSERT模式。用#注释sysctl -w vm.min_free_kbytes=112640 &> /dev/null这行。 +``` +CPUNO=`cat /proc/cpuinfo|grep processor|wc -l` +export GOMP_CPU_AFFINITY=0-$[CPUNO - 1] + +#sysctl -w vm.min_free_kbytes=112640 &> /dev/null +sysctl -w vm.dirty_ratio=60 &> /dev/null +sysctl -w kernel.sched_autogroup_enabled=0 &> /dev/null +``` +点击“ESC”退出INSERT模式。输入“:wq”后回车,保存退出。 + +步骤 2 为确保openssl版本正确,执行预安装前加载安装包中lib库。\ +执行命令如下,其中packagePath为用户安装包放置的路径,本示例中为/opt/software/openGauss。\ +`[root@ecs-c9bf openGauss]# vi /etc/profile`\ + +输入i,进入INSERT模式,在文件的底部添加如下代码,加载安装包中lib库。按下“Esc”退出INSERT模式,输入”:wq”后回车,保存后退出。 +``` +export packagePath=/opt/software/openGauss +export LD_LIBRARY_PATH=$packagePath/script/gspylib/clib:$LD_LIBRARY_PATH +``` +配置完成后,输入如下命令,使设置生效。 +``` +[root@ecs-c9bf openGauss]# source /etc/profile +``` +步骤 3 在安装包所在的目录下,解压安装包。 +``` +[root@ecs-c9bf openGauss]# cd /opt/software/openGauss +``` +解压缩安装包:\ +先解压openGauss-1.1.0-openEuler-64bit-all.tar.gz包。\ +``` +[root@ecs-c9bf openGauss]# tar -zxvf openGauss-1.1.0-openEuler-64bit-all.tar.gz +``` +再先解压openGauss-1.1.0-openEuler-64bit-om.tar.gz包。 +``` +[root@ecs-c9bf openGauss]# tar -zxvf openGauss-1.1.0-openEuler-64bit-om.tar.gz +``` +解压后如下,用ls命令查看如下: +``` +[root@ecs-c9bf openGauss]# ls +clusterconfig.xml openGauss-Package-bak_392c0438.tar.gz +lib script +openGauss-1.1.0-openEuler-64bit-all.tar.gz simpleInstall +openGauss-1.1.0-openEuler-64bit-om.sha256 upgrade_sql.sha256 +openGauss-1.1.0-openEuler-64bit-om.tar.gz upgrade_sql.tar.gz +openGauss-1.1.0-openEuler-64bit.sha256 version.cfg +openGauss-1.1.0-openEuler-64bit.tar.bz2 +``` +安装包解压后,会在/opt/software/openGauss路径下自动生成script子目录,并且在script目录下生成gs_preinstall等各种OM工具脚本。 + +步骤 4使用gs_preinstall准备好安装环境,切换到gs_preinstall命令所在目录。 +``` +[root@ecs-c9bf openGauss]# cd /opt/software/openGauss/script/ +script中内容显示如下: +[root@ecs-c9bf script]# ls +gs_backup gs_checkperf gs_om gspylib gs_uninstall __init__.py +gs_check gs_collector gs_postuninstall gs_ssh gs_upgradectl killall +gs_checkos gs_install gs_preinstall gs_sshexkey impl local +``` +步骤 5采用交互模式执行,并在执行过程中会创建root用户互信和openGauss用户互信:\ +``` +[root@ecs-c9bf script]# python gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml +``` +这里的omm为操作系统用户(注:同时omm也是openGauss的数据库管理员账号,在下面的1.4.4环节中会创建),dbgrp为运行openGauss的操作系统用户的群组名称,/opt/software/openGauss/clusterconfig.xml为openGauss配置文件路径。在执行过程中,用户根据提示选择是否创建互信,并输入root用户或openGauss用户的密码。 + +对root创建trust,输入root的密码,购买弹性服务云时自定义的密码。 +``` +Are you sure you want to create trust for root (yes/no)? yes +Please enter password for root. +Password: --说明:此处输入密码时,屏幕上不会有任何反馈,不用担心,这是LINUX操作系统对密码的保护. +Creating SSH trust for the root permission user. +创建操作系统omm用户,并对omm创建trust,并设置密码,设置为Admin@123(建议用户自定义设置密码)。 +Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes +Please enter password for cluster user. +Password: +Please enter password for cluster user again. +Password: +Successfully created [omm] user on all nodes. +成功后显示为: +…… +Setting finish flag. +Successfully set finish flag. +Preinstallation succeeded. 
+```
+
+1.4.4 执行安装\
+执行前置脚本准备好openGauss安装环境之后,按照如下过程部署openGauss。
+
+1.4.4.1 前提条件\
+* 已成功执行前置脚本gs_preinstall;
+* 服务器操作系统和网络均正常运行。
+
+1.4.4.2 操作步骤
+
+步骤 1 修改文件权限。
+```
+[root@ecs-c9bf script]# chmod -R 755 /opt/software/openGauss/script
+```
+步骤 2 登录到openGauss的主机,并切换到omm用户。\
+```
+[root@ecs-c9bf script]# su - omm
+```
+注:
+* omm指的是前置脚本gs_preinstall中-U参数指定的用户。
+* 安装脚本gs_install必须以前置脚本中指定的omm执行,否则,脚本执行会报错。
+
+步骤 3 使用gs_install安装openGauss。
+
+执行以下命令进行安装:
+```
+gs_install -X /opt/software/openGauss/clusterconfig.xml --gsinit-parameter="--encoding=UTF8" --dn-guc="max_process_memory=4GB" --dn-guc="shared_buffers=256MB" --dn-guc="bulk_write_ring_size=256MB" --dn-guc="cstore_buffers=16MB"
+```
+具体如下:
+```
+[omm@ecs-c9bf ~]$ gs_install -X /opt/software/openGauss/clusterconfig.xml --gsinit-parameter="--encoding=UTF8" --dn-guc="max_process_memory=4GB" --dn-guc="shared_buffers=256MB" --dn-guc="bulk_write_ring_size=256MB" --dn-guc="cstore_buffers=16MB"
+```
+/opt/software/openGauss/clusterconfig.xml为openGauss配置文件的路径。在执行过程中,用户需根据提示输入数据库管理员omm用户的密码,密码具有一定的复杂度,为保证用户正常使用该数据库,请记住输入的数据库密码。
+按照设置密码要求,设置密码为GaussDB@123(建议用户自定义设置密码):
+```
+encrypt cipher and rand files for database.
+Please enter password for database:
+Please repeat for database:
+begin to create CA cert files
+```
+设置的密码要符合复杂度要求:
+* 最少包含8个字符;
+* 不能和用户名和当前密码(ALTER)相同,或和当前密码反序;
+* 至少包含大写字母(A-Z),小写字母(a-z),数字,非字母数字字符(限定为~!@#$%^&*()-_=+\|[{}];:,<.>/?)四类字符中的三类字符。
+
+如果安装成功,显示如下:
+```
+……
+Successfully deleted instances from all nodes.
+Checking node configuration on all nodes.
+Initializing instances on all nodes.
+Updating instance configuration on all nodes.
+Check consistence of memCheck and coresCheck on database nodes.
+Configuring pg_hba on all nodes.
+Configuration is completed.
+Successfully started cluster.
+Successfully installed application.
+end deploy..
+```
+
+1.4.5 安装生成的目录 \
+安装后的目录及各目录下的文件说明请参见表1-4。\
+
+表1-4 安装后的目录及各目录下的文件说明
+
+| 序号 | 项目目录说明 | 目录 | 子目录 | 说明 |
+| --- | --- | --- | --- | --- |
+| 1 | 集群openGauss安装目录 | /opt/gaussdb/app | etc | cgroup工具配置文件。 |
+|  |  |  | include | 存放数据库运行所需要的头文件。 |
+|  |  |  | lib | 存放数据库的库文件的目录。 |
+|  |  |  | share | 存放数据库运行所需要的公共文件,如配置文件模板。 |
+| 2 | 集群openGauss数据目录 | /gaussdb/data | data_dnxxx | DBnode实例的数据目录,其中主实例的目录名为“data_dnxxx”,备实例的为data_dnSxxx。xxx代表DBnode编号。 |
+| 3 | 集群openGauss日志目录 | /var/log/gaussdb/用户名 | bin | 二进制程序的日志目录。 |
+|  |  |  | gs_profile | 数据库内核性能日志目录。 |
+|  |  |  | om | OM的日志目录。例如:部分local脚本产生的日志,增删数据库节点接口的日志,gs_om接口的日志,前置接口的日志,节点替换接口的日志等。 |
+|  |  |  | pg_audit | 数据库审计日志目录。 |
+|  |  |  | pg_log | 数据库节点实例的运行日志目录。 |
+| 4 | 集群openGauss系统工具目录 | /opt/huawei/wisequery | script | 用于openGauss用户进行openGauss管理的脚本文件。 |
+|  |  |  | lib | bin目录下的二进制文件依赖的库文件。 |
+
+
+## 2 数据库使用
+
+本节描述使用数据库的基本操作。通过此节您可以完成创建数据库、创建表及向表中插入数据和查询表中数据等操作。
+
+### 2.1 前提条件
+* openGauss正常运行。
+* 由于本实验是对openGauss数据库的基本使用,需要掌握openGauss数据库的基本操作和SQL语法,openGauss数据库支持SQL2003标准语法,数据库基本操作参见附录二。
+
+### 2.2 操作步骤
+
+步骤 1 在数据库主节点服务器上,切换至omm操作系统用户环境。
+`[root@ecs-c9bf script]# su - omm `
+
+若不确定数据库主节点部署在哪台服务器,请确认连接信息。
+
+步骤 2 启动数据库服务(可选操作,如未启动,请按此步骤启动)。
+启动服务命令:
+```
+[omm@ecs-c9bf ~]$ gs_om -t start
+Starting cluster.
+=========================================
+=========================================
+Successfully started.
+查看服务是否启动:
+```
+[omm@ecs-9a68 ~]$ gs_om -t status
+-----------------------------------------------------------------------
+
+cluster_state : Normal
+redistributing : No
+
+-----------------------------------------------------------------------
+```
+步骤 3 连接数据库。
+```
+[omm@ecs-c9bf ~]$ gsql -d postgres -p 26000 -r
+```
+当结果显示为如下信息,则表示连接成功。
+```
+gsql ((openGauss 1.1.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+postgres=#
+```
+其中,postgres为openGauss安装完成后默认生成的数据库,初始可以连接到此数据库进行新数据库的创建;26000为数据库主节点的端口号,需根据openGauss的实际情况替换,可通过查看集群的连接信息确认。\
+引申信息:
+* 使用数据库前,需先使用客户端程序或工具连接到数据库,然后就可以通过客户端程序或工具执行SQL来使用数据库了。
+* gsql是openGauss数据库提供的命令行方式的数据库连接工具。
+
+步骤 4 第一次连接数据库时,需要先修改omm用户密码,此处将新密码修改为Bigdata@123(建议用户自定义密码)。
+```
+postgres=# alter role omm identified by 'Bigdata@123' replace 'GaussDB@123';
+```
+成功显示如下:
+```
+ALTER ROLE
+```
+步骤 5 创建数据库用户。\
+默认只有openGauss安装时创建的管理员用户可以访问初始数据库,您还可以创建其他数据库用户帐号。
+```
+postgres=# CREATE USER joe WITH PASSWORD "Bigdata@123";
+```
+当结果显示为如下信息,则表示创建成功。
+```
+CREATE ROLE
+```
+如上创建了一个用户名为joe,密码为Bigdata@123的用户。
+
+步骤 6 创建数据库。
+```
+postgres=# CREATE DATABASE db_tpcc OWNER joe;
+```
+当结果显示为如下信息,则表示创建成功。
+```
+CREATE DATABASE
+```
+步骤 7 使用新用户连接到此数据库执行接下来的创建表等操作。当然,也可以选择继续在默认的postgres数据库下做后续的体验。
+退出postgres数据库。
+```
+postgres=# \q
+```
+使用新用户连接到此数据库。
+```
+[omm@ecs-c9bf ~]$ gsql -d db_tpcc -p 26000 -U joe -W Bigdata@123 -r
+```
+当结果显示为如下信息,则表示连接成功。
+```
+gsql ((openGauss 1.1.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+db_tpcc=>
+```
+
+步骤 8 创建SCHEMA。
+```
+db_tpcc=> CREATE SCHEMA joe AUTHORIZATION joe;
+```
+当结果显示为如下信息,则表示创建SCHEMA成功。
+```
+CREATE SCHEMA
+```
+步骤 9 创建表。
+创建一个名称为mytable,只有一列的表,字段名为firstcol,字段类型为integer。
+```
+db_tpcc=> CREATE TABLE mytable (firstcol int);
+CREATE TABLE
+```
+
+步骤 10 向表中插入数据:
+```
+db_tpcc=> INSERT INTO mytable values (100);
+```
+当结果显示为如下信息,则表示插入数据成功。
+```
+INSERT 0 1
+```
+0:表示OID,1:表示插入的条数。
+查看表中数据:
+```
+db_tpcc=> SELECT * from mytable;
+ firstcol
+----------
+ 100
+(1 row)
+```
+本实验结束。
+
 diff --git "a/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262CentOS+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.docx" "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262CentOS+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.docx" new file mode 100644 index 0000000000000000000000000000000000000000..b9181965273278a9941dcf143cfff967d4b9729d Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262CentOS+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.docx" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262CentOS+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.pdf"
"b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262CentOS+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.pdf" new file mode 100644 index 0000000000000000000000000000000000000000..8b9ee0c326ba045c7d04af3d086c2cdb9deb0eeb Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262CentOS+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.pdf" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262openEuler+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.docx" "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262openEuler+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.docx" new file mode 100644 index 0000000000000000000000000000000000000000..9b395e091f97fe6fe0aa41a3f2747a50e87375cb Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262openEuler+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.docx" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262openEuler+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.pdf" "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262openEuler+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.pdf" new file mode 100644 index 0000000000000000000000000000000000000000..d5b0d9f51627b76b4462ac7913a27a71a1adc516 Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\344\275\277\347\224\250\350\231\232\346\213\237\346\234\272\351\225\234\345\203\217\346\226\207\344\273\266\345\257\274\345\205\245\351\203\250\347\275\262openEuler+openGauss\346\214\207\345\257\274\346\211\213\345\206\214.pdf" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\345\234\250ECS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" "b/content/zh/post/opengauss_deploy/docs/\345\234\250ECS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" new file mode 100644 index 0000000000000000000000000000000000000000..aa6f9569c31cb65ea5afa483be231ef985c53af0 Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\345\234\250ECS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" differ diff --git 
"a/content/zh/post/opengauss_deploy/docs/\345\234\250ECS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" "b/content/zh/post/opengauss_deploy/docs/\345\234\250ECS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" new file mode 100644 index 0000000000000000000000000000000000000000..edabdcc3322fc9a3b55f2bb2778b167ea01de1a8 Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\345\234\250ECS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+CentOS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+CentOS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" new file mode 100644 index 0000000000000000000000000000000000000000..4aa85f8afa63815b1e6b38f0a6db3b2eca1ccfb0 Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+CentOS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+CentOS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+CentOS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" new file mode 100644 index 0000000000000000000000000000000000000000..e889e2b0bd6e20f019cffeb0af533a1b1f9ca429 Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+CentOS\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+openEuler\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+openEuler\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" new file mode 100644 index 0000000000000000000000000000000000000000..052a577c6b4ceebc9190e05879bb80c45c373427 Binary files /dev/null and 
"b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+openEuler\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.docx" differ diff --git "a/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+openEuler\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+openEuler\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" new file mode 100644 index 0000000000000000000000000000000000000000..40c8591ce6e91898757b59679da153e06c407264 Binary files /dev/null and "b/content/zh/post/opengauss_deploy/docs/\345\234\250\350\231\232\346\213\237\346\234\272+openEuler\344\270\212\345\256\211\350\243\205\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223\346\214\207\345\257\274\346\211\213\345\206\214.pdf" differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/001.png b/content/zh/post/opengauss_deploy/images/ecscloud/001.png new file mode 100644 index 0000000000000000000000000000000000000000..c3d5e34f99c3492acc51bb3d3b031d33fa9fe1ee Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/001.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/002.png b/content/zh/post/opengauss_deploy/images/ecscloud/002.png new file mode 100644 index 0000000000000000000000000000000000000000..34f1be26f780a0dc5cdabd3a6772d48c8934224c Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/002.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/003.png b/content/zh/post/opengauss_deploy/images/ecscloud/003.png new file mode 100644 index 0000000000000000000000000000000000000000..a1887cde01508c25de6cf83db4be979f41e5f8b0 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/003.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/004.png b/content/zh/post/opengauss_deploy/images/ecscloud/004.png new file mode 100644 index 0000000000000000000000000000000000000000..a5a4f712b119f17c37a265154227fca03dfed635 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/004.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/005.png b/content/zh/post/opengauss_deploy/images/ecscloud/005.png new file mode 100644 index 0000000000000000000000000000000000000000..5c9f74b287032820322e80c3d6822f725a1df3ba Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/005.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/006.png b/content/zh/post/opengauss_deploy/images/ecscloud/006.png new file mode 100644 index 0000000000000000000000000000000000000000..02f5e050b3ca44184a51447ed6d8184ea968ac70 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/006.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/007.png b/content/zh/post/opengauss_deploy/images/ecscloud/007.png new file mode 100644 index 0000000000000000000000000000000000000000..40424e4b11d2e6fdb5d9cca17ef8b7b99eb1add3 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/007.png differ diff --git 
a/content/zh/post/opengauss_deploy/images/ecscloud/008.png b/content/zh/post/opengauss_deploy/images/ecscloud/008.png new file mode 100644 index 0000000000000000000000000000000000000000..583efb960ce0c0670ddbcfcc3e550a770b8570e1 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/008.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/009.png b/content/zh/post/opengauss_deploy/images/ecscloud/009.png new file mode 100644 index 0000000000000000000000000000000000000000..2a03abdbdddf29fa7d52437e79fafc1dcbe5ad64 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/009.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/010.png b/content/zh/post/opengauss_deploy/images/ecscloud/010.png new file mode 100644 index 0000000000000000000000000000000000000000..89ae3b794fb0009c6c07c9206e0ecfb0debf2644 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/010.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/011.png b/content/zh/post/opengauss_deploy/images/ecscloud/011.png new file mode 100644 index 0000000000000000000000000000000000000000..2abb2a3607c51633909f8382980d88f6e31866c4 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/011.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/012.png b/content/zh/post/opengauss_deploy/images/ecscloud/012.png new file mode 100644 index 0000000000000000000000000000000000000000..630cca2d8f7cca3138f0cb1db0c9b2b127de6ad2 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/012.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/013.png b/content/zh/post/opengauss_deploy/images/ecscloud/013.png new file mode 100644 index 0000000000000000000000000000000000000000..15a185906e67a182812120286ad890f46df5507d Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/013.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/014.png b/content/zh/post/opengauss_deploy/images/ecscloud/014.png new file mode 100644 index 0000000000000000000000000000000000000000..6bf3f2f64763e8ffba976597c83148719df9e8ad Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/014.png differ diff --git a/content/zh/post/opengauss_deploy/images/ecscloud/015.png b/content/zh/post/opengauss_deploy/images/ecscloud/015.png new file mode 100644 index 0000000000000000000000000000000000000000..33743b7a8ac0958116722f1c820e3261abd420f2 Binary files /dev/null and b/content/zh/post/opengauss_deploy/images/ecscloud/015.png differ diff --git "a/content/zh/post/opengauss_deploy/\345\234\250\350\231\232\346\213\237\346\234\272\344\270\212\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223.md" "b/content/zh/post/opengauss_deploy/\345\234\250\350\231\232\346\213\237\346\234\272\344\270\212\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223.md" new file mode 100644 index 0000000000000000000000000000000000000000..8eaaa16f016f60064676d1898ad23f53df44c151 --- /dev/null +++ "b/content/zh/post/opengauss_deploy/\345\234\250\350\231\232\346\213\237\346\234\272\344\270\212\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223.md" @@ -0,0 +1,91 @@ ++++ + +title = "在虚拟机上安装部署openGauss数据库指导手册" +date = "2021-11-13" +tags = ["在虚拟机上安装部署openGauss数据库指导手册"] +archives = "2021-11-13" +author = "opengauss_deploy" +summary = "在虚拟机上安装部署openGauss数据库指导手册" +img = "/zh/post/xingchen/title/img1.png" 
+times = "19:30"
+
++++
+
+
+### 前言
+
+本文介绍了如何在虚拟机上安装部署openGauss数据库,分别使用CentOS 7.6和openEuler 20.03 LTS两种操作系统进行实验。
+
+内容涵盖在华为云ECS(弹性云服务器)上安装,以及在本地Windows系统上虚拟化一个Linux系统进行安装两种方式。
+
+### 资源下载
+
+文档中使用VirtualBox客户端进行Linux虚拟化;用到的CentOS 7.6和openEuler 20.03 LTS操作系统镜像,可从如下地址下载:
+
+#### VirtualBox
+
+* 官网下载地址
+  ```
+  https://download.virtualbox.org/virtualbox/
+  https://download.virtualbox.org/virtualbox/6.1.14/VirtualBox-6.1.14-140239-Win.exe
+  ```
+* 归档
+  ```
+  https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/iso/VirtualBox-6.1.14-140239-Win.exe
+  ```
+
+#### CentOS 7.6 x86_64 操作系统ISO镜像
+
+* 官网下载地址
+  ```
+  https://mirrors.huaweicloud.com/centos-vault/7.8.2003/isos/x86_64/
+  https://mirrors.huaweicloud.com/centos-vault/7.8.2003/isos/x86_64/CentOS-7-x86_64-DVD-2003.iso
+  ```
+* 归档
+  ```
+  https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/iso/CentOS-7-x86_64-DVD-2003.iso
+  ```
+
+#### openEuler-20.03-LTS x86_64 操作系统ISO镜像
+
+* 官网下载地址
+  ```
+  https://mirrors.huaweicloud.com/openeuler/openEuler-20.03-LTS/ISO/x86_64/
+  https://mirrors.huaweicloud.com/openeuler/openEuler-20.03-LTS/ISO/x86_64/openEuler-20.03-LTS-x86_64-dvd.iso
+  ```
+* 归档
+  ```
+  https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/iso/openEuler-20.03-LTS-x86_64-dvd.iso
+  ```
+
+### 虚拟机部署文档
+
+1. 在华为云ECS上安装部署openGauss数据库指导手册
+
+   文档下载:
+   wps: [在ECS上安装部署openGauss数据库指导手册.docx](../docs/在ECS上安装部署openGauss数据库指导手册.docx)
+   pdf: [在ECS上安装部署openGauss数据库指导手册.pdf](../docs/在ECS上安装部署openGauss数据库指导手册.pdf)
+
+2. 在虚拟机+CentOS上安装部署openGauss数据库指导手册
+
+   文档下载:
+   wps: [在虚拟机+CentOS上安装部署openGauss数据库指导手册.docx](../docs/在虚拟机+CentOS上安装部署openGauss数据库指导手册.docx)
+   pdf: [在虚拟机+CentOS上安装部署openGauss数据库指导手册.pdf](../docs/在虚拟机+CentOS上安装部署openGauss数据库指导手册.pdf)
+
+3. 在虚拟机+openEuler上安装部署openGauss数据库指导手册
+
+   文档下载:
+   wps: [在虚拟机+openEuler上安装部署openGauss数据库指导手册.docx](../docs/在虚拟机+openEuler上安装部署openGauss数据库指导手册.docx)
+   pdf: [在虚拟机+openEuler上安装部署openGauss数据库指导手册.pdf](../docs/在虚拟机+openEuler上安装部署openGauss数据库指导手册.pdf)
+
+4. 使用虚拟机镜像文件导入部署CentOS+openGauss指导手册
+
+   文档下载:
+   wps: [使用虚拟机镜像文件导入部署CentOS+openGauss指导手册.docx](../docs/使用虚拟机镜像文件导入部署CentOS+openGauss指导手册.docx)
+   pdf: [使用虚拟机镜像文件导入部署CentOS+openGauss指导手册.pdf](../docs/使用虚拟机镜像文件导入部署CentOS+openGauss指导手册.pdf)
+
+5. 
使用虚拟机镜像文件导入部署openEuler+openGauss指导手册 + + 文档下载: + wps: [使用虚拟机镜像文件导入部署openEuler+openGauss指导手册.docx](../docs/使用虚拟机镜像文件导入部署openEuler+openGauss指导手册.docx) + pdf: [使用虚拟机镜像文件导入部署openEuler+openGauss指导手册.pdf](../docs/使用虚拟机镜像文件导入部署openEuler+openGauss指导手册.pdf) \ No newline at end of file diff --git "a/content/zh/post/opengaussblog/\345\246\202\344\275\225\345\220\221openGauss\347\244\276\345\214\272\346\217\220\344\272\244\344\275\240\347\232\204\347\254\254\344\270\200\347\257\207\345\215\232\345\256\242.md" "b/content/zh/post/opengaussblog/\345\246\202\344\275\225\345\220\221openGauss\347\244\276\345\214\272\346\217\220\344\272\244\344\275\240\347\232\204\347\254\254\344\270\200\347\257\207\345\215\232\345\256\242.md" index 1e4ccb5a222a892e63e415e82ced93ecef237af8..6979770411190c05fd521c7055839aebb632ecbd 100644 --- "a/content/zh/post/opengaussblog/\345\246\202\344\275\225\345\220\221openGauss\347\244\276\345\214\272\346\217\220\344\272\244\344\275\240\347\232\204\347\254\254\344\270\200\347\257\207\345\215\232\345\256\242.md" +++ "b/content/zh/post/opengaussblog/\345\246\202\344\275\225\345\220\221openGauss\347\244\276\345\214\272\346\217\220\344\272\244\344\275\240\347\232\204\347\254\254\344\270\200\347\257\207\345\215\232\345\256\242.md" @@ -19,7 +19,7 @@ openGauss社区的博客板块对外完全开放,以方便参与者分享个 openGauss社区源代码以及官方网站和博客都托管在码云Gitee上面,组织仓库地址https://gitee.com/opengauss。 -首先请参考http://git.mydoc.io/?t=179267,完成Gitee账号的注册,然后需要前往http://gitee.com/profile/emails绑定你的主邮箱。 +首先请参考https://gitee.com/help/articles/4113,完成Gitee账号的注册,然后需要前往http://gitee.com/profile/emails绑定你的主邮箱。 ## 签署贡献者协议CLA @@ -27,7 +27,7 @@ openGauss社区源代码以及官方网站和博客都托管在码云Gitee上面 在该过程中,你需要填写一个表单,其中邮箱需要填写你之前在码云上设置的主邮箱地址。 -![](../figures/20201209-095220(WeLinkPC).png) +![](figures/20201209-095220(WeLinkPC).png) ## 提交你的博客 @@ -35,17 +35,17 @@ openGauss社区源代码以及官方网站和博客都托管在码云Gitee上面 找到https://gitee.com/opengauss/blog仓库,点击右上角的Fork。 -![](../figures/640.png) +![](figures/640.png) 在弹窗中选择你的个人仓库,并点击 “确定(Confirm)”。 -![](../figures/1.png) +![](figures/1.png) **将个人仓库Clone到本地** 访问你个人账号的blog仓库:https://gitee.com/\(user\)/blog,点击“克隆或下载”按钮,复制HTTPS地址。 -![](../figures/2.png) +![](figures/2.png) 在你本地机器上进入你想要存放blog的路径,输入 @@ -105,7 +105,7 @@ times:完成时间,如“17:30” 官方网站效果如下: -![](../figures/3.png) +![](figures/3.png) **提交修改** @@ -125,17 +125,17 @@ git push –f origin new_branch_name **提交Pull Request** -![](../figures/640-0.png) +![](figures/640-0.png) 回到码云的\(user\)/blog仓库中,在页签中找到Pull Request,点击新建一个Pull Request。 -![](../figures/4.png) +![](figures/4.png) 在创建界面将你的提交Commit的分支和openGauss blog master分支进行比较,完成创建Pull Request。 以上全部流程可参照下图。 -![](../figures/5.png) +![](figures/5.png) **完成合入** diff --git a/content/zh/post/optimize/images/index4.png b/content/zh/post/optimize/images/index4.png index 63a8891f1306ccbc61af4f946a2308aef8ac7c2f..44adeaf908cb48d4fa79af257027053ef7f0a735 100644 Binary files a/content/zh/post/optimize/images/index4.png and b/content/zh/post/optimize/images/index4.png differ diff --git a/content/zh/post/optimize/opengauss-optimize1.md b/content/zh/post/optimize/opengauss-optimize1.md index 3c339f610596d44eb1ea283a38b1bdf809878868..2c1c4cf999913f451ee359b4c5e0b11414bd78f2 100644 --- a/content/zh/post/optimize/opengauss-optimize1.md +++ b/content/zh/post/optimize/opengauss-optimize1.md @@ -122,7 +122,7 @@ xfs_info /data1 用户注意根据需要,将所需数据备份至其他磁盘或其他机器 -<3> 重新格式化磁盘,设置block大小8k +<3> 重新格式化磁盘,设置block大小8k (X86环境不需执行此步骤) 以/dev/nvme0n1盘,加载点为/data1为例,相关参考命令如下 @@ -138,7 +138,7 @@ mount /dev/nvme0n1 /data1 ### 网络配置 -**1. 多中断队列设置** +**1. 
多中断队列设置** (X86环境不需执行此步骤) 针对泰山服务器核数较多的特征,产品需要在服务器端和客户端设置网卡多队列。 当前推荐的配置为:服务器端网卡配置16中断队列,客户端网卡配置48中断队列。 @@ -187,7 +187,7 @@ ethtool –K enp3s0 gso on ```shell -sh bind_net_irq.sh 16 +sh bind_net_irq.sh 16 (X86环境不需执行此步骤) ``` @@ -256,10 +256,10 @@ bus-info: 0000:03:00.0 2、修改postgresql.conf参数。 -3、以绑核方式启动数据库: +3、以绑核方式启动数据库:(X86环境不需执行此步骤) `numactl --interleave=all bin/gaussdb -D ${DATA_DIR} --single_node` -4、以绑核方式启动benchmark: +4、以绑核方式启动benchmark:(X86环境不需执行此步骤) `numactl -C 0-19,32-51,64-83,96-115 ./runBenchmark.sh props.pg` \ 按照自己的绑核配置和benchmark配置文件执行此命令。这里的绑核参数是在数据库绑核参数的空隙 @@ -284,8 +284,8 @@ numactl --interleave=all gaussdb --single_node -D {DATA_DIR} -p {PORT} & **2. 服务器端参数设置** postgresql.conf中新增如下参数: -- `advance_xlog_file_num = 10` \ -此参数表示后台线程BackgroundWALWriter周期性地提前检测并初始化未来10个XLog文件,避免事务提交时才去执行XLog文件初始化,从而降低事务提交时延。只有在性能压力测试时作用才会体现出来,一般不用配置。默认为0,即不进行提前初始化。 +- `advance_xlog_file_num = 100` \ +此参数表示后台线程BackgroundWALWriter周期性地提前检测并初始化未来100个XLog文件,避免事务提交时才去执行XLog文件初始化,从而降低事务提交时延。只有在性能压力测试时作用才会体现出来,一般不用配置。默认为0,即不进行提前初始化。 - `numa_distribute_mode = 'all'` \ 此参数目前有all和none两个取值。all表示启用NUMA优化,将工作线程和对应的PGPROC、WALInsertlock进行统一分组,分别绑定到对应的NUMA Node下,以减少关键路径上的CPU远端访存。默认取值为none,表示不启用NUMA分布特性。只有在涉及到多个NUMA节点,且远端访存代价明显高于本地访存时使用。当前建议在性能压力测试情况下开启。 @@ -298,86 +298,89 @@ thread_pool_attr线程池配置 \ max_connections = 4096 allow_concurrent_tuple_update = true audit_enabled = off -checkpoint_segments = 1024 -checkpoint_timeout = 15min cstore_buffers = 16MB enable_alarm = off enable_codegen = false enable_data_replicate = off -full_page_writes = on +full_page_writes = off max_files_per_process = 100000 max_prepared_transactions = 2048 -shared_buffers = 350GB -use_workload_manager = off +shared_buffers = 350GB +use_workload_manager = off wal_buffers = 1GB work_mem = 1MB -log_min_messages = FATAL transaction_isolation = 'read committed' default_transaction_isolation = 'read committed' synchronous_commit = on fsync = on maintenance_work_mem = 2GB -vacuum_cost_limit = 2000 +vacuum_cost_limit = 10000 autovacuum = on autovacuum_mode = vacuum -autovacuum_max_workers = 5 -autovacuum_naptime = 20s +autovacuum_max_workers = 20 +autovacuum_naptime = 5s autovacuum_vacuum_cost_delay = 10 -xloginsert_locks = 48 update_lockwait_timeout = 20min - enable_mergejoin = off enable_nestloop = off enable_hashjoin = off -enable_bitmapscan = on enable_material = off - wal_log_hints = off log_duration = off checkpoint_timeout = 15min autovacuum_vacuum_scale_factor = 0.1 autovacuum_analyze_scale_factor = 0.02 enable_save_datachanged_timestamp = false - -log_timezone = 'PRC' -timezone = 'PRC' -lc_messages = 'C' -lc_monetary = 'C' -lc_numeric = 'C' -lc_time = 'C' - -enable_thread_pool = on -thread_pool_attr = '812,4,(cpubind:0-27,32-59,64-91,96-123)' -enable_double_write = off +enable_double_write = on enable_incremental_checkpoint = on enable_opfusion = on -advance_xlog_file_num = 10 -numa_distribute_mode = 'all' - +advance_xlog_file_num = 100 +numa_distribute_mode = 'all' (X86环境不需执行此步骤) track_activities = off enable_instr_track_wait = off enable_instr_rt_percentile = off track_counts = on track_sql_count = off enable_instr_cpu_timer = off - plog_merge_age = 0 session_timeout = 0 - enable_instance_metric_persistent = off enable_logical_io_statistics = off enable_page_lsn_check = off enable_user_metric_persistent = off enable_xlog_prune = off - enable_resource_track = off instr_unique_sql_count=0 +remote_read_mode=non_authentication +wal_level = archive +hot_standby = off +hot_standby_feedback = off +client_min_messages = ERROR +log_min_messages = 
FATAL +enable_asp = off +enable_bbox_dump = off +bgwriter_flush_after = 32 +minimum_pool_size = 200 +wal_keep_segments = 1025 +enable_bitmapscan = off +enable_seqscan = off enable_beta_opfusion=on -enable_beta_nestloop_fusion=on +enable_thread_pool = on (X86环境不需执行此步骤) +checkpoint_segments=8000 +enable_stmt_track=false +bgwriter_thread_num = 1 +bgwriter_delay = 5s +incremental_checkpoint_timeout = 5min +thread_pool_attr = '464,4,(cpubind:1-27,32-59,64-91,96-123)' (X86环境不需执行此步骤) +xloginsert_locks = 16 +wal_writer_cpu=0 +wal_file_init_num = 20 +xlog_idle_flushes_before_sleep = 500000000 +pagewriter_sleep = 10ms ``` -**3. TPCC客户端绑核设置** +**3. TPCC客户端绑核设置** (X86环境不需执行此步骤) 客户端通过 numactl 将客户端绑定在除网卡外的核上,下图以 128 核环境举例,共80个核用于处理业务逻辑,剩余48个核处理网络中断。 @@ -420,11 +423,11 @@ warehouses=1000 loadWorkers=200 // 设置最大并发数量, 跟服务端最大work数对应 -terminals=812 +terminals=696 //To run specified transactions per terminal- runMins must equal zero runTxnsPerTerminal=0 //To run for specified minutes- runTxnsPerTerminal must equal zero -runMins=5 +runMins=60 //Number of total transactions per minute limitTxnsPerMin=0 @@ -465,13 +468,140 @@ CREATE TABLESPACE example3 relative location 'tablespace3'; ``` 2.删除序列`bmsql_hist_id_seq` -3.给每一个表增加FACTOR属性 +3.给每一个表增加FACTOR属性,完整tableCreate.sql如下: ```sql +CREATE TABLESPACE example2 relative location 'tablespace2'; +CREATE TABLESPACE example3 relative location 'tablespace3'; +create table bmsql_config ( + cfg_name varchar(30), + cfg_value varchar(50) +);-- DISTRIBUTE BY REPLICATION; + +create table bmsql_warehouse ( + w_id integer not null, + w_ytd decimal(12,2), + w_tax decimal(4,4), + w_name varchar(10), + w_street_1 varchar(20), + w_street_2 varchar(20), + w_city varchar(20), + w_state char(2), + w_zip char(9) +)WITH (FILLFACTOR=80);-- DISTRIBUTE BY hash(w_id); + +create table bmsql_district ( + d_w_id integer not null, + d_id integer not null, + d_ytd decimal(12,2), + d_tax decimal(4,4), + d_next_o_id integer, + d_name varchar(10), + d_street_1 varchar(20), + d_street_2 varchar(20), + d_city varchar(20), + d_state char(2), + d_zip char(9) + )WITH (FILLFACTOR=80);-- DISTRIBUTE BY hash(d_w_id); + +create table bmsql_customer ( + c_w_id integer not null, + c_d_id integer not null, + c_id integer not null, + c_discount decimal(4,4), + c_credit char(2), + c_last varchar(16), + c_first varchar(16), + c_credit_lim decimal(12,2), + c_balance decimal(12,2), + c_ytd_payment decimal(12,2), + c_payment_cnt integer, + c_delivery_cnt integer, + c_street_1 varchar(20), + c_street_2 varchar(20), + c_city varchar(20), + c_state char(2), + c_zip char(9), + c_phone char(16), + c_since timestamp, + c_middle char(2), + c_data varchar(500) +)WITH (FILLFACTOR=80) +tablespace example2; +--DISTRIBUTE BY hash(c_w_id); + +-- create sequence bmsql_hist_id_seq; + +create table bmsql_history ( + hist_id integer, + h_c_id integer, + h_c_d_id integer, + h_c_w_id integer, + h_d_id integer, + h_w_id integer, + h_date timestamp, + h_amount decimal(6,2), + h_data varchar(24) +)WITH (FILLFACTOR=80);-- DISTRIBUTE BY hash(h_w_id); + +create table bmsql_new_order ( + no_w_id integer not null, + no_d_id integer not null, + no_o_id integer not null +)WITH (FILLFACTOR=80);-- DISTRIBUTE BY hash(no_w_id); + +create table bmsql_oorder ( + o_w_id integer not null, + o_d_id integer not null, + o_id integer not null, + o_c_id integer, + o_carrier_id integer, + o_ol_cnt integer, + o_all_local integer, + o_entry_d timestamp +)WITH (FILLFACTOR=80);-- DISTRIBUTE BY hash(o_w_id); + +create table bmsql_order_line ( + ol_w_id integer not 
null,
+ ol_d_id integer not null,
+ ol_o_id integer not null,
+ ol_number integer not null,
+ ol_i_id integer not null,
+ ol_delivery_d timestamp,
+ ol_amount decimal(6,2),
+ ol_supply_w_id integer,
+ ol_quantity integer,
+ ol_dist_info char(24)
+)WITH (FILLFACTOR=80);-- DISTRIBUTE BY hash(ol_w_id);
+
+create table bmsql_item (
+ i_id integer not null,
+ i_name varchar(24),
+ i_price decimal(5,2),
+ i_data varchar(50),
+ i_im_id integer
+);-- DISTRIBUTE BY REPLICATION;
+
 create table bmsql_stock (
 s_w_id integer not null,
- .....
+ s_i_id integer not null,
+ s_quantity integer,
+ s_ytd integer,
+ s_order_cnt integer,
+ s_remote_cnt integer,
+ s_data varchar(50),
+ s_dist_01 char(24),
+ s_dist_02 char(24),
+ s_dist_03 char(24),
+ s_dist_04 char(24),
+ s_dist_05 char(24),
+ s_dist_06 char(24),
+ s_dist_07 char(24),
+ s_dist_08 char(24),
+ s_dist_09 char(24),
 s_dist_10 char(24)
-) WITH (FILLFACTOR=80) tablespace example3;
+)WITH (FILLFACTOR=80)
+tablespace example3;
+--DISTRIBUTE BY hash(s_w_id);
 ```
 
 <2> 修改索引indexCreates.sql
@@ -483,10 +613,44 @@
 在该文件中添加下图中红色内容,可以在benchmark自动生成数据的时候自动生成到不同的数据表空间,如果未添加可以在benchmark生成数据之后再数据库端修改。用于分盘。
 ![](../images/index3.png)
+完整indexCreates.sql如下:
+```sql
+alter table bmsql_warehouse add constraint bmsql_warehouse_pkey
+ primary key (w_id);
+
+alter table bmsql_district add constraint bmsql_district_pkey
+ primary key (d_w_id, d_id);
+
+alter table bmsql_customer add constraint bmsql_customer_pkey
+ primary key (c_w_id, c_d_id, c_id);
+
+create index bmsql_customer_idx1
+ on bmsql_customer (c_w_id, c_d_id, c_last, c_first);
+
+alter table bmsql_oorder add constraint bmsql_oorder_pkey
+ primary key (o_w_id, o_d_id, o_id);
+
+create index bmsql_oorder_idx1
+ on bmsql_oorder (o_w_id, o_d_id, o_c_id);
+
+alter table bmsql_new_order add constraint bmsql_new_order_pkey
+ primary key (no_w_id, no_d_id, no_o_id) using index tablespace example2;
+
+alter table bmsql_order_line add constraint bmsql_order_line_pkey
+ primary key (ol_w_id, ol_d_id, ol_o_id, ol_number);
+
+alter table bmsql_stock add constraint bmsql_stock_pkey
+ primary key (s_w_id, s_i_id);
+
+alter table bmsql_item add constraint bmsql_item_pkey
+ primary key (i_id);
+
+```
 
 <3> 修改runDatabaseBuild.sh文件
 修改下图内容可避免生产数据时候的外键不支持错误
 ![](../images/index4.png)
+AFTER_LOAD="indexCreates buildFinish"
 
 **3. 导入数据**
 
@@ -525,7 +689,7 @@ ln -svf $TABSPACE2_DIR/tablespace2 ./
 ```
 
-**6. 运行TPCC程序**
+**6. 
运行TPCC程序** (X86环境不需携带numactl -C 0-19,32-51,64-83,96-115) ```shell diff --git "a/content/zh/post/pengchong/MogDBopenGauss\346\225\260\346\215\256\345\272\223package\345\205\263\351\224\256\345\255\227\347\232\204\344\270\244\347\247\215\347\224\250\346\263\225.md" "b/content/zh/post/pengchong/MogDBopenGauss\346\225\260\346\215\256\345\272\223package\345\205\263\351\224\256\345\255\227\347\232\204\344\270\244\347\247\215\347\224\250\346\263\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..6c0ed05a803bfa16876f4d25b93f277709bb9644 --- /dev/null +++ "b/content/zh/post/pengchong/MogDBopenGauss\346\225\260\346\215\256\345\272\223package\345\205\263\351\224\256\345\255\227\347\232\204\344\270\244\347\247\215\347\224\250\346\263\225.md" @@ -0,0 +1,218 @@ ++++ + +title = "MogDB/openGauss数据库package关键字的两种用法" + +date = "2022-04-13" + +tags = ["MogDB/openGauss数据库package关键字的两种用法"] + +archives = "2022-04" + +author = "彭冲" + +summary = "MogDB/openGauss数据库package关键字的两种用法" + +img = "/zh/post/pengchong/title/img9.png" + +times = "10:20" ++++ + +# MogDB/openGauss数据库package关键字的两种用法 + +本文出处:https://www.modb.pro/db/237701 + +package关键字在MogDB数据库里有两种用法: + +- package包,分为Package Specification和Package Body。 + 注意:需要在Oracle兼容下(dbcompatibility=‘A’) +- package属性,用于存储过程重载。 + +下面演示这两种用法。 + +## 一、package包 + +创建一个员工表 + +```sql +create table emp( +empno bigserial, +name varchar, +job varchar, +mgr numeric, +hiredate timestamp without time zone, +sal numeric, +comm numeric, +deptno numeric +); +``` + +创建package包接口,用于客户端接口调用 + +```sql +create package employee_management as + c_empno numeric = 9999; + function hire_emp (name varchar, job varchar, + mgr numeric, hiredate timestamp, + sal numeric, comm numeric, + deptno numeric) return numeric; + procedure fire_emp (emp_id numeric); +end employee_management; +/ +``` + +创建package body包实现,可用于后端修改 + +```sql +create package body employee_management as + function hire_emp (name varchar, job varchar, + mgr numeric, hiredate timestamp, sal numeric, + comm numeric, deptno numeric) + return numeric as + declare + new_empno numeric; + begin + select nextval('emp_empno_seq') into new_empno; + insert into emp values (new_empno, name, job, + mgr,hiredate, sal, comm, deptno); + return new_empno; + end; + + procedure fire_emp(emp_id in number) + as + begin + delete from emp where empno = emp_id; + end; +end employee_management; +/ +``` + +接下来我们可以通过员工管理接口来进行调用,命令如下。 + +``` +call employee_management.hire_emp('tom','teacher',1,localtimestamp,1,1,1); +``` + +![image.png](../images/20220113-26ad3133-6232-463a-babd-c0199c1cb88e.png) +通过上面示例,我们可以看到第一种用法,创建package包接口,里面可以创建不同的程序体,包括变量、函数、存储过程等。 + +## 二、package属性 + +#### PostgreSQL测试 + +首先我们来看PG数据库里的这个测试 + +```plsql +create or replace procedure myproc( +in p1 varchar, +in p2 varchar, +out p3 varchar) +as $$ +begin + p3 = p1; + raise notice 'procedure parameter: %', p1 ; +end ; +$$ +language plpgsql; + +create or replace procedure myproc( +in p1 varchar, +in p2 varchar) +as $$ +begin + raise notice 'procedure parameter: %', p1 ; +end ; +$$ +language plpgsql; +``` + +第二个同名的myproc存储过程创建会失败,提示信息如下: + +``` +ERROR: cannot change whether a procedure has output parameters HINT: Use DROP PROCEDURE myproc(character varying,character varying) first. 
+```
+
+测试截图如下:
+![image.png](../images/20220113-9b619d9d-d87c-4b6e-aaa4-a5472f95b7ea.png)
+
+上面的测试在MogDB数据库里也是同样的效果:不能通过输出参数来对存储过程进行重载。上面第一个myproc有输出参数,第二个myproc没有输出参数。
+
+#### MogDB测试
+
+接下来我们在MogDB数据库里,不使用out输出参数,但改变in输入参数的类型,测试重载。
+
+```sql
+create or replace procedure myproc2(
+in p1 varchar,
+in p2 varchar)
+as
+begin
+ raise notice 'procedure parameter: %', p1 ;
+end ;
+/
+
+create or replace procedure myproc2(
+in p1 integer,
+in p2 integer)
+as
+begin
+ raise notice 'procedure parameter: %', p1 ;
+end ;
+/
+```
+
+最终的结果是只能看到最后一个myproc2,即两个输入参数为integer类型的那个,截图如下。
+![image.png](../images/20220113-cd0cf621-d2ff-41e4-b01f-aabce51fbe44.png)
+
+接下来我们对第一个输入参数为varchar类型的myproc2使用package属性进行重载(加到as关键字前面)。
+
+```sql
+create or replace procedure myproc2(
+in p1 varchar,
+in p2 varchar)
+package as
+begin
+ raise notice 'procedure parameter: %', p1 ;
+end ;
+/
+```
+
+可以看到下面的提示:
+
+```
+ERROR: Do not allow package function overload not package function.
+```
+
+![image.png](../images/20220113-52ebc3c8-4185-4422-b3c7-4caf1ee2e52e.png)
+可以看出我们不能用后加package属性的myproc2(输入参数类型为varchar)去重载非package属性的myproc2(输入参数类型为integer)。
+
+最后测试正确的示例:
+
+```
+create or replace procedure myproc3(
+in p1 varchar,
+in p2 varchar)
+package as
+begin
+ raise notice 'procedure parameter: %', p1 ;
+end ;
+/
+
+create or replace procedure myproc3(
+in p1 integer,
+in p2 integer)
+package as
+begin
+ raise notice 'procedure parameter: %', p1 ;
+end ;
+/
+
+
+```
+
+下面的截图可以看到符合我们的预期。
+
+![image.png](../images/20220113-7d865eca-1576-4443-9d09-a0859b736b9b.png)
+
+## 三、结论
+
+MogDB数据库里package关键字有两种用法:一种是package包,另一种是package属性,用于存储过程重载。函数的重载不需要使用package属性,存储过程重载需要显式使用package属性。
diff --git "a/content/zh/post/pengchong/MogDB\346\217\222\344\273\266\344\271\213\350\267\250\345\272\223\350\256\277\351\227\256.md" "b/content/zh/post/pengchong/MogDB\346\217\222\344\273\266\344\271\213\350\267\250\345\272\223\350\256\277\351\227\256.md"
new file mode 100644
index 0000000000000000000000000000000000000000..a405e043b5b2f161f0b1ff61e612829d9dbbf7df
--- /dev/null
+++ "b/content/zh/post/pengchong/MogDB\346\217\222\344\273\266\344\271\213\350\267\250\345\272\223\350\256\277\351\227\256.md"
@@ -0,0 +1,169 @@
++++
+
+title = "MogDB插件之跨库访问"
+
+date = "2022-05-18"
+
+tags = ["MogDB插件之跨库访问"]
+
+archives = "2022-05"
+
+author = "彭冲"
+
+summary = "MogDB插件之跨库访问"
+
+img = "/zh/post/pengchong/title/img9.png"
+
+times = "10:20"
++++
+
+# MogDB插件之跨库访问
+
+本文出处:[https://www.modb.pro/db/336337](https://www.modb.pro/db/336337)
+
+MogDB数据库从2.1版本开始将插件和工具包进行了封装,我们可以随时方便地进行集成。从官网https://www.mogdb.io/downloads/mogdb/的这个页面可以进行下载:
+![image.png](../images/20220301-fea20923-c6e0-4fa8-92c6-81979a109dcf.png)
+
+本文将在CentOS平台首先演示dblink插件的使用方法:
+
+### dblink插件准备
+
+将官网下载的plugins-CentOS-x86-2.1.0.tar.gz上传到服务器后,解压:
+
+```
+$ tar zxvf plugins-CentOS-x86-2.1.0.tar.gz
+```
+
+将插件相关文件安装到MogDB数据库:
+
+- 方式一:使用脚本进行安装
+
+```
+$ ./gs_install_plugin_local -X clusterconfig.xml --dblink
+```
+
+- 方式二:手工拷贝安装
+
+```
+$ cd plugins/dblink
+$ cp dblink.so /opt/mogdb210/lib/postgresql/
+$ cp dblink--1.0.sql dblink.control dblink--unpackaged--1.0.sql \
+/opt/mogdb210/share/postgresql/extension/
+```
+
+本文使用第二种方式。
+
+### 创建dblink扩展
+
+创建扩展的用户需要具有sysadmin权限,本文使用moguser用户:
+
+```
+MogDB=# \du moguser
+ List of roles
+ Role name | Attributes | Member of
+-----------+------------+-----------
+ moguser | Sysadmin | {}
+```
+
+下面使用moguser创建dblink扩展,并进行后续测试:
+
+```
+$ gsql -U moguser postgres -r
+gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+MogDB=> create extension dblink with schema public;
+CREATE EXTENSION
+```
+
+查看dblink扩展:
+
+```
+MogDB=> \dx dblink
+ List of installed extensions
+ Name | Version | Schema | Description
+--------+---------+--------+--------------------------------------------------------------
+ dblink | 1.0 | public | connect to other PostgreSQL databases from within a database
+(1 row)
+```
+
+### dblink测试
+
+##### 连接实例
+
+```
+MogDB=> select dblink_connect('mydblink','host=192.168.137.250 dbname=mydb user=dk password=Admin@1234');
+ dblink_connect
+----------------
+ OK
+(1 row)
+```
+
+上面使用远程用户dk连接到远程实例192.168.137.250的mydb,连接串中的主机、端口、密码等参数请按实际环境填写。
+
+##### 执行查询
+
+```
+MogDB=> select * from dblink('mydblink','select * from dk.t1;') as t(id int, info text);
+ id | info
+----+------
+ 1 | one
+ 2 | two
+(2 rows)
+
+```
+
+##### 执行修改
+
+insert、update、delete、truncate操作使用dblink_exec函数。
+
+insert测试
+
+```
+MogDB=> select dblink_exec('mydblink', 'insert into t1 select generate_series(10,20), ''hello''');
+ dblink_exec
+-------------
+ INSERT 0 11
+(1 row)
+
+```
+
+update测试
+
+```
+MogDB=> select dblink_exec('mydblink', 'update t1 set info=''ten'' where id=10');
+ dblink_exec
+-------------
+ UPDATE 1
+(1 row)
+
+```
+
+delete测试
+
+```
+MogDB=> select dblink_exec('mydblink', 'delete from t1 where id=20');
+ dblink_exec
+-------------
+ DELETE 1
+(1 row)
+
+```
+
+truncate测试
+
+```
+MogDB=> select dblink_exec('mydblink', 'truncate t1');
+ dblink_exec
+----------------
+ TRUNCATE TABLE
+(1 row)
+
+```
+
+##### 断开实例
+
+```
+MogDB=> select dblink_disconnect('mydblink');
+ dblink_disconnect
+-------------------
+ OK
+(1 row)
+```
diff --git "a/content/zh/post/pengchong/MogDB\346\217\222\344\273\266\344\271\213\351\253\230\351\200\237\347\201\214\346\225\260.md" "b/content/zh/post/pengchong/MogDB\346\217\222\344\273\266\344\271\213\351\253\230\351\200\237\347\201\214\346\225\260.md"
new file mode 100644
index 0000000000000000000000000000000000000000..07187251fcffc76ba5a3e1d1527de91b1859100c
--- /dev/null
+++ "b/content/zh/post/pengchong/MogDB\346\217\222\344\273\266\344\271\213\351\253\230\351\200\237\347\201\214\346\225\260.md"
@@ -0,0 +1,235 @@
++++
+
+title = "MogDB插件之高速灌数"
+
+date = "2022-05-12"
+
+tags = ["MogDB插件之高速灌数"]
+
+archives = "2022-05"
+
+author = "彭冲"
+
+summary = "MogDB插件之高速灌数"
+
+img = "/zh/post/pengchong/title/img9.png"
+
+times = "10:20"
++++
+
+# MogDB插件之高速灌数
+
+本文出处:[https://www.modb.pro/db/336694](https://www.modb.pro/db/336694)
+
+对于写密集型系统,我们一般有如下方式来进行加速:
+
+1. 使用批量插入代替单条insert语句插入
+2. 更好的处理方式是使用copy语句代替insert语句
+3. 同时也可以使用多个session并行代替单个session的语句操作
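+
+(补充示意)前两种方式的写法大致如下,其中表t1与数据文件路径均为假设的示例;\copy为gsql/psql在客户端侧执行COPY的用法:
+
+```
+MogDB=> insert into t1 values (1,'a'),(2,'b'),(3,'c');
+MogDB=> \copy t1 from '/tmp/t1.csv' csv
+```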
+
+直接使用copy时,只要数据出现错误,整个导入就会中止:假如最后一条记录有问题,前面大量已导入的数据都会失效,这通常是我们不能接受的。
+
+因而从外部数据源导入数据时,应该考虑让导入操作持续进行,并把因错误被拒绝的数据另外保存起来。pgloader工具其实可以做到这点:虽然它不如copy速度快,但在外部数据格式不那么规整时,这是较好的方式。
+
+对于像外部灌数这样的特定场景,使用pg_bulkload工具有时甚至比copy还要快,这得益于它采取激进的高性能特性,能够跳过shared buffer以及绕过写WAL文件,另外它也可以处理坏的数据行。
+
+本文将在CentOS平台演示pg_bulkload插件的使用方法:
+
+### pg_bulkload插件准备
+
+注意:插件包的下载请参考[MogDB插件之跨库访问](https://www.modb.pro/db/336337)
+
+将官网下载的plugins-CentOS-x86-2.1.0.tar.gz上传到服务器后,解压:
+
+```
+$ tar zxvf plugins-CentOS-x86-2.1.0.tar.gz
+```
+
+将插件相关文件安装到MogDB数据库:
+
+- 方式一:使用脚本进行安装
+
+```
+$ ./gs_install_plugin_local -X clusterconfig.xml --pg_bulkload
+```
+
+- 方式二:手工拷贝安装
+
+```
+$ cd plugins/pg_bulkload
+$ cp pg_bulkload.so pg_timestamp.so /opt/mogdb210/lib/postgresql/
+$ cp pg_bulkload.control pg_bulkload.sql pg_bulkload--1.0.sql pg_bulkload--unpackaged--1.0.sql uninstall_pg_bulkload.sql /opt/mogdb210/share/postgresql/extension/
+$ /usr/bin/mkdir -p /opt/mogdb210/share/postgresql/contrib
+$ cp uninstall_pg_timestamp.sql pg_timestamp.sql /opt/mogdb210/share/postgresql/contrib
+$ cp pg_bulkload /opt/mogdb210/bin/
+```
+
+本文使用第二种方式。
+
+### 创建pg_bulkload扩展
+
+创建扩展的用户需要具有sysadmin权限,本文使用moguser用户:
+
+```
+MogDB=# \du moguser
+ List of roles
+ Role name | Attributes | Member of
+-----------+------------+-----------
+ moguser | Sysadmin | {}
+
+```
+
+下面使用moguser创建pg_bulkload扩展,并进行后续测试:
+
+```
+$ gsql -U moguser postgres -r
+gsql ((MogDB 2.1.0 build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+MogDB=> create extension pg_bulkload with schema public;
+CREATE EXTENSION
+
+```
+
+查看pg_bulkload扩展:
+
+```
+MogDB=> \dx pg_bulkload
+ List of installed extensions
+ Name | Version | Schema | Description
+-------------+---------+--------+-----------------------------------------------------------------
+ pg_bulkload | 1.0 | public | pg_bulkload is a high speed data loading utility for PostgreSQL
+(1 row)
+
+```
+
+### pg_bulkload测试
+
+##### 直接使用参数测试
+
+先创建表:
+
+```
+create table test_bulkload(id int, name varchar(128));
+```
+
+再创建一个txt文件,写入10万条数据:
+
+```
+$ seq 100000| awk '{print $0"|bulkload"}' > bulkload_output.txt
+```
+
+使用下面的命令导入数据:
+
+```
+$ pg_bulkload -i ./bulkload_output.txt -O test_bulkload -l test_bulkload.log -o "TYPE=csv" -o "DELIMITER=|" -d postgres -U moguser
+```
+
+执行结果如下:
+
+```
+NOTICE: BULK LOAD START
+NOTICE: BULK LOAD END
+ 0 Rows skipped.
+ 100000 Rows successfully loaded.
+ 0 Rows not loaded due to parse errors.
+ 0 Rows not loaded due to duplicate errors.
+ 0 Rows replaced with new rows.
+
+```
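+
+(补充示例)导入过程的详细信息会记录到上面-l参数指定的日志文件中,排查问题时可直接查看(假设仍在执行导入命令的目录下):
+
+```
+$ cat test_bulkload.log
+```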
+
+连接数据库,查看数据是否导入成功:
+
+```
+MogDB=> select count(1) from test_bulkload ;
+ count
+--------
+ 100000
+(1 row)
+
+```
+
+##### 使用控制文件测试
+
+先创建表:
+
+```
+CREATE TABLE foo (a bigint, b text);
+```
+
+创建模拟导入数据文件foo.csv:
+
+```
+$ more foo.csv
+1,one
+2
+3,three,111
+four,4
+5,five
+
+```
+
+创建控制文件:
+
+```
+$ more sample_csv.ctl
+OUTPUT = moguser.foo
+INPUT = /home/omm/foo.csv
+LOGFILE=/home/omm/pg_bulkload.log
+LIMIT = INFINITE
+TYPE = CSV
+DELIMITER = ","
+QUOTE = "\""
+ESCAPE = "\""
+WRITER = DIRECT
+MULTI_PROCESS = NO
+PARSE_ERRORS = -1
+PARSE_BADFILE=/home/omm/pg_bulkload_bad.log
+DUPLICATE_ERRORS = 0
+ON_DUPLICATE_KEEP = NEW
+TRUNCATE = YES
+
+```
+
+控制文件中,OUTPUT描述数据导入的目标表,INPUT描述输入的数据文件,LOGFILE描述导入过程的日志,DELIMITER描述数据分隔符,PARSE_BADFILE描述解析失败的记录文件,其他参数可以参考字面含义。
+
+然后使用下面的命令:
+
+```
+$ pg_bulkload sample_csv.ctl -d postgres -U moguser
+```
+
+执行结果如下:
+
+```
+NOTICE: BULK LOAD START
+NOTICE: BULK LOAD END
+ 0 Rows skipped.
+ 2 Rows successfully loaded.
+ 3 Rows not loaded due to parse errors.
+ 0 Rows not loaded due to duplicate errors.
+ 0 Rows replaced with new rows.
+WARNING: some rows were not loaded due to errors.
+
+```
+
+上面命令执行完成之后,错误数据我们可以从pg_bulkload_bad.log文件进行查看:
+
+```
+$ more pg_bulkload_bad.log
+2
+3,three,111
+four,4
+```
+
+正常的数据记录可以从foo表中查看:
+
+```
+MogDB=> select * from foo;
+ a | b
+---+------
+ 1 | one
+ 5 | five
+(2 rows)
+```
diff --git "a/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\344\275\277\347\224\250\351\203\250\345\210\206\350\241\250\350\276\276\345\274\217\347\264\242\345\274\225\347\272\246\346\235\237\345\215\225\344\270\252null\345\200\274.md" "b/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\344\275\277\347\224\250\351\203\250\345\210\206\350\241\250\350\276\276\345\274\217\347\264\242\345\274\225\347\272\246\346\235\237\345\215\225\344\270\252null\345\200\274.md"
new file mode 100644
index 0000000000000000000000000000000000000000..e64d591728eb295d0a81a59adc47f5c8177678b4
--- /dev/null
+++ "b/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\344\275\277\347\224\250\351\203\250\345\210\206\350\241\250\350\276\276\345\274\217\347\264\242\345\274\225\347\272\246\346\235\237\345\215\225\344\270\252null\345\200\274.md"
@@ -0,0 +1,117 @@
++++
+
+title = "MogDB数据库使用部分表达式索引约束单个null值"
+
+date = "2022-04-14"
+
+tags = ["MogDB数据库使用部分表达式索引约束单个null值"]
+
+archives = "2022-04"
+
+author = "彭冲"
+
+summary = "MogDB数据库使用部分表达式索引约束单个null值"
+
+img = "/zh/post/pengchong/title/img9.png"
+
+times = "10:20"
++++
+
+# MogDB数据库使用部分表达式索引约束单个null值
+
+本文出处:https://www.modb.pro/db/49802
+
+
+> SQL标准允许在唯一约束列上插入多个null值,而有些数据库只允许在唯一约束列上插入单个null值。MogDB数据库遵守SQL标准,可以插入多个null值。
+
+### 默认行为
+
+快速搭建MogDB环境:
+
+```
+docker run --name mogdb \
+  --privileged=true \
+  --detach \
+  --env GS_PASSWORD=Admin@1234 \
+  --publish 15400:5432 \
+  swr.cn-east-3.myhuaweicloud.com/enmotech/mogdb:2.0.1_amd
+```
+
+进入容器:
+
+```
+docker exec -it mogdb bash
+```
+
+测试插入null值,使用gsql登录数据库:
+
+```sql
+omm=# create table abc(a2 varchar(10) unique);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "abc_a2_key" for table "abc"
+CREATE TABLE
+omm=#
+omm=# insert into abc values('1');
+INSERT 0 1
+omm=#
+omm=# insert into abc values(null);
+INSERT 0 1
+omm=#
+omm=# insert into abc values(null);
+INSERT 0 1
+
+```
+
+可以看到默认情况下是可以插入多个null值的,因为PG里唯一索引是基于equal等值比较的,null不等于任何值(包括null值)。Java开发定义实体类时也需要重写equal方法,进行null判断。
+
+### 使用部分表达式索引
+
+```sql
+omm=# drop table if exists abc;
+DROP TABLE
+omm=#
+omm=# create table abc(a2 varchar(10));
+CREATE TABLE
+omm=#
+omm=# CREATE UNIQUE INDEX idx_abc ON abc ((a2 is null)) WHERE a2 IS NULL;
+CREATE INDEX
+omm=#
+omm=# insert into abc values('1');
+INSERT 0 1
+omm=#
+omm=# insert into abc values(null);
+INSERT 0 1
+omm=#
+omm=# insert into abc values(null);
+ERROR: duplicate key value violates unique constraint "idx_abc"
+DETAIL: Key ((a2 IS NULL))=(t) already exists.
+
+```
+
+可以看到:使用where条件对a2列为null值的行创建了唯一索引并存储为true值,上面第二条插入null值的语句报错。
+
+下面是对组合索引使用同样的方法:
+
+```sql
+omm=# drop table if exists abc;
+DROP TABLE
+omm=#
+omm=# create table abc(a1 int4, a2 varchar(10));
+CREATE TABLE
+omm=#
+omm=# CREATE UNIQUE INDEX idx_abc_1 ON abc (a1,a2) WHERE a2 IS NOT NULL;
+CREATE INDEX
+omm=#
+omm=# insert into abc values(1,'1');
+INSERT 0 1
+omm=#
+omm=# insert into abc values(1,'1');
+ERROR: duplicate key value violates unique constraint "idx_abc_1"
+DETAIL: Key (a1, a2)=(1, 1) already exists.
+omm=#
+omm=# CREATE UNIQUE INDEX idx_abc_2 ON abc (a1,(a2 is null)) WHERE a2 is NULL;
+CREATE INDEX
+omm=#
+omm=# insert into abc values(1,null);
+INSERT 0 1
+omm=#
+omm=# insert into abc values(1,null);
+ERROR: duplicate key value violates unique constraint "idx_abc_2"
+DETAIL: Key (a1, (a2 IS NULL))=(1, t) already exists.
+omm=#
+```
diff --git "a/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\345\270\270\350\247\201\351\227\256\347\255\224.md" "b/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\345\270\270\350\247\201\351\227\256\347\255\224.md"
new file mode 100644
index 0000000000000000000000000000000000000000..465304cf32821fd1d2c4b0fa563d24e7bdcd34bb
--- /dev/null
+++ "b/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\345\270\270\350\247\201\351\227\256\347\255\224.md"
@@ -0,0 +1,112 @@
++++
+
+title = "MogDB数据库常见问答"
+
+date = "2022-05-18"
+
+tags = ["MogDB数据库常见问答"]
+
+archives = "2022-05"
+
+author = "彭冲"
+
+summary = "MogDB数据库常见问答"
+
+img = "/zh/post/pengchong/title/img9.png"
+
+times = "10:20"
++++
+
+# MogDB数据库常见问答
+
+本文出处:[https://www.modb.pro/db/393741](https://www.modb.pro/db/393741)
+
+本文将不断汇总MogDB日常使用过程中遇到的一些常见问题。
+
+### Q1.创建普通用户之后,为什么没有创建表的权限?
+
+参考下面这段代码:
+
+```
+[omm@mogdb ~]$ gsql -Uomm postgres -r
+gsql ((openGauss 3.0.0 build 02c14696) compiled at 2022-04-01 18:12:34 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+openGauss=# create database mydb1;
+CREATE DATABASE
+openGauss=# create user user1 password 'Admin@1234';
+CREATE ROLE
+openGauss=# alter database mydb1 owner to user1;
+ALTER DATABASE
+openGauss=# \q
+[omm@mogdb ~]$ gsql -d mydb1 -U user1 -r --password='Admin@1234'
+gsql ((openGauss 3.0.0 build 02c14696) compiled at 2022-04-01 18:12:34 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+mydb1=> create table t1(id int);
+ERROR: permission denied for schema public
+DETAIL: N/A
+mydb1=> create table user1.t1(id int);
+ERROR: schema "user1" does not exist
+
+```
+
+分析:MogDB里创建用户时会自动创建一个同名的schema,按理来说用户可以在自己的schema下创建对象,为什么上面的create table user1.t1(id int)执行不成功呢?从提示来看,当前连接的database下并没有这个schema。原因是:自动创建的同名schema建在创建用户时所连接的数据库里,而我们前面使用omm管理用户所做的操作都是在默认的postgres数据库下进行的。
+
+第一个报错ERROR: permission denied for schema public也很正常:
+
+```
+mydb1=> show search_path;
+ search_path
+----------------
+ "$user",public
+(1 row)
+```
+
+用户的增删改查操作会按默认查询路径设置值的顺序去查找:用户同名的schema在当前mydb1里没有,就查找public模式,上面的create操作就自然创建到public模式下了;而public模式的默认权限为空,也就是需要对public模式赋权后才能使用。
+
+针对上面的问题,我们使用管理用户连接mydb1,然后手工创建用户的schema并绑定给user1,之后就可以正常使用了。
+
+```
+[omm@mogdb ~]$ gsql -Uomm mydb1 -r
+gsql ((openGauss 3.0.0 build 02c14696) compiled at 2022-04-01 18:12:34 commit 0 last mr )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+mydb1=# create schema user1 authorization user1;
+CREATE SCHEMA
+mydb1=# \q
+```
+
+### Q2.不同的database之间可以跨库访问吗?
+
+不可以,数据库连接不能跨database,例如gsql访问其它数据库需要使用\connect切换连接:
+
+```
+openGauss=# \connect mydb1
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "mydb1" as user "omm".
+mydb1=# \conninfo
+You are connected to database "mydb1" as user "omm" via socket in "/tmp" at port "3000".
+```
+
+跨库访问需要使用dblink或者postgres_fdw,参考:[MogDB插件之跨库访问](https://www.modb.pro/db/336337)
+
+### Q3.database的owner为什么对schema里的对象没有权限?
+
+database的权限与表、视图等对象权限之间隔着一层schema的权限。database的owner只代表对其直属对象:schema、event trigger等有全部权限,并不具有schema里表、视图等对象的权限。
+
+### Q4.为什么function不像schema那样需要授权就能直接访问呢?
+
+参考下面的操作步骤:schema需要授予权限后才可以访问,而function为什么不需要授权就可以直接调用呢?
+![image.png](../images/20220413-52da4846-5d24-4a95-b24e-86dc22032a03.png)
+因为不同的数据库对象有不同的默认权限,可以参考下面PG文档里的这张图:
+![image.png](../images/20220413-ba76382b-6682-4b86-8830-f93d14f127a0.png)
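+
+(补充示例)如果不希望函数默认对所有用户开放执行权限,可以显式收回public的execute权限,示意如下(其中模式名s1与函数名f1均为假设):
+
+```
+mydb1=# revoke execute on function s1.f1() from public;
+```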
+
+### Q5.如何像MySQL数据库那样设置readonly只读用户?
+
+MySQL里的database之间可以互相访问,没有隔离;MogDB里的database之间是隔离的。MySQL里的database相当于MogDB里的schema,因此可以在MogDB的schema级别设置数据只读,使用默认权限:
+
+```
+alter default privileges for role user1 in schema s1 grant select on tables to user2;
+```
+
+上面的设置表示:user1用户以后在schema s1下新建的表,user2用户都有select权限。注意:它并不能实现“以后任何用户(包括新增用户)在s1下新建的表,user2用户都有权限”。
diff --git "a/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\346\224\257\346\214\201R2DBC\345\223\215\345\272\224\345\274\217\345\215\217\350\256\256\350\256\277\351\227\256.md" "b/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\346\224\257\346\214\201R2DBC\345\223\215\345\272\224\345\274\217\345\215\217\350\256\256\350\256\277\351\227\256.md"
new file mode 100644
index 0000000000000000000000000000000000000000..7135a3dce9311646418126795f0f303dbc1138a1
--- /dev/null
+++ "b/content/zh/post/pengchong/MogDB\346\225\260\346\215\256\345\272\223\346\224\257\346\214\201R2DBC\345\223\215\345\272\224\345\274\217\345\215\217\350\256\256\350\256\277\351\227\256.md"
@@ -0,0 +1,75 @@
++++
+
+title = "MogDB数据库支持R2DBC响应式协议访问"
+
+date = "2022-04-12"
+
+tags = ["MogDB数据库支持R2DBC响应式协议访问"]
+
+archives = "2022-04"
+
+author = "彭冲"
+
+summary = "MogDB数据库支持R2DBC响应式协议访问"
+
+img = "/zh/post/pengchong/title/img9.png"
+
+times = "10:20"
++++
+
+# MogDB数据库支持R2DBC响应式协议访问
+
+本文出处:https://www.modb.pro/db/232405
+
+我们知道使用JDBC协议是阻塞式的连接。为了解决这个问题,出现了两个标准:一个是Oracle提出的ADBC(Asynchronous Database Access API),另一个是Pivotal提出的R2DBC(Reactive Relational Database Connectivity)。
+
+目前有部分关系型数据库实现了R2DBC协议,包括MySQL、MSSQL、PostgreSQL等。MogDB数据库兼容PostgreSQL R2DBC Driver,下面通过样例进行测试。
+
+### 首先快速搭建MogDB环境
+
+使用docker命令一键搭建:
+
+```
+docker run --name mogdb \
+  --privileged=true \
+  --detach \
+  --env GS_PASSWORD=Admin@1234 \
+  --publish 15400:5432 \
+  swr.cn-east-3.myhuaweicloud.com/enmotech/mogdb:2.0.1_amd
+```
+
+### 数据库结构化准备
+
+```sql
+create database productdb;
+\c productdb
+create user moguser password 'Admin@1234';
+\c productdb moguser
+
+CREATE TABLE product
+(
+id integer,
+description character varying(255),
+price numeric,
+PRIMARY KEY (id)
+);
+
+insert into product values(1,'PostgreSQL',0),
+(2,'MogDB',1);
+
+```
+
+### Java项目工程
+
+参考如下链接:https://github.com/vinsguru/vinsguru-blog-code-samples/tree/master/r2dbc/crud
+
+### 配置MogDB数据库连接信息
+
+修改工程项目下的application.properties文件:
+
+```properties
+spring.r2dbc.url=r2dbc:postgresql://192.168.137.227:15400/productdb
+spring.r2dbc.username=moguser
+spring.r2dbc.password=Admin@1234
+```
+
+### 运行程序进行测试
+
+点击R2dbcApplication文件,Run As运行。
+
+然后我们可以打开浏览器,输入接口地址进行测试,下面是查询所有产品的接口:http://localhost:8080/product/all
+
+更多R2DBC用法可以参考github上pgjdbc/r2dbc-postgresql
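+
+(补充示例)除浏览器外,也可以用curl直接验证上面的查询接口:
+
+```
+$ curl http://localhost:8080/product/all
+```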
"10:30" + ++++ + +# PostgreSQL/openGauss数据库易犯的十大错误 + +总结十点PostgreSQL/openGauss数据库中容易犯的错误。 + +## 1.同时设置日志行前缀和csvlog格式 + +比较常见大家同时配置下面这两个参数 + +``` +log_line_prefix = '%m %u %d %p' +log_destination='csvlog' +``` + +- %m是带毫秒的时间戳 +- %u是用户名 +- %d是数据库名 +- %p是进程ID + +然后当我们配置为csvlog日志时,日志行的内容项是固定的,所以当我们需要配置日志前缀,精简日志行的内容项时,log\_destination不能配置为csvlog。下面是正确的配置: + +``` +log_destination='stderr' +log_line_prefix = '%m %u %d %p' +``` + +## 2.不符合预期的日志轮换策略 + +日志轮换策略可以通过log\_rotation\_size参数按日志文件大小控制或者通过log\_rotation\_age参数按时间控制,但下面这四个参数需要合理组合使用。 + +``` +log_filename +log_truncate_on_rotation +log_rotation_age +log_rotation_size +``` + +- 方案一:每天生成一个新的日志文件 + + ``` + log_filename='postgresql-%Y-%m-%d.log' + log_truncate_on_rotation=off + log_rotation_age=1d + log_rotation_size=0 + ``` + +- 方案二:写满固定大小(如10MB),则进行切换 + + ``` + log_filename='postgresql-%Y-%m-%d_%H%M%S.log' + log_truncate_on_rotation=off + log_rotation_age=0 + log_rotation_size=10MB + ``` + + 这种方案我们一般是为了根据时间去查看日志,文件名根据日志量可以设置到时分秒,但这里设置log\_rotation\_size并不能严格控制固定大小。 + +- 方案三:保留固定天数的日志并循环覆盖,例如固定一周或者固定一个月 + + ``` + log_filename='postgresql-%u.log' + log_truncate_on_rotation=on + log_rotation_age=1d + log_rotation_size=0 + ``` + + +log\_filename常见的通配符变量 + +- %u是星期的数字表示,范围是\[1,7\],1代表星期一 +- %w也是星期的数字表示,范围是\[0,6\],0代表星期天 +- %d是月份中的天数表示,范围是\[01,31\] + +生产环境第三种方案更合适一些。 + +## 3.同步复制表的序列 + +看看下面这个例子,我们创建test表使用serial自增序列类型,系统帮我们生成了test\_id\_seq序列。 + +``` +postgres=# create table test(id serial primary key,name varchar unique); +CREATE TABLE +postgres=# \d test + Table "public.test" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+---------------------------------- + id | integer | | not null | nextval('test_id_seq'::regclass) + name | character varying | | | +Indexes: + "test_pkey" PRIMARY KEY, btree (id) + "test_name_key" UNIQUE CONSTRAINT, btree (name) +``` + +当我们复制t\_test表时,test表的序列引用也同时复制过来了,可以使用虚拟生成列来解决这个问题。 + +``` +postgres=# create table t_test(like test including all); +CREATE TABLE +postgres=# \d t_test + Table "public.t_test" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+---------------------------------- + id | integer | | not null | nextval('test_id_seq'::regclass) + name | character varying | | | +Indexes: + "t_test_pkey" PRIMARY KEY, btree (id) + "t_test_name_key" UNIQUE CONSTRAINT, btree (name) +``` + +openGauss对PG的这个问题做了修复,下面是openGauss复制t\_test时,序列按表名做了区分。 + +``` +omm=# \d t_test + Table "public.t_test" + Column | Type | Modifiers +--------+-------------------+----------------------------------------------------- + id | integer | not null default nextval('t_test_id_seq'::regclass) + name | character varying | +Indexes: + "t_test_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default + "t_test_name_key" UNIQUE CONSTRAINT, btree (name) TABLESPACE pg_default +``` + +## 4.跳变的序列值 + +创建序列seq1,设置cache为10,session A获取下一个值为1. 
+ +``` +postgres=# create sequence seq1 cache 10; +CREATE SEQUENCE +postgres=# select nextval('seq1'); + nextval +--------- + 1 +(1 row) +``` + +session B查询获取下一个值为11 + +``` +postgres=# select nextval('seq1'); + nextval +--------- + 11 +(1 row) +``` + +序列值插入为了保证连续性,要设置cache为1。 + +## 5.从任意库查询pg\_stat\_statements模块统计信息 + +pg\_stat\_statements模块用来跟踪SQL语句的执行统计信息,我们如果把该模块安装到postgres数据库,就只能连到postgres数据库进行查询,除非其它数据库也安装了该模块,否则会提示报错找不到。 + +无论任何操作,都需要连接到一个数据库,即使我们只想创建一个全局的数据库用户,所以选对数据库特别重要。 + +## 6.truncate操作理解为DML语句 + +log\_statement参数控制日志记录级别,有4个选项:none、ddl、mod、all。开启ddl,它会记录 create、alter和drop相关的语句,但不记录truncate。 + +truncate在Oracle中属于DDL语句,在PostgreSQL中属于DML语句。因此,当我们使用DDL日志记录语句时,无法记录到Truncate。 + +## 7.认为数据库的owner可以管理其下所有对象 + +数据库、模式、表的都有自己的owner,他们都属于实例中的对象,数据库owner只是具有数据库这个对象的CTc权限。数据库的默认权限为: + +- 允许public角色连接,即允许任何人连接。 +- 不允许除了超级用户和owner之外的任何人在数据库中创建schema。 +- 会自动创建名为public的schema,这个schema的所有权限已经赋予给public角色,即允许任何人在里面创建对象。 + +schema使用注意事项: + +schema的owner默认是该schema下的所有对象的owner,但是允许用户在别人的schema下创建对象,所以一个对象的owner和schema的owner可能不同,都有drop对象的权限。 + +## 8.认为public模式下的对象可以互相访问 + +public模式只是允许任何人在里面创建对象并管理自己的对象,并不能查看别人创建的对象。 + +## 9.创建索引时起名为表名称 + +单个数据库里,索引和表的名称不能重复,因为他们都属于relation。 + +``` +postgres=# create index a on a(id); +ERROR: relation "a" already exists +``` + +## 10.把walsender当作主库 + +通常我们从操作系统层查看主库有walsender,备库有walreceiver,并且walsender信息中可以看到备库的IP地址,可以初步判断主备状态正常。 + +但请注意有walsender或者数据库中能查到pg\_stat\_replication视图并不能断定是主库,仅在一主一备环境可以这样简单判断,下面的图可以看出,虽然有walsender,但它也是个备库。 + +![](figures/20210603-9b70ba89-658c-4902-818a-099c359808b4.png) + diff --git a/content/zh/post/pengchong/images/20220113-26ad3133-6232-463a-babd-c0199c1cb88e.png b/content/zh/post/pengchong/images/20220113-26ad3133-6232-463a-babd-c0199c1cb88e.png new file mode 100644 index 0000000000000000000000000000000000000000..5223e47f48f0f24ce73fbe7b48aa9572d84e9f33 Binary files /dev/null and b/content/zh/post/pengchong/images/20220113-26ad3133-6232-463a-babd-c0199c1cb88e.png differ diff --git a/content/zh/post/pengchong/images/20220113-52ebc3c8-4185-4422-b3c7-4caf1ee2e52e.png b/content/zh/post/pengchong/images/20220113-52ebc3c8-4185-4422-b3c7-4caf1ee2e52e.png new file mode 100644 index 0000000000000000000000000000000000000000..9a957f7cc54f6d396575c4357fb2206024a19497 Binary files /dev/null and b/content/zh/post/pengchong/images/20220113-52ebc3c8-4185-4422-b3c7-4caf1ee2e52e.png differ diff --git a/content/zh/post/pengchong/images/20220113-7d865eca-1576-4443-9d09-a0859b736b9b.png b/content/zh/post/pengchong/images/20220113-7d865eca-1576-4443-9d09-a0859b736b9b.png new file mode 100644 index 0000000000000000000000000000000000000000..960efca2768c4eb06a384749c8cc7f245a5d7117 Binary files /dev/null and b/content/zh/post/pengchong/images/20220113-7d865eca-1576-4443-9d09-a0859b736b9b.png differ diff --git a/content/zh/post/pengchong/images/20220113-9b619d9d-d87c-4b6e-aaa4-a5472f95b7ea.png b/content/zh/post/pengchong/images/20220113-9b619d9d-d87c-4b6e-aaa4-a5472f95b7ea.png new file mode 100644 index 0000000000000000000000000000000000000000..704c667b0ebca927648aa29c85ad1a57e1fdc1d7 Binary files /dev/null and b/content/zh/post/pengchong/images/20220113-9b619d9d-d87c-4b6e-aaa4-a5472f95b7ea.png differ diff --git a/content/zh/post/pengchong/images/20220113-cd0cf621-d2ff-41e4-b01f-aabce51fbe44.png b/content/zh/post/pengchong/images/20220113-cd0cf621-d2ff-41e4-b01f-aabce51fbe44.png new file mode 100644 index 0000000000000000000000000000000000000000..5b9bf7118f575b93063382165b9ac9b2e4a52ad7 Binary files /dev/null and 
b/content/zh/post/pengchong/images/20220113-cd0cf621-d2ff-41e4-b01f-aabce51fbe44.png differ diff --git a/content/zh/post/pengchong/images/20220301-fea20923-c6e0-4fa8-92c6-81979a109dcf.png b/content/zh/post/pengchong/images/20220301-fea20923-c6e0-4fa8-92c6-81979a109dcf.png new file mode 100644 index 0000000000000000000000000000000000000000..303970a86ac446a8b422a4efbc28d0d3638b646a Binary files /dev/null and b/content/zh/post/pengchong/images/20220301-fea20923-c6e0-4fa8-92c6-81979a109dcf.png differ diff --git a/content/zh/post/pengchong/images/20220413-52da4846-5d24-4a95-b24e-86dc22032a03.png b/content/zh/post/pengchong/images/20220413-52da4846-5d24-4a95-b24e-86dc22032a03.png new file mode 100644 index 0000000000000000000000000000000000000000..aaed112d095ff388a33ff1840a5a5c200d7e1ba2 Binary files /dev/null and b/content/zh/post/pengchong/images/20220413-52da4846-5d24-4a95-b24e-86dc22032a03.png differ diff --git a/content/zh/post/pengchong/images/20220413-ba76382b-6682-4b86-8830-f93d14f127a0.png b/content/zh/post/pengchong/images/20220413-ba76382b-6682-4b86-8830-f93d14f127a0.png new file mode 100644 index 0000000000000000000000000000000000000000..2302c775be057bd4f9d2029bc581a59fbf169d6d Binary files /dev/null and b/content/zh/post/pengchong/images/20220413-ba76382b-6682-4b86-8830-f93d14f127a0.png differ diff --git a/content/zh/post/pengchong/images/20220506-950bbc00-54c2-4d4b-b00e-b391fe3218ea.png b/content/zh/post/pengchong/images/20220506-950bbc00-54c2-4d4b-b00e-b391fe3218ea.png new file mode 100644 index 0000000000000000000000000000000000000000..0b1a6354a5cdb1dc76f422e2d681cfca1afc2ee8 Binary files /dev/null and b/content/zh/post/pengchong/images/20220506-950bbc00-54c2-4d4b-b00e-b391fe3218ea.png differ diff --git a/content/zh/post/pengchong/images/20220506-9fe0185e-139e-48cc-8c01-51b3b40c3863.png b/content/zh/post/pengchong/images/20220506-9fe0185e-139e-48cc-8c01-51b3b40c3863.png new file mode 100644 index 0000000000000000000000000000000000000000..d86bad035d762ae0b7c69f7df5949001b7f85e1f Binary files /dev/null and b/content/zh/post/pengchong/images/20220506-9fe0185e-139e-48cc-8c01-51b3b40c3863.png differ diff --git a/content/zh/post/pengchong/images/20220506-c82e9ad3-60c5-4041-9539-970abf9072e8.png b/content/zh/post/pengchong/images/20220506-c82e9ad3-60c5-4041-9539-970abf9072e8.png new file mode 100644 index 0000000000000000000000000000000000000000..78e6fc0746143f80ea155a841d355dee9b798281 Binary files /dev/null and b/content/zh/post/pengchong/images/20220506-c82e9ad3-60c5-4041-9539-970abf9072e8.png differ diff --git a/content/zh/post/pengchong/images/20220506-d68be5cb-1ba1-4ff6-b61a-e85733d3fa88.png b/content/zh/post/pengchong/images/20220506-d68be5cb-1ba1-4ff6-b61a-e85733d3fa88.png new file mode 100644 index 0000000000000000000000000000000000000000..c3e22ff5e23a1f5e5cdebd78a285c3c686350aec Binary files /dev/null and b/content/zh/post/pengchong/images/20220506-d68be5cb-1ba1-4ff6-b61a-e85733d3fa88.png differ diff --git a/content/zh/post/pengchong/images/20220506-df18f7fb-340a-4564-9f65-36efa85cf789.png b/content/zh/post/pengchong/images/20220506-df18f7fb-340a-4564-9f65-36efa85cf789.png new file mode 100644 index 0000000000000000000000000000000000000000..7e853c70a0362253e755b228c49fad0520b78e1f Binary files /dev/null and b/content/zh/post/pengchong/images/20220506-df18f7fb-340a-4564-9f65-36efa85cf789.png differ diff --git "a/content/zh/post/pengchong/openGaussMogDB\345\244\247\345\257\271\350\261\241LargeObject\345\255\230\345\217\226\346\265\213\350\257\225.md" 
"b/content/zh/post/pengchong/openGaussMogDB\345\244\247\345\257\271\350\261\241LargeObject\345\255\230\345\217\226\346\265\213\350\257\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..53c7ac1b2031178d70cb02ef2d4a15c09657379c --- /dev/null +++ "b/content/zh/post/pengchong/openGaussMogDB\345\244\247\345\257\271\350\261\241LargeObject\345\255\230\345\217\226\346\265\213\350\257\225.md" @@ -0,0 +1,237 @@ ++++ + +title = "openGauss/MogDB大对象LargeObject存取测试" + +date = "2022-04-15" + +tags = ["openGauss/MogDB大对象LargeObject存取测试"] + +archives = "2022-04" + +author = "彭冲" + +summary = "openGauss/MogDB大对象LargeObject存取测试" + +img = "/zh/post/pengchong/title/img9.png" + +times = "10:20" ++++ + +# openGauss/MogDB大对象LargeObject存取测试 + +本文出处:https://www.modb.pro/db/214758 + +
+
+openGauss/MogDB数据库里bytea二进制类型受segment size编译参数限制,默认不能超过1GB;如果字段存储数据超过1GB,可以使用lo(Large Object)扩展类型。
+
+### lo类型需要先创建lo extension
+
+```sql
+$ gsql -p5432 -Uomm postgres -r
+gsql ((MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:15:21 commit 0 last mr  )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+postgres=# create extension lo;
+CREATE EXTENSION
+```
+
+创建完lo扩展,我们新建test_lo表,info字段使用lo类型。
+
+```sql
+postgres=# create table test_lo(id int,info lo);
+CREATE TABLE
+```
+
+创建test_lo表管理触发器,对update和delete操作使用lo_manage函数管理,不然会产生孤立大对象。
+
+```sql
+postgres=# create trigger test_lo before UPDATE OR DELETE ON test_lo FOR EACH ROW EXECUTE procedure lo_manage(info);
+WARNING: Trigger function with non-plpgsql type is not recommended.
+DETAIL: Non-plpgsql trigger function are not shippable by default.
+HINT: Unshippable trigger may lead to bad performance.
+CREATE TRIGGER
+```
+
+使用dd生成2GB文件
+
+```
+postgres=# \! dd if=/dev/zero of=test_lo bs=1M count=2048 && sync
+记录了2048+0 的读入
+记录了2048+0 的写出
+2147483648字节(2.1 GB,2.0 GiB)已复制,0.805435 s,2.7 GB/s
+```
+
+### 测试lo_import函数导入数据到数据表
+
+```
+postgres=# insert into test_lo values(1,lo_import('/home/omm/test_lo'));
+INSERT 0 1
+```
+
+可以看到数据可以正常导入,如果不使用lo类型,使用bytea类型会提示下面的报错。
+
+```
+ERROR: requested length too large
+```
+
+### 测试lo_export函数导出数据表数据到文件
+
+```sql
+postgres=# select lo_export(test_lo.info,'/home/omm/test_ext_lo') from test_lo where id=1;
+ lo_export
+-----------
+         1
+(1 row)
+```
+
+可以看到数据正常导出。
+
+查看导入导出的数据文件,也可以使用diff命令进行比对。
+
+```
+postgres=# \! ls -lh test_*
+-rw-r--r-- 1 omm dbgrp 2.0G 12月 17 13:00 test_ext_lo
+-rw------- 1 omm dbgrp 2.0G 12月 17 12:58 test_lo
+```
+
+### 查看数据表大对象字段大小
+
+分两步进行,首先查大对象字段的oid(lo类型字段在用户表里面只存储一个oid引用指针,并不实际存数据)
+
+```sql
+postgres=# select id,info from test_lo;
+ id | info
+----+-------
+  1 | 16392
+(1 row)
+```
+
+实际数据使用多条bytea记录存储在pg_largeobject表,可以根据oid查询统计字段的大小
+
+```
+postgres=# select loid,pg_size_pretty(sum(octet_length(data))) from pg_largeobject where loid =16392 group by loid;
+ loid  | pg_size_pretty
+-------+----------------
+ 16392 | 2048 MB
+(1 row)
+```
+
+也可以使用如下函数来查询
+
+```plsql
+create or replace function get_lo_size(oid)
+returns bigint
+volatile strict
+as $function$
+declare
+  fd integer;
+  sz bigint;
+begin
+  fd := lo_open($1, x'40000'::int);
+  perform lo_lseek64(fd, 0, 2);
+  sz := lo_tell64(fd);
+  perform lo_close(fd);
+  return sz;
+end;
+$function$ language plpgsql;
+```
+
+查询结果如下
+
+```sql
+postgres=# select pg_size_pretty(get_lo_size(16392));
+ pg_size_pretty
+----------------
+ 2048 MB
+(1 row)
+```
+
+再来测试JDBC应用层的使用。
+
+### JDBC-Java文件入库
+
+```java
+    public static void main(String[] args) throws Exception{
+        Class.forName("org.postgresql.Driver");
+
+        Connection conn = DriverManager.getConnection("jdbc:postgresql://ip:port/dbname","username","password");
+
+        conn.setAutoCommit(false);
+
+        LargeObjectManager lobj = conn.unwrap(org.postgresql.PGConnection.class).getLargeObjectAPI();
+
+        long oid = lobj.createLO(LargeObjectManager.READ | LargeObjectManager.WRITE);
+
+        LargeObject obj = lobj.open(oid, LargeObjectManager.WRITE);
+
+        File file = new File("c:/work/test_lo");
+        FileInputStream fis = new FileInputStream(file);
+
+        byte buf[] = new byte[10*1024*1024];
+        int s, tl = 0;
+        while ((s = fis.read(buf, 0, 2048)) > 0)
+        {
+            obj.write(buf, 0, s);
+            tl += s;
+        }
+
+        obj.close();
+
+        PreparedStatement ps = 
conn.prepareStatement("INSERT INTO test_lo VALUES (?, ?)"); + ps.setInt(1, 100); + ps.setLong(2, oid); + ps.executeUpdate(); + ps.close(); + fis.close(); + + conn.commit(); + conn.close(); + + } + +``` + +### JDBC-Java读数据输出到文件 + +```java + public static void main(String[] args) throws Exception{ + Class.forName("org.postgresql.Driver"); + + Connection conn = DriverManager.getConnection("jdbc:postgresql://ip:port/dbname","username","password"); + + conn.setAutoCommit(false); + + LargeObjectManager lobj = conn.unwrap(org.postgresql.PGConnection.class).getLargeObjectAPI(); + + PreparedStatement ps = conn.prepareStatement("SELECT info FROM test_lo WHERE id = ?"); + ps.setInt(1, 100); + ResultSet rs = ps.executeQuery(); + + File file = new File("c:/work/test_out_lo"); + FileOutputStream fos = new FileOutputStream(file); + + while (rs.next()) + { + long oid = rs.getLong(1); + LargeObject obj = lobj.open(oid, LargeObjectManager.READ); + + byte buf[] = new byte[10*1024*1024]; + int s, tl = 0; + while ((s = obj.read(buf, 0, 2048)) > 0) + { + fos.write(buf, 0, s); + tl += s; + } + + obj.close(); + } + rs.close(); + ps.close(); + fos.close(); + + conn.commit(); + conn.close(); + + } +``` diff --git "a/content/zh/post/pengchong/openGaussMogDB\351\233\266\345\255\227\350\212\202\351\227\256\351\242\230\345\244\204\347\220\206.md" "b/content/zh/post/pengchong/openGaussMogDB\351\233\266\345\255\227\350\212\202\351\227\256\351\242\230\345\244\204\347\220\206.md" new file mode 100644 index 0000000000000000000000000000000000000000..b9a142ac6e8b53edb566ebb19b4ab08c72f62ed9 --- /dev/null +++ "b/content/zh/post/pengchong/openGaussMogDB\351\233\266\345\255\227\350\212\202\351\227\256\351\242\230\345\244\204\347\220\206.md" @@ -0,0 +1,201 @@ ++++ + +title = "openGauss/MogDB零字节问题处理" + +date = "2022-04-12" + +tags = ["openGauss/MogDB零字节问题处理"] + +archives = "2022-04" + +author = "彭冲" + +summary = "openGauss/MogDB零字节问题处理" + +img = "/zh/post/pengchong/title/img6.png" + +times = "10:20" ++++ + +# openGauss/MogDB零字节问题处理 + +本文出处:https://www.modb.pro/db/196647 + + + +问题描述:java应用端程序调用GZIP压缩类对数据进行编码压缩后入库 ,然后从数据库取出进行解压,原来再mysql数据库中是正常的,但迁移到openGauss/mogdb之后,解压出来的数据是乱码,不正常。 + +mysql端表结构如下: + +```sql +CREATE TABLE `test` ( + `id` bigint(20) NOT NULL, + `info` varchar(20) NOT NULL, + `info2` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC; +``` + +迁移到MogDB后表结构如下: + +```sql +create table test( id int, info text, info2 text ); +``` + +java压缩接口方法如下: + +```java + public static String compress(String str) throws IOException { + if (null == str || str.length() <= 0) { + return str; + } + GZIPOutputStream gzip = null; + // 创建一个新的输出流 + ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + // 使用默认缓冲区大小创建新的输出流 + gzip = new GZIPOutputStream(out); + // 将字节写入此输出流 + gzip.write(str.getBytes("utf-8")); + // 因为后台默认字符集有可能是GBK字符集,所以此处需指定一个字符集 + gzip.close(); + // 使用指定的 charsetName,通过解码字节将缓冲区内容转换为字符串 + return out.toString("ISO-8859-1"); + } finally { + closeQuietly(gzip); + closeQuietly(out); + } + } + +``` + +java解压接口方法如下: + +```java + public static String unCompress(String str) throws IOException { + GZIPInputStream gzip = null; + if (null == str || str.length() <= 0) { + return str; + } + // 创建一个新的输出流 + ByteArrayOutputStream out = new ByteArrayOutputStream(); + // 创建一个 ByteArrayInputStream,使用 buf 作为其缓冲 区数组 + ByteArrayInputStream in = new ByteArrayInputStream(str.getBytes("ISO-8859-1")); + try { + // 使用默认缓冲区大小创建新的输入流 + gzip = new 
GZIPInputStream(in); + byte[] buffer = new byte[256]; + int n = 0; + // 将未压缩数据读入字节数组 + while ((n = gzip.read(buffer)) >= 0) { + out.write(buffer, 0, n); + } + // 使用指定的 charsetName,通过解码字节将缓冲区内容转换为字符串 + return out.toString("utf-8"); + } finally { + closeQuietly(gzip); + closeQuietly(in); + closeQuietly(out); + } + } + +``` + +测试用例部分关键代码参考如下: +1.对UTF8编码的字符串数据进行压缩,然后存到数据库中 + +```java +String str = "{\"name\":\"jerome\",\"familyName\":\"peng\",\"company\":\"enmotech\"}"; + +System.out.println("input:"+str); + +String compress_java = GZipUtils.compress(str); + + try{ + ps = conn.prepareStatement(sql); + ps.setInt(1, 100); + ps.setString(2, str); + ps.setString(3, compress_java); + ps.execute(); + } catch (Exception e) { + e.printStackTrace(); + } + +``` + +2.从数据库中取出字段进行解密 + +```java + sql = " select info,info2 from test where id=100"; + ResultSet rs = null; + try{ + ps = conn.prepareStatement(sql); + rs = ps.executeQuery(); + while (rs.next()) { + String compress_db = rs.getString(2); + String unCompress= GZipUtils2.unCompress(compress_db ); + System.out.println("output:"+unCompress); + } + } catch (Exception e) { + e.printStackTrace(); + } +``` + +期望结果是从数据库中取出来的字符串能够解压出原始数据。也就是上面的unCompress变量输出的结果应该要与上面的str变量输出结果一致,应为: + +``` +{"name":"jerome","familyName":"peng","company":"enmotech"} +``` + +如果我们在pg数据库里进行测试,上面测试第一步会报错提示无法对0字节进行存储 + +``` +org.postgresql.util.PSQLException: ERROR: invalid byte sequence for encoding "UTF8": 0x00 +``` + +但在openGauss/MogDB里面,数据可以正常存储,不会报错,但是压缩接口进行解码时数据显示乱码。 + +下面我们对比入库前和入库后的字节序列(以hex字符形式显示,两个字符表示一个字节): +入库前的hex字符串 + +``` +1f8b0800000000000000ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d003efb28273a000000 +``` + +入库后的hex字符串 + +``` +1f8b0820202020202020ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d203efb28273a202020 +``` + +我们发现其实是00与20的差异,所有的hex 00被转义为了hex 20,也就是0字节被转义为了空格。 + +既然知道了这个差异,那我们对取出的数据做一次反向替换,应该可以解决这个问题。 + +我们可以按字节进行读取,如果数值是32(hex 20对应十进制32)的字节,那我们就替换为0字节。 + +```java +if(bytes_src[i]==32) { + bytes_dest[i]=(byte)0; +}else { + bytes_dest[i]=bytes_src[i]; +} +``` + +这样修改之后测试发现还是有问题,因为压缩后的字节数据里可能也包含hex 20,这样我们会把不该替换的字节也做了误处理。 + +进一步修正为只对首尾固定的部分进行处理,思路来源与GZIP公共类。 + +``` +//头部10个字节或者尾部8个字节还原0字节 if((i<=10 || i>=len-1-8) && bytes_src[i]==32) { bytes_dest[i]=(byte)0; }else { bytes_dest[i]=bytes_src[i]; } +``` + +这样处理后,测试数据可以正常解压,测试结果如下: + +```java +input:{"name":"jerome","familyName":"peng","company":"enmotech"} +HEX_ja:1f8b0800000000000000ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d003efb28273a000000 +HEX_db:1f8b0820202020202020ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d203efb28273a202020 +HEX_cv:1f8b0800000000000000ab56ca4bcc4d55b252ca4a2dca07327494d2127333732afd20a205a979e940b1e4fcdc82c4bc4aa0406a5e6e7e496a7286522d003efb28273a000000 +output:{"name":"jerome","familyName":"peng","company":"enmotech"} +可以看到输入与输出内容是一致的。 +``` diff --git "a/content/zh/post/pengchong/openGauss\351\205\215\347\275\256IPv6.md" "b/content/zh/post/pengchong/openGauss\351\205\215\347\275\256IPv6.md" new file mode 100644 index 0000000000000000000000000000000000000000..6ccdee427e1f5d5306222e28e0d4a335012ba594 --- /dev/null +++ "b/content/zh/post/pengchong/openGauss\351\205\215\347\275\256IPv6.md" @@ -0,0 +1,236 @@ ++++ + +title = "openGauss配置IPv6" + +date = "2022-04-02" + +tags = ["openGauss配置IPv6"] + +archives = "2022-04" + +author = "彭冲" + +summary = 
"openGauss配置IPv6" + +img = "/zh/post/pengchong/title/img9.png" + +times = "11:29" + ++++ + +# openGauss配置IPv6 + +openGauss/MogDB支持多种网络接口,假如我们想在支持IPv6的网络上部署使用,只需简单操作即可,本文将介绍在Centos上如何配置使用。 + +### 关于IPv6 + +IPv6(Internet Protocol Version 6),是Internet Engineering Task Force (IETF)设计用于替代IPv4的下一代IP协议,使用IPv6能解决网络地址资源数量的问题。 + +我们使用ipconfig /all命令查看windows网络接口,会看到IPv6地址。 + +``` +以太网适配器 以太网 7: + + 本地链接 IPv6 地址. . . . . . . . : fe80::828a:5e20:53cb:7719%6(首选) + IPv4 地址 . . . . . . . . . . . . : 192.168.137.68(首选) +``` + +Centos下使用ip addr命令查看linux网络接口,也会看到IPv6地址。 + +``` +# ip addr +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: enp0s3: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:b5:54:32 brd ff:ff:ff:ff:ff:ff + inet 192.168.137.101/24 brd 192.168.137.255 scope global enp0s3 + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:feb5:5432/64 scope link + valid_lft forever preferred_lft forever +``` + +### IPv6分类 + +#### 1.本地关联IPv6 + +本地关联的IPv6,是以fe80开头,与网卡的物理地址(MAC地址)有关,不需要通过DHCP自动分配或者手工设置。 + +#### 2.全局IPv6 + +如果需要跨网络或者跨路由器进行通信,则需要使用全局的IPv6。 + +### 创建全局IPv6 + +创建全局IPv6有多种的方式,例如DHCPv6、Stateless address autoconfiguration (SLAAC) 以及手工配置。 + +手工配置可以使用ip命令来配置: + +``` +# ip -6 addr add 2022:1:0:0::db1/64 dev enp0s3 +``` + +或者使用ifconfig命令来配置: + +``` +# ifconfig enp0s3 inet6 add 2022:1:0:0::db1/64 +``` + +通过上面任意一种方式配置后,可以看到enp0s3网络接口将增加一个inet6,并且是global属性的。 + +``` +# ip addr +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: enp0s3: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 08:00:27:b5:54:32 brd ff:ff:ff:ff:ff:ff + inet 192.168.137.101/24 brd 192.168.137.255 scope global enp0s3 + valid_lft forever preferred_lft forever + inet6 2022:1::db1/64 scope global + valid_lft forever preferred_lft forever + inet6 fe80::a00:27ff:feb5:5432/64 scope link + valid_lft forever preferred_lft forever +``` + +注意:上面IPv6字符串配置中有db1,这符合HEX字符规则,非HEX字符则不允许设置,比如我们把db换成dx,则会提示下面的错误信息。 + +``` +# ifconfig enp0s3 inet6 add 2022:1:0:0::dx1/64 +2022:1:0:0::dx1: Resolver Error 0 (no error) +``` + +### IPv6连通性测试 + +在本地使用ping6进行连通性测试,先使用全局IPv6进行测试 + +``` +# ping6 2022:1::db1 -c3 +PING 2022:1::db1(2022:1::db1) 56 data bytes +64 bytes from 2022:1::db1: icmp_seq=1 ttl=64 time=0.027 ms +64 bytes from 2022:1::db1: icmp_seq=2 ttl=64 time=0.047 ms +64 bytes from 2022:1::db1: icmp_seq=3 ttl=64 time=0.028 ms + +--- 2022:1::db1 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 2000ms +rtt min/avg/max/mdev = 0.027/0.034/0.047/0.009 ms +``` + +再使用本地关联IPv6进行测试,此时需要带上网络接口名称 + +``` +# ping6 fe80::a00:27ff:feb5:5432%enp0s3 -c3 +PING fe80::a00:27ff:feb5:5432%enp0s3(fe80::a00:27ff:feb5:5432%enp0s3) 56 data bytes +64 bytes from fe80::a00:27ff:feb5:5432%enp0s3: icmp_seq=1 ttl=64 time=0.040 ms +64 bytes from fe80::a00:27ff:feb5:5432%enp0s3: icmp_seq=2 ttl=64 time=0.041 ms +64 bytes from fe80::a00:27ff:feb5:5432%enp0s3: icmp_seq=3 ttl=64 time=0.022 ms + +--- fe80::a00:27ff:feb5:5432%enp0s3 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 2000ms +rtt min/avg/max/mdev = 0.022/0.034/0.041/0.010 ms 
+``` + +### openGauss/MogDB配置IPv6 + +编辑postgresql.conf文件,修改监听参数 + +``` +listen_addresses = '*' +``` + +修改完后重启服务,数据库将监听本机所有的网络接口。 + +编辑pg_hba.conf文件,添加数据库客户端连接的IPv6认证条目 + +``` +host all all fe80::a00:27ff:feb5:5432/128 md5 +host all all 2022:1::db1/128 md5 +``` + +### 使用gsql客户端进行测试 + +1.使用本地关联IPv6进行测试,此时需要带上网络接口名称 + +``` +$ gsql -h fe80::a00:27ff:feb5:5432%enp0s3 -Umoguser postgres -r -p6432 + +postgres=> \conninfo +You are connected to database "postgres" as user "moguser" on host "fe80::a00:27ff:feb5:5432%enp0s3" at port "6432". +postgres=> SELECT datname,usename, client_addr FROM pg_stat_activity where usename='moguser'; + datname | usename | client_addr +----------+---------+-------------------------- + postgres | moguser | fe80::a00:27ff:feb5:5432 +(1 row) +``` + +2.使用全局IPv6进行测试 + +``` +$ gsql -h 2022:1::db1 -Umoguser postgres -r -p6432 + +postgres=> \conninfo +You are connected to database "postgres" as user "moguser" on host "2022:1::db1" at port "6432". +postgres=> SELECT datname,usename, client_addr FROM pg_stat_activity where usename='moguser'; + datname | usename | client_addr +----------+---------+------------- + postgres | moguser | 2022:1::db1 +(1 row) +``` + +### 使用java jdbc进行测试 + +通过java程序test.jar包进行测试,test.jar需要三个入参,分别是jdbc url、jdbc username、jdbc password。 + +1.使用普通的IPv4进行测试 + +``` +$ java -jar test.jar jdbc:postgresql://192.168.137.101:6432/postgres moguser Admin@1234 +``` + +执行结果如下,可以看到数据库连接测试成功 + +``` +Input jdbc url:jdbc:postgresql://192.168.137.101:6432/postgres +Input jdbc username:moguser +Connection test successfully. +``` + +2.使用本地关联IPv6进行测试,进行测试 + +``` +$ java -jar test.jar jdbc:postgresql://fe80::a00:27ff:feb5:5432:6432/postgres moguser Admin@1234 +``` + +执行结果如下,可以看到数据库连接测试成功 + +``` +Input jdbc url:jdbc:postgresql://fe80::a00:27ff:feb5:5432:6432/postgres +Input jdbc username:moguser +Connection test successfully +``` + +3.使用全局IPv6进行测试 + +``` +$ java -jar test.jar jdbc:postgresql://2022:1::db1:6432/postgres moguser Admin@1234 +``` + +执行结果如下,可以看到数据库连接测试成功 + +``` +Input jdbc url:jdbc:postgresql://2022:1::db1:6432/postgres +Input jdbc username:moguser +Connection test successfully. 
+``` + +### 总结 + +1.openGauss/MogDB配置IPv6只需简单修改listen_addresses = ‘*’ 即可。 +2.使用gsql客户端进行连接时,本地关联IPv6还需要使用网络接口名进行访问,全局IPv6不需要。 +3.使用jdbc客户端进行连接时,无论是本地关联IPv6还是全局IPv6,直接使用地址即可。 \ No newline at end of file diff --git a/content/zh/post/pengchong/title/img6.png b/content/zh/post/pengchong/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79 Binary files /dev/null and b/content/zh/post/pengchong/title/img6.png differ diff --git a/content/zh/post/pengchong/title/img9.png b/content/zh/post/pengchong/title/img9.png new file mode 100644 index 0000000000000000000000000000000000000000..9823959ac5ca3b9310e1cfa321f5d65728efe5fb Binary files /dev/null and b/content/zh/post/pengchong/title/img9.png differ diff --git "a/content/zh/post/pengchong/\345\275\223JDBC\351\201\207\344\270\212\350\277\224\345\233\236Cursor.md" "b/content/zh/post/pengchong/\345\275\223JDBC\351\201\207\344\270\212\350\277\224\345\233\236Cursor.md" new file mode 100644 index 0000000000000000000000000000000000000000..1fa1e98b56b6492ef6897eafbdf50c9d691a68cd --- /dev/null +++ "b/content/zh/post/pengchong/\345\275\223JDBC\351\201\207\344\270\212\350\277\224\345\233\236Cursor.md" @@ -0,0 +1,245 @@ ++++ + +title = "当JDBC遇上返回Cursor" + +date = "2022-05-18" + +tags = ["当JDBC遇上返回Cursor"] + +archives = "2022-05" + +author = "彭冲" + +summary = "当JDBC遇上返回Cursor" + +img = "/zh/post/pengchong/title/img9.png" + +times = "10:20" ++++ + +# 当JDBC遇上返回Cursor + +本文出处:[https://www.modb.pro/db/400426](https://www.modb.pro/db/400426) + +使用jdbc访问PostgreSQL或者MogDB(openGauss)数据库里的cursor游标时,官方文档可查的资料较少,下面的示例供参考。 + +### 测试环境 + +- JDBC:postgresql-42.3.5.jar +- PG: 14.2 +- MogDB(openGauss): 3.0.0 + +### 测试背景 + +针对function和procedure返回cursor游标类型,通过jdbc如何调用。 + +测试function:curtest1,通过returns返回游标类型 + +``` +create or replace function curtest1() +returns refcursor +language plpgsql +as $function$ +declare + cur refcursor; +begin + open cur for select id,data from fiverows; + return cur; +end; +$function$; +``` + +测试procedure:curtest2,通过out参数返回游标类型 + +``` +create or replace procedure curtest2(out cur refcursor) +language plpgsql +as $procedure$ +begin + open cur for select id,data from fiverows; +end; +$procedure$; +``` + +测试procedure:curtest3,通过out参数返回多个游标类型 + +``` +create or replace procedure curtest3(out cur1 refcursor,out cur2 refcursor) +language plpgsql +as $procedure$ +begin + open cur1 for select id,data from fiverows where id between 1 and 3; + open cur2 for select id,data from fiverows where id between 4 and 5; +end; +$procedure$; +``` + +表结构及数据 + +``` +create table fiverows(id serial primary key,data text); +insert into fiverows(data) values('one'),('two'), + ('three'),('four'),('five'); +``` + +### 测试一:function通过returns返回游标 + +function返回游标在PostgreSQL或者MogDB(openGauss)数据库里访问的代码参考如下: + +``` + public static void main(String[] args) throws Exception{ + Class.forName("org.postgresql.Driver"); + Connection conn = DriverManager.getConnection("jdbc:postgresql://192.168.137.251:1402/postgres","postgres","admin"); + CallableStatement stmt = null; + ResultSet resultSet = null; + try{ + conn.setAutoCommit(false); + stmt = conn.prepareCall("{? 
= call public.curtest1()}");; + stmt.registerOutParameter(1, Types.REF_CURSOR); + stmt.setCursorName("mycur1"); + stmt.execute(); + resultSet = (ResultSet) stmt.getObject(1); + while(resultSet.next()){ + Integer id = (Integer)resultSet.getInt(1); + String data = (String) resultSet.getString(2); + System.out.println("id = "+id+", data = "+data); + } + + } catch (Exception e) { + e.printStackTrace(); + } + conn.close(); + } +``` + +注意下面这几点: +1.使用CallableStatement +2.使用Types.REF_CURSOR注册输出参数 +3.Statement执行后,再通过Statement获取结果集 + +### 测试二:procedure通过out参数返回游标 + +procedure返回游标在PostgreSQL或者MogDB(openGauss)数据库里有一点差异:主要是escapeSyntaxCallMode参数。 + +#### PostgreSQL + +procedure返回游标的jdbc代码跟上面function测试类似: + +``` + public static void main(String[] args) throws Exception{ + Class.forName("org.postgresql.Driver"); + Connection conn = DriverManager.getConnection("jdbc:postgresql://192.168.137.251:1402/postgres?escapeSyntaxCallMode=call", "postgres","admin"); + CallableStatement stmt = null; + ResultSet resultSet = null; + try{ + conn.setAutoCommit(false); + stmt = conn.prepareCall("{call public.curtest2(?)}");; + stmt.registerOutParameter(1, Types.REF_CURSOR); + + stmt.execute(); + resultSet = (ResultSet) stmt.getObject(1); + while(resultSet.next()){ + Integer id = (Integer)resultSet.getInt(1); + String data = (String) resultSet.getString(2); + System.out.println("id = "+id+", data = "+data); + } + } catch (Exception e) { + e.printStackTrace(); + } + conn.close(); + } +``` + +不过执行上面的代码客户端会报下面的错误: +![image.png](../images/20220506-c82e9ad3-60c5-4041-9539-970abf9072e8.png) + +错误信息提示并不是很明确,查找资料发现大概与escapeSyntaxCallMode参数有关,该参数的官方链接如下:https://jdbc.postgresql.org/documentation/head/connect.html + +> Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only). In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement. In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only). 
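+
+除了像下文那样在连接串后追加参数,也可以通过Properties对象把escapeSyntaxCallMode传给驱动。下面是一段示意写法(连接信息沿用上文示例,效果等同于在URL后追加?escapeSyntaxCallMode=call):
+
+```
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Properties;
+
+public class CallModeDemo {
+    public static void main(String[] args) throws Exception {
+        Class.forName("org.postgresql.Driver");
+        Properties props = new Properties();
+        props.setProperty("user", "postgres");
+        props.setProperty("password", "admin");
+        // 可选值:select(默认)、callIfNoReturn、call
+        props.setProperty("escapeSyntaxCallMode", "call");
+        Connection conn = DriverManager.getConnection(
+                "jdbc:postgresql://192.168.137.251:1402/postgres", props);
+        System.out.println("connected with escapeSyntaxCallMode=call");
+        conn.close();
+    }
+}
+```
+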
+ +自PostgreSQL v11开始支持procedure,PostgreSQL JDBC驱动也配套引入了escapeSyntaxCallMode参数,该参数有三种值,默认是第一种func,系统理解为function来调用;第二种是call,系统理解为procedure来调用;第三种是系统根据定义自动猜测。 + +上面的connection连接串加上escapeSyntaxCallMode参数,修改成: + +``` +jdbc:postgresql://192.168.137.251:1402/postgres?escapeSyntaxCallMode=call +``` + +则可正常执行 + +![image.png](../images/20220506-9fe0185e-139e-48cc-8c01-51b3b40c3863.png) + +#### MogDB(openGauss) + +procedure返回游标的jdbc代码与function测试类似,并且也不需要设置escapeSyntaxCallMode参数。 +![image.png](../images/20220506-df18f7fb-340a-4564-9f65-36efa85cf789.png) + +procedure创建语法有差异,代码如下: + +``` +create or replace procedure curtest2(out cur refcursor) as begin open cur for select id,data from fiverows; end; +``` + +### 测试三:procedure通过out参数返回多个游标 + +返回多个游标与测试二类似,也是同样支持,在PostgreSQL也是需要设置escapeSyntaxCallMode参数,MogDB(openGauss)不需要设置。 + +#### PostgreSQL + +jdbc代码如下: + +``` + public static void main(String[] args) throws Exception{ + Class.forName("org.postgresql.Driver"); + Connection conn = DriverManager.getConnection("jdbc:postgresql://192.168.137.251:1402/postgres?escapeSyntaxCallMode=call","postgres","admin"); + CallableStatement stmt = null; + ResultSet resultSet = null; + try{ + conn.setAutoCommit(false); + stmt = conn.prepareCall("{call public.curtest3(?,?)}");; + stmt.registerOutParameter(1, Types.REF_CURSOR); + stmt.registerOutParameter(2, Types.REF_CURSOR); + stmt.execute(); + resultSet = (ResultSet) stmt.getObject(1); + System.out.println("cursor1 data:"); + while(resultSet.next()){ + Integer id = (Integer)resultSet.getInt(1); + String data = (String) resultSet.getString(2); + System.out.println("id = "+id+", data = "+data); + } + resultSet = (ResultSet) stmt.getObject(2); + System.out.println("cursor2 data:"); + while(resultSet.next()){ + Integer id = (Integer)resultSet.getInt(1); + String data = (String) resultSet.getString(2); + System.out.println("id = "+id+", data = "+data); + } + + } catch (Exception e) { + e.printStackTrace(); + } + conn.close(); + } +``` + +测试结果如下: +![image.png](../images/20220506-950bbc00-54c2-4d4b-b00e-b391fe3218ea.png) + +#### MogDB(openGauss) + +与测试二类似,不需要设置escapeSyntaxCallMode参数。 +![image.png](../images/20220506-d68be5cb-1ba1-4ff6-b61a-e85733d3fa88.png) + +procedure创建语法有差异,代码如下: + +``` +create or replace procedure curtest3(out cur1 refcursor,out cur2 refcursor) +as +begin + open cur1 for select id,data from fiverows where id between 1 and 3; + open cur2 for select id,data from fiverows where id between 4 and 5; +end; +``` + +结论通过jdbc访问function里的returns cursor或者procedure里的out cursor类型都支持。 +区别在于procedure的out cursor类型,在PostgreSQL需要设置escapeSyntaxCallMode=call,MogDB(openGauss)数据库不需要。 diff --git a/content/zh/post/shine/figures/zh-cn_image_0305491362.png b/content/zh/post/shine/figures/zh-cn_image_0305491362.png new file mode 100644 index 0000000000000000000000000000000000000000..d245d48dc07e2b01734e21ec1952e89fa9269bdb Binary files /dev/null and b/content/zh/post/shine/figures/zh-cn_image_0305491362.png differ diff --git a/content/zh/post/shine/figures/zh-cn_image_0305491430.png b/content/zh/post/shine/figures/zh-cn_image_0305491430.png new file mode 100644 index 0000000000000000000000000000000000000000..069d09f018ebb9d24d11603941aee4987e4df724 Binary files /dev/null and b/content/zh/post/shine/figures/zh-cn_image_0305491430.png differ diff --git a/content/zh/post/shine/figures/zh-cn_image_0305491436.png b/content/zh/post/shine/figures/zh-cn_image_0305491436.png new file mode 100644 index 0000000000000000000000000000000000000000..f4d46d9dbb5402adfa07bdc2eb6c1298013c1732 Binary 
files /dev/null and b/content/zh/post/shine/figures/zh-cn_image_0305491436.png differ diff --git a/content/zh/post/shine/figures/zh-cn_image_0305491445.png b/content/zh/post/shine/figures/zh-cn_image_0305491445.png new file mode 100644 index 0000000000000000000000000000000000000000..6bd563ba8a98381bc23ce90a508ded1395e2aa83 Binary files /dev/null and b/content/zh/post/shine/figures/zh-cn_image_0305491445.png differ diff --git a/content/zh/post/shine/figures/zh-cn_image_0305491449.png b/content/zh/post/shine/figures/zh-cn_image_0305491449.png new file mode 100644 index 0000000000000000000000000000000000000000..a32856aa08e459ed0f51f8fcf4c2f51511c12095 Binary files /dev/null and b/content/zh/post/shine/figures/zh-cn_image_0305491449.png differ diff --git "a/content/zh/post/shine/figures/\345\215\207\347\272\247\346\265\201\347\250\213\345\233\276.png" "b/content/zh/post/shine/figures/\345\215\207\347\272\247\346\265\201\347\250\213\345\233\276.png" new file mode 100644 index 0000000000000000000000000000000000000000..61269ac24d324281efc6118cd27087a2d63dd345 Binary files /dev/null and "b/content/zh/post/shine/figures/\345\215\207\347\272\247\346\265\201\347\250\213\345\233\276.png" differ diff --git "a/content/zh/post/shine/openGauss\345\215\207\347\272\247\346\214\207\345\257\274\344\271\246.md" "b/content/zh/post/shine/openGauss\345\215\207\347\272\247\346\214\207\345\257\274\344\271\246.md" index 15864b035c69c1ba715e4afcab1c7e4e7082f854..28cf924294694eb03b2228f55e42207b072bf9f1 100644 --- "a/content/zh/post/shine/openGauss\345\215\207\347\272\247\346\214\207\345\257\274\344\271\246.md" +++ "b/content/zh/post/shine/openGauss\345\215\207\347\272\247\346\214\207\345\257\274\344\271\246.md" @@ -1,250 +1,584 @@ +++ -title = "openGauss升级指导书" -date = "2020-12-31" +title = "openGauss升级指导书" -tags = ["openGauss升级指导书"] +date = "2021-03-09" -archives = "2020-12" +tags = ["openGauss升级指导书"] -author = "shine" +archives = "2021-03" + +author = "shine" summary = "openGauss升级指导书" img = "/zh/post/shine/title/img28.png" -times = "8:30" +times = "15:40" +++ -**概述** +# 前 言 + +## 概述 + +本文档详细的描述了版本升级、回滚流程、以及具体的操作指导,同时提供了常见的问题解答及故障处理方法。 + +## 读者对象 + +本文档主要适用于升级的操作人员。操作人员必须具备以下经验和技能: + +- 熟悉当前网络的组网和相关网元的版本信息。 +- 有该设备维护经验,熟悉设备的操作维护方式。 + + +# 升级前必读 + + +## 升级方案 + +本节为指导用户选择升级方式。 + +用户根据openGauss提供的新特性和数据库现状,确定是否对现有系统进行升级。 + +当前支持的升级模式为就地升级和灰度升级。升级方式的策略又分为大版本升级和小版本升级。 + +用户挑选升级方式后,系统会自动判断并选择合适的升级策略。 + +就地升级:升级期间需停止业务进行,一次性升级所有节点。 + +灰度升级:灰度升级支持全业务操作,也是一次性升级所有节点。\(openGauss1.1.0版本之后的版本支持该功能\) + +## 升级前的版本要求(升级路径) + +openGauss升级版本要求如[表1](#table7961729)所示。 + +**表 1** 升级前的版本要求(升级路径) + + + + + + + + + + + + + + + + +

+| 版本 | 升级说明 |
+| ---- | ---- |
+| openGauss1.0.1版本之前的版本 | 可以升级到openGauss1.0.1之前的任意版本 |
+| openGauss1.0.1版本 | 可以升级到openGauss1.1.0版本 |
+| openGauss1.1.0版本之后的版本 | 可以升级到openGauss1.1.0之后的任意版本 |
+
+
+>![](public_sys-resources/icon-note.gif) **说明:**
+>升级前版本,可以通过执行如下工具查看。
+>
+>```
+>gsql -V | --version
+>```
+
+## 升级影响和升级约束
+
+升级过程需要注意以下事项。
+
+- 升级操作不能和扩容、缩容同时执行。
+- 不支持虚拟IP。
+- 升级过程中,不允许对wal\_level,max\_connections,max\_prepared\_transactions,max\_locks\_per\_transaction这四个GUC参数的值进行修改。如果修改,会导致回滚后实例启动异常。
+- 建议在数据库系统空闲情况下进行升级,尽量避开业务繁忙的时间段(可按照经验判断,如节假日等)。
+- 升级前尽可能保证数据库正常。可以通过gs\_om -t status查询,查询结果的cluster\_state为Normal代表数据库正常。
+- 升级前保证数据库互信正常,可以在任意节点上,通过ssh hostname命令,连接另外一个节点进行验证。如果各机器间互连不用输入密码,说明互信正常(通常数据库状态正常时,互信一般都是正常的)。
+- 升级前后,数据库的部署方式(配置文件)不能发生变化。升级前会对部署方式进行校验,如果改变,会报错。
+- 升级前要保证操作系统处于健康状态,通过gs\_checkos工具可以完成操作系统状态检查。
+- 就地升级需要停止业务,灰度升级支持全业务操作。
+- 数据库运行正常且主DN的数据完全同步到备DN。
+- 升级过程中不允许打开kerberos开关。
+- 请不要修改安装包中解压出来的version.cfg文件。
+- 如果升级过程中出现异常导致升级失败,需用户手动回滚,并且必须回滚成功后才能进行下一次升级。
+- 如果升级回滚成功后再次升级成功,未提交阶段设置的GUC参数将失效。
+- 执行升级的过程中请不要手动设置GUC参数。
+- 灰度升级中,升级的时候都会产生不超过10s的业务中断。
+- 升级过程中,必须保持内核版本与om版本一致才可执行om操作。这里的一致是指,内核代码和om代码都来自同一个软件包。如果执行了升级包的前置脚本却没有升级,或者升级回滚后没有执行基线包的前置脚本,就会造成内核代码和om代码的不一致。
+- 升级过程中如果系统表新增了字段,升级后通过**\\d**命令将查看不到这些新增的字段。此时通过**select**命令可以查到这些新增的字段。
+- 升级需要GUC参数enable\_stream\_replication=on,该参数为off时不允许升级。
+- 灰度升级中,业务并发要小于200并发读加200并发写的情况。
+- 若在openGauss2.1.0之前的版本中使用了MOT表,则不支持升级到openGauss2.1.0版本。
+- 执行gs_upgradectl -t auto-upgrade之后、提交之前,不能执行快照生成,即升级过程中不能执行快照生成。
+
+# 升级
+
+
+## 升级流程
+
+本章介绍升级到该版本的主要升级过程。
+
+**图 1** 升级流程图
+
+
+>![](public_sys-resources/icon-note.gif) **说明:**
+>本文档中描述的时间仅供参考,实际操作时间以现场情况为准。
+
+**表 1** 升级流程执行效率估计
+

+| 步骤 | 建议起始时间 | 耗时(天/小时/分钟) | 业务中断时长 | 备注 |
+| ---- | ---- | ---- | ---- | ---- |
+| 升级前准备与检查 | 升级操作前一天 | 约2~3小时。 | 对业务无影响。 | 升级前检查和备份数据、校验软件包等操作。 |
+| 升级操作 | 业务空闲期 | 耗时主要集中在数据库的启动和停止以及每个database的系统表修改处。升级操作耗时一般不会超过30分钟。 | 与操作时长一致,一般不会超过30分钟。 | 依据指导书开始升级。 |
+| 升级验证 | 业务空闲期 | 约30分钟。 | 与操作时长一致,约30分钟。 | - |
+| 提交升级 | 业务空闲期 | 提交升级耗时一般不超过10分钟。 | 与操作时长一致,一般不超过10分钟。 | - |
+| 升级版本回滚 | 业务空闲期 | 版本回滚耗时一般不会超过30分钟。 | 与操作时长一致,一般不会超过30分钟。 | - |
    + + + + +## 升级前准备与检查 + + +### 升级前准备与检查清单 + +**表 1** 升级前准备清单 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| 序号 | 升级准备项目 | 准备内容 | 建议起始时间 | 耗时(天/小时/分钟) |
+| ---- | ---- | ---- | ---- | ---- |
+| 1 | 收集节点信息 | 收集到数据库涉及节点的名称、IP地址,root、omm用户密码等环境信息。 | 升级前一天 | 1小时 |
+| 2 | 设置root用户远程登录 | 设置配置文件,允许root用户远程登录。 | 升级前一天 | 2小时 |
+| 3 | 备份数据 | 参考《管理员指南》中的“备份与恢复”章节进行。 | 升级前一天 | 备份数据量和方案不同,耗时也不同 |
+| 4 | 获取并校验升级包 | 获取升级软件包,进行完整性校验。 | 升级前一天 | 0.5小时 |
+| 5 | 健康检查 | 使用gs_checkos工具完成操作系统状态检查。 | 升级前一天 | 0.5小时 |
+| 6 | 检查数据库节点磁盘使用率 | 使用df命令查看磁盘使用率。 | 升级前一天 | 0.5小时 |
+| 7 | 检查数据库状态 | 使用gs_om工具完成数据库状态检查。 | 升级前一天 | 0.5小时 |
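+
+清单中的几项检查可以串成一个简单的巡检脚本预跑一遍(示意脚本,假设gs\_checkos以root执行、gs\_om和gsql以omm用户执行,各项检查的详细说明见后文各小节):
+
+```
+#!/bin/bash
+# 升级前巡检示意:健康检查、磁盘使用率、数据库状态、当前版本
+set -e
+
+# 1. 操作系统健康检查(需root执行)
+gs_checkos -i A
+
+# 2. 查看磁盘使用率,建议数据库节点磁盘使用率低于80%
+df -h
+
+# 3. 检查数据库状态,查询结果的cluster_state为Normal代表数据库正常
+su - omm -c "gs_om -t status"
+
+# 4. 记录升级前的内核版本,便于升级后比对
+su - omm -c "gsql -V"
+```
+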
    + + +>![](public_sys-resources/icon-note.gif) **说明:** +>“耗时”依不同环境(包括现场数据量、服务器性能等原因)会存在一定差异。 + +### 收集节点信息 + +联系数据库系统管理员,获取数据库涉及节点的节点名称、节点IP地址。节点的root、omm用户密码等环境信息。如[表1](#toc218487220)。 + +**表 1** 节点信息 + + + + + + + + + + + + + + + + + + +

+| 序号 | 节点名称 | 节点IP | root用户密码 | omm用户密码 | 备注 |
+| ---- | ---- | ---- | ---- | ---- | ---- |
+| 1 | - | - | - | - | - |
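+
+收集完节点信息后,可以顺手验证各节点互信是否正常(示意脚本,节点名列表为假设值,请替换为实际收集到的节点名称;互信正常时ssh不需要输入密码):
+
+```
+#!/bin/bash
+# 互信验证示意:对每个节点执行ssh hostname,能直接返回主机名即互信正常
+NODES="node1 node2 node3"
+for node in $NODES; do
+    if ssh -o BatchMode=yes "$node" hostname >/dev/null 2>&1; then
+        echo "$node: 互信正常"
+    else
+        echo "$node: 互信异常,请检查"
+    fi
+done
+```
+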
    + +### 备份数据 + +升级一旦失败,有可能会影响到业务的正常开展。提前备份数据,就可以在风险发生后,尽快的恢复业务。 + +请参考《管理员指南》中的“备份与恢复”章节,完成数据的备份。 + +### 获取升级包 + +[https://opengauss.org/zh/download.html](https://opengauss.org/zh/download.html)在该网站获取想要升级的升级包。 + +### 健康检查 + +通过gs\_checkos工具可以完成操作系统状态检查。 + +#### 前提条件 + +- 当前的硬件和网络环境正常。 +- 各主机间root互信状态正常。 +- 只能使用root用户执行gs\_checkos命令。 + +#### 操作步骤 + +1. 以root用户身份登录服务器。 +2. 执行如下命令对服务器的OS参数进行检查。 + + ``` + gs_checkos -i A + ``` + + 检查服务器的OS参数的目的是为了保证数据库正常通过预安装,并且在安装成功后可以安全高效的运行。详细的检查项目请参见《工具参考》中的“服务端工具 \> gs\_checkos”工具的“表1 操作系统检查项”。 + + +### 检查数据库节点磁盘使用率 + +建议数据库节点磁盘使用率低于80%时再执行升级操作。 + +### 检查数据库状态 + +本节介绍数据库状态查询的具体操作。 + +#### 验证步骤 + +1. 以数据库用户(如omm)登录节点,source环境变量。 +2. 执行如下命令查看数据库状态。 + + ``` + gs_om -t status + ``` + +3. 保证数据库状态正常。 + +## 升级操作 + +介绍就地升级和灰度升级的详细操作。 + +#### 操作步骤 + +1. 以root身份登录节点。 +2. 创建新包目录。 + + ``` + mkdir -p /opt/software/gaussdb_upgrade + ``` + +3. 将需要更新的新包上传至目录“/opt/software/gaussdb\_upgrade”并解压。 +4. 进入安装包解压出的script目录下: + + ``` + cd /opt/software/gaussdb_upgrade/script + ``` + +5. 在就地升级或灰度升级前执行前置脚本gs\_preinstall。 + + ``` + ./gs_preinstall -U omm -G dbgrp -X /opt/software/GaussDB_Kernel/clusterconfig.xml + ``` + +6. 切换至omm用户。 + + ``` + su - omm + ``` + +7. 数据库状态正常时,使用如下命令进行就地升级或者灰度升级。 + + 示例一:使用gs\_upgradectl脚本执行就地升级。 + + ``` + gs_upgradectl -t auto-upgrade -X /opt/software/GaussDB_Kernel/clusterconfig.xml + ``` + + 示例二:使用gs\_upgradectl脚本执行灰度升级。 + + ``` + gs_upgradectl -t auto-upgrade -X /opt/software/GaussDB_Kernel/clusterconfig.xml --grey + ``` + + +## 升级验证 + +本章介绍升级完成后的验证操作。给出验证的用例和详细操作步骤。 + + +### 验证项目的检查表 + +**表 1** 验证项目的检查表 + + + + + + + + + + + + + + + + + + + + + + + + +

+| 序号 | 验证项目 | 检查标准 | 检查结果 |
+| ---- | ---- | ---- | ---- |
+| 1 | 版本查询 | 查询升级后版本是否正确 | - |
+| 2 | 健康检查 | 使用gs_checkos工具完成操作系统状态检查。 | - |
+| 3 | 数据库状态 | 使用gs_om工具完成数据库状态检查。 | - |
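+
+上表中的验证项也可以用一个小脚本一次性执行(示意脚本,以数据库用户omm执行,具体操作见下文各小节):
+
+```
+#!/bin/bash
+# 升级后验证示意:版本查询 + 数据库状态检查
+
+# 1. 查看所有节点升级后的内核版本信息
+gs_ssh -c "gsql -V"
+
+# 2. 检查数据库状态,查询结果的cluster_state为Normal代表数据库正常
+gs_om -t status
+```
+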
    + +### 升级版本查询 + +本节介绍版本查询的具体操作。 + +#### 验证步骤 + +1. 以数据库用户(如omm)登录节点,source环境变量。 +2. 执行如下命令查看所有节点的版本信息。 + + ``` + gs_ssh -c "gsql -V" + ``` + + +### 检查升级数据库状态 + +本节介绍数据库状态查询的具体操作。 + +#### 验证步骤 + +1. 以数据库用户(如omm)登录节点。 +2. 执行如下命令查看数据库状态。 + + ``` + gs_om -t status + ``` + + 查询结果的cluster\_state为Normal代表数据库正常。 + + +## 提交升级 + +升级完成后,如果验证也没问题。接下来就可以提交升级。 + +>![](public_sys-resources/icon-note.gif) **说明:** +>一旦提交操作完成,则不能再执行回滚操作。 + +#### 操作步骤 + +1. 以数据库用户(如omm)登录节点。 +2. 执行如下命令完成升级提交。 + + ``` + gs_upgradectl -t commit-upgrade -X /opt/software/GaussDB_Kernel/clusterconfig.xml + ``` + + +## 升级版本回滚 + +本章介绍版本回滚方法。 + +#### 操作步骤 + +1. 以数据库用户(如omm)登录节点。 +2. 执行如下命令完成版本回滚(回滚内核代码)。回滚完成,如果需要保持内核和om代码的版本一致,可以执行一下旧包的前置命令(参见[执行前置脚本gs\_preinstall。](#升级操作))。 + + ``` + gs_upgradectl -t auto-rollback -X /opt/software/GaussDB_Kernel/clusterconfig.xml + ``` + + >![](public_sys-resources/icon-note.gif) **说明:** + >- 如果数据库异常,需要强制回滚,可以使用如下命令。 + > ``` + > gs_upgradectl -t auto-rollback -X /opt/software/GaussDB_Kernel/clusterconfig.xml --force + > ``` + +3. 查看回滚之后的版本号。 + + ``` + gs_om -V | --version + ``` - 本文档介绍了升级、回滚流程以及具体的操作指导,同时提供了常见的问题解答及故障处理方法。 -**修改记录** +# 异常处理 -文档版本|发布日期|修改说明 ---|--|-- -01|2020-12-31|第一次正式发布 +如果升级失败,请按照如下方式进行处理: -[toc] +1. 排查是否有环境问题。 -# 1升级前必读 -## 1.1升级方案 + 如磁盘满、网络故障等,或者升级包、升级版本号是否正确。排除问题后,可以尝试重入升级。 - 本文指导用户根据openGauss提供的新特性和数据库现状,确定是否对现有系统进行升级。 - 当前支持的升级模式为就地升级。 - 就地升级:升级期间需要停止业务,一次性升级所有节点。 +2. 如果没有发现环境问题,或者重入升级失败,需要收集相关日志,找技术支持工程师定位。 -## 升级路径 + 收集日志命令: -> openGauss的升级路径如表1-1所示 - -表1-1升级路径 - -版本|升级说明 ---|-- -openGauss1.0.1版本及之前的版本|可以升级到1.0.1之前的任意版本 -openGauss1.0.1版本|升级到openGauss1.1.0之后的版本 - ->说明 - - 升级前的版本可以使用如下命令查看: - gsql -V | --version - -## 升级影响和约束 ->升级过程中需要注意以下事项 -- 升级操作不能和节点替换、扩容、缩容同时执行 -- 不支持虚拟IP -- 升级过程中,不允许对wal_level, max_connection, max_prepared_transactions, max_locks_per_transaction这四个GUC参数的值进行修改。如果修改,会导致回滚后,集群启动异常 -- 需要在数据库无业务的情况下进行升级 -- 升级前保证集群正常。可以通过gs_om -t status查询,查询结果的cluster_state为normal代表数据库正常 -- 升级前保证数据库互信正常 -- 升级前后,数据库的部署方式(配置文件)不能发生变化 -- 升级前要保证操作系统处于健康状态,通过gs_checkos工具可以完成操作系统状态检查 -- 数据库运行正常且主DN的数据完全同步到备DN -- 升级过程中不允许打开kerberos开关 -- 不要擅自修改安装包中解压出来的version.cfg文件 -- 如果升级过程中出现异常导致升级失败,需要用户手动执行回滚,必须回滚成功才能执行下一次升级 -- 升级回滚成功后,升级期间设置的GUC参数将失效,建议用户升级过程中尽量不要手动修改guc参数值,对修改的值在升级操作执行完成后建议进行double check -- 升级回滚成功后,若想执行其他om操作,需要保证om代码与内核代码版本一致。使用gsql -V(查询内核版本)和gs_om -V(查询om版本)进行查询对比 -- 升级成功后,若系统表相对于老版本新增了字段,使用\d命令查看表时,无法查看到新增字段,select表可以查到 -- 升级前需要设置guc参数enable_stream_replication值为on - -# 2 就地升级说明 - -## 2.1 就地升级流程 - ->升级流程执行时间估计 - -表2-1 升级流程执行时间估计 - -步骤|建议起始时间|耗时|业务中断时常|备注 ---|--|--|--|-- -就地升级前准备与检查|升级操作前|约2-3小时|对业务无影响|升级前检查和备份数据、获取软件包等操作 -就地升级操作|业务空闲期|耗时主要集中在数据库的启动和停止以及每个database的系统对象的修改。升级操作耗时一般不会超过30分钟|与操作时常一致,一般不会超过30分钟|依据指导书执行升级,该操作要求暂停业务 -就地升级验证|业务空闲期|约30分钟|与操作时常一致,约30分钟|- -提交升级|业务空闲期|提交升级耗时一般不超过10分钟|与操作时常一致,约10分钟|- -就地升级版本回滚|业务空闲期|版本回滚耗时一般不会超过30分钟|与操作时常一致,约30分钟|- - -- 说明 -``` -本文档中的描述的时间仅供参考,实际操作时间以现场情况为准 -``` - -## 2.2 就地升级前准备与检查 -### 2.2.1 升级前准备与检查清单 - -> 表2-2-1 升级前准备清单 - - -序号|升级准备项目|准备内容|建议起始时间|耗时 ---|--|--|--|-- -1|手机节点信息|收集到数据库涉及节点的名称、IP地址、root、omm用户密码等环境信息|升级前一天|1小时 -2|备份数据|提前备份数据|升级前一天|2小时 -3|获取升级包|获取待升级包|升级前一天|0.5小时 -4|健康检查|使用gs_checkos工具完成操作系统状态检查|升级前一天|0.5小时 -5|检查数据库节点磁盘使用率|查看磁盘使用率|升级前一天|0.5小时 -6|检查数据库状态|使用gs_om工具完成数据库状态检查|升级前|0.1小时 - - -### 2.2.2 收集节点信息 - -``` -联系数据库管理员,获取数据库涉及节点的名称、IP地址、root、omm用户密码等环境信息 -``` -### 2.2.3 备份数据 - -``` -升级一旦失败,有可能会影响到业务的正常开展。提前备份数据,就可以在风险发生后,尽快恢复业务 -``` -### 2.2.4 获取升级包 - -单机安装包超链接跳转获取安装包 -[安装包](https://opengauss.org/zh/download.html) - -### 2.2.5 健康检查 -```markdown -通过gs_checkos工具可以完成操作系统健康检查 -``` -> 前提条件 -- 当前的硬件和网络环境正常 -- 
各主机间root互信状态正常 -- 只能使用root用户执行gs_checkos命令 -> 操作步骤 -- 步骤1 以root用户身份登录服务器 -- 步骤2 执行如下命令对服务器的OS参数进行检查 -```markdown -gs_checkos -i A -检查服务器的OS参数的目的是为了保证数据库正常通过预安装,并且在安装成功后可以安全高校的运行 -``` -### 2.2.6 检查数据库节点磁盘使用率 -```markdown -数据库节点磁盘使用率低于80%才可以执行升级 -``` - -### 2.2.7 检查数据库状态 -> 验证步骤 -- 步骤1 以数据库用户登录节点 -- 步骤2 执行如下命令查看数据库状态 -```markdown -gs_om -t status -``` -- 步骤3 保证数据库状态正常 - -## 2.3 执行就地升级操作 -> 操作步骤 -- 步骤1 以root用户登录节点 -- 步骤2 创建新包目录 -```markdown -mkdir -p /opt/software/gaussdb_upgrade -``` -- 步骤3 将待升级的安装包上传至目录"/opt/software/gaussdb_upgrade"并解压 -- 步骤4 进入安装包解压的script目录下 -```markdown -cd /opt/software/gaussdb_upgrade/script -``` -- 步骤5 执行前置脚本gs_preinstall -```markdown -./gs_preinstall -U omm -G dbgrp -X /opt/software/open_gauss/clusterconfig.xml -``` -- 步骤6 切换至omm用户 -```markdown -su - omm -``` -- 步骤7 数据库状态正常时,执行以下命令进行升级 -```markdown -gs_upgradectl -t auto-upgrade -X /opt/software/open_gauss/clusterconfig.xml -``` -## 2.4 就地升级验证 -### 2.4.1 验证项目的检查表 -> 表2-1 验证项目的检查表 - -序号|验证项目|检查标准 ---|--|-- -1|版本查询|查询升级后版本是否正确 -2|健康检查|使用gs_checkos工具完成操作系统状态检查 -3|数据库状态|使用gs_om工具完成数据库状态检查 - -### 2.4.2 就地升级版本查询 -> 操作步骤 -- 步骤1 以数据库用户登录节点 -- 步骤2 执行如下命令查看所有节点的内核版本信息 -``` -gs_ssh -c "gsql -V" -``` - -### 2.4.3 检查就地升级数据库状态 -> 验证步骤 -- 步骤1 以数据库用户登录节点 -- 步骤2 执行如下命令查看数据库状态 -```markdown -gs_om -t status -查询结果的cluster_state为Normal代表数据库状态正常 -``` - -## 2.5 提交升级 -- 说明 升级一旦提交,则不能执行回滚操作 -```markdown -升级完成后,如果验证没有问题,接下来就可以提交升级 -``` -> 操作步骤 -- 步骤1 以数据库用户登录节点 -- 步骤2 执行如下命令完成升级提交 -```markdown -gs_upgradectl -t commit-upgrade -X /opt/software/open_gauss/clusterconfig.xml -``` - -## 2.6 就地升级版本回滚 -> 操作步骤 -- 步骤1 以数据库用户登录节点 -- 步骤2 执行如下命令完成内核版本回滚 -```markdown -gs_upgradectl -t auto-rollback -X /opt/software/open_gauss/clusterconfig.xml -如果数据库状态异常,可以使用强制回滚命令 -gs_upgradectl -t auto-rollback -X /opt/software/open_gauss/clusterconfig.xml --force -``` -- 步骤3 回滚om版本 -```markdown -为了保持内核与om版本一致,需要执行以下旧包的前置命令 -``` -- 步骤4 检查回滚后的内核与om版本 -```markdown -om版本:gs_om -V | --version -内核版本:gsql -V | --version -``` -# 3 异常处理 -```markdown -如果升级失败,请按照如下方式进行处理 -``` -- 步骤1 检查是否有环境问题 -```markdown -如磁盘满、网络故障等,或者升级包不正确,排查问题后,可以进行升级重入操作 -``` -- 步骤2 如果没有发现环境问题,或者重入升级失败,需要收集相关日志,找技术支持工程师定位。 -```markdown -收集日志命令 -gs_collector --begin-time='20201231 00:00' --end-time='20201231 12:00' -如果条件允许,建议保留环境 -``` + gs\_collector --begin-time='_20200724 00:00_' --end-time='_20200725 00:00_' + 如果条件允许,建议保留环境。 diff --git "a/content/zh/post/tangzuliang/MogDB OpenGauss\346\225\260\346\215\256\345\272\223\344\270\255\351\200\232\350\277\207\345\217\202\346\225\260\346\216\247\345\210\266\346\212\223\345\217\226\346\205\242sql.md" "b/content/zh/post/tangzuliang/MogDB OpenGauss\346\225\260\346\215\256\345\272\223\344\270\255\351\200\232\350\277\207\345\217\202\346\225\260\346\216\247\345\210\266\346\212\223\345\217\226\346\205\242sql.md" new file mode 100644 index 0000000000000000000000000000000000000000..f241bbad41b5e791cf8b5eec3a4c8927015f4017 --- /dev/null +++ "b/content/zh/post/tangzuliang/MogDB OpenGauss\346\225\260\346\215\256\345\272\223\344\270\255\351\200\232\350\277\207\345\217\202\346\225\260\346\216\247\345\210\266\346\212\223\345\217\226\346\205\242sql.md" @@ -0,0 +1,59 @@ ++++ + +title = "MogDB/OpenGauss数据库中通过参数控制抓取慢sql" + +date = "2022-04-14" + +tags = ["MogDB/OpenGauss数据库中通过参数控制抓取慢sql"] + +archives = "2022-04" + +author = "唐祖亮" + +summary = "MogDB/OpenGauss数据库中通过参数控制抓取慢sql" + +img = "/zh/post/tangzuliang/title/img6.png" + +times = "10:20" ++++ + +# MogDB/OpenGauss数据库中通过参数控制抓取慢sql + +本文出处:https://www.modb.pro/db/221556 + +
+
+MogDB数据库中可以通过打开相应的参数抓取慢sql,该参数为log_min_duration_statement。
+
+**log_min_duration_statement**
+**参数说明:** 当某条语句的执行耗时大于或者等于设定的毫秒数时,log_min_duration_statement参数用于控制记录该条完成语句的持续时间。
+设置log_min_duration_statement可以很方便地跟踪需要优化的查询语句。对于使用扩展查询协议的客户端,语法分析、绑定、执行每一步所花时间被独立记录。
+指定该参数的值可以设置慢sql的抓取阈值,例如:
+
+```
+gs_guc reload -I all -N all -c "log_min_duration_statement=20ms"
+```
+
+该语句表示把集群内所有节点的log_min_duration_statement参数都设置为20ms,这时候执行时间超过20ms的sql都被定义为慢sql,并被记录到dbe_perf.statement_history这个表中。
+
+![image.png](../images/20211223-de236193-6d32-4d76-bd4f-974ce8d215b9.png)
+
+![image.png](../images/20211223-48387a14-605d-450f-a26d-f4cd79e1c77a.png)
+该表会记录sql的详细信息:执行时间、cpu时间、解析时间等等。需要注意的是该表只在主库可读,备库没有该表。该表中的信息保留时间默认为7天,保留时间受参数track_stmt_retention_time的影响。
+
+**track_stmt_retention_time**
+**参数说明:** 组合参数,控制全量/慢SQL记录的保留时间。以60秒为周期读取该参数,并执行清理超过保留时间的记录,仅sysadmin用户可以访问。
+该参数属于SIGHUP类型参数,请参考表1中对应设置方法进行设置。
+**取值范围:** 字符型
+该参数分为两部分,形式为’full sql retention time, slow sql retention time’
+full sql retention time为全量SQL保留时间,取值范围为0 ~ 86400
+slow sql retention time为慢SQL的保留时间,取值范围为0 ~ 604800
+**默认值:** 3600,604800
+
+该参数的值单位为秒,全量sql的保留时间默认为一小时,慢sql默认保留七天。如果慢sql的量比较大,建议修改慢sql的保留时间为两天或者一天。
+
+```
+gs_guc set -I all -N all -c "track_stmt_retention_time='3600,172800'"
+```
+
+如上语句为设置全量sql保留1小时,慢sql保留两天。
diff --git a/content/zh/post/tangzuliang/images/20211223-48387a14-605d-450f-a26d-f4cd79e1c77a.png b/content/zh/post/tangzuliang/images/20211223-48387a14-605d-450f-a26d-f4cd79e1c77a.png
new file mode 100644
index 0000000000000000000000000000000000000000..34db779334a61c4e691208769ad6443e7446dfde
Binary files /dev/null and b/content/zh/post/tangzuliang/images/20211223-48387a14-605d-450f-a26d-f4cd79e1c77a.png differ
diff --git a/content/zh/post/tangzuliang/images/20211223-de236193-6d32-4d76-bd4f-974ce8d215b9.png b/content/zh/post/tangzuliang/images/20211223-de236193-6d32-4d76-bd4f-974ce8d215b9.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba365b852db4bd6c5de5c9d4d40b898c124fbeda
Binary files /dev/null and b/content/zh/post/tangzuliang/images/20211223-de236193-6d32-4d76-bd4f-974ce8d215b9.png differ
diff --git a/content/zh/post/totaj/images/chameleon_create_database.png b/content/zh/post/totaj/images/chameleon_create_database.png
new file mode 100644
index 0000000000000000000000000000000000000000..e8f4da03dba81f84f91734c0f921c0d1a3319417
Binary files /dev/null and b/content/zh/post/totaj/images/chameleon_create_database.png differ
diff --git a/content/zh/post/totaj/images/chameleon_create_mysql_user.png b/content/zh/post/totaj/images/chameleon_create_mysql_user.png
new file mode 100644
index 0000000000000000000000000000000000000000..58ce782e10725bdd4897b464d62606e448905039
Binary files /dev/null and b/content/zh/post/totaj/images/chameleon_create_mysql_user.png differ
diff --git a/content/zh/post/totaj/images/chameleon_init_replica.png b/content/zh/post/totaj/images/chameleon_init_replica.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f9d7982fefdc947b5636ca9d91cb1e171206ce1
Binary files /dev/null and b/content/zh/post/totaj/images/chameleon_init_replica.png differ
diff --git a/content/zh/post/totaj/images/chameleon_mysql_start_replica.png b/content/zh/post/totaj/images/chameleon_mysql_start_replica.png
new file mode 100644
index 0000000000000000000000000000000000000000..255f95d49479404d1a9b1386cb1cf633dd014eca
Binary files /dev/null and b/content/zh/post/totaj/images/chameleon_mysql_start_replica.png differ
diff --git a/content/zh/post/totaj/images/chameleon_start_replica.png 
b/content/zh/post/totaj/images/chameleon_start_replica.png new file mode 100644 index 0000000000000000000000000000000000000000..f915816df32c3b6e159e9cc98a759a8eaf4e0b40 Binary files /dev/null and b/content/zh/post/totaj/images/chameleon_start_replica.png differ diff --git a/content/zh/post/totaj/images/connect_failed.png b/content/zh/post/totaj/images/connect_failed.png new file mode 100644 index 0000000000000000000000000000000000000000..9556350434dbc078d00e8e8b81acf69f147a7d24 Binary files /dev/null and b/content/zh/post/totaj/images/connect_failed.png differ diff --git a/content/zh/post/totaj/images/create_database.png b/content/zh/post/totaj/images/create_database.png new file mode 100644 index 0000000000000000000000000000000000000000..40b9f7e011e53b7ae30f016dba638c6d435439e3 Binary files /dev/null and b/content/zh/post/totaj/images/create_database.png differ diff --git a/content/zh/post/totaj/images/docker_run.png b/content/zh/post/totaj/images/docker_run.png new file mode 100644 index 0000000000000000000000000000000000000000..0e1fb1807b4cb6269977c413391c9fc87a490e02 Binary files /dev/null and b/content/zh/post/totaj/images/docker_run.png differ diff --git a/content/zh/post/totaj/images/opengauss.loader.png b/content/zh/post/totaj/images/opengauss.loader.png new file mode 100644 index 0000000000000000000000000000000000000000..59b2ccd7786f7e547629a83c414a99cb62bcf2fa Binary files /dev/null and b/content/zh/post/totaj/images/opengauss.loader.png differ diff --git a/content/zh/post/totaj/images/opengauss_results.png b/content/zh/post/totaj/images/opengauss_results.png new file mode 100644 index 0000000000000000000000000000000000000000..8fa0a7245fb1245f37e2e0c03593b4dd869b5f21 Binary files /dev/null and b/content/zh/post/totaj/images/opengauss_results.png differ diff --git a/content/zh/post/totaj/images/pgloder.png b/content/zh/post/totaj/images/pgloder.png new file mode 100644 index 0000000000000000000000000000000000000000..fd14b3dcdcb6b67b44159f282cf02de8f00d4d34 Binary files /dev/null and b/content/zh/post/totaj/images/pgloder.png differ diff --git a/content/zh/post/totaj/title/img.png b/content/zh/post/totaj/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/totaj/title/img.png differ diff --git "a/content/zh/post/totaj/\345\246\202\344\275\225\344\275\277\347\224\250pg_chameleon\350\277\201\347\247\273MySQL\346\225\260\346\215\256\345\272\223\350\207\263openGauss.md" "b/content/zh/post/totaj/\345\246\202\344\275\225\344\275\277\347\224\250pg_chameleon\350\277\201\347\247\273MySQL\346\225\260\346\215\256\345\272\223\350\207\263openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..e1da67136ab4eae41969a3bf0e66b693a976b97d --- /dev/null +++ "b/content/zh/post/totaj/\345\246\202\344\275\225\344\275\277\347\224\250pg_chameleon\350\277\201\347\247\273MySQL\346\225\260\346\215\256\345\272\223\350\207\263openGauss.md" @@ -0,0 +1,207 @@ ++++ +title = "如何使用pg_chameleon迁移MySQL数据库至openGauss" +date = "2021-06-16" +tags = ["openGauss数据迁移"] +archives = "2021-06" +author = "彭炯" +summary = "如何使用pg_chameleon迁移MySQL数据库至openGauss" +img = "/zh/post/totaj/title/img.png" +times = "17:30" ++++ + +# pg\_chameleon介绍 + +pg\_chameleon是一个用Python 
3编写的实时复制工具,经过内部适配,目前支持MySQL迁移到openGauss。工具使用mysql-replication库从MySQL中提取rowimages,这些rowimages将以jsonb格式被存储到openGauss中。在openGauss中会执行一个pl/pgsql函数,解码jsonb并将更改重演到openGauss。同时,工具通过一次初始化配置,使用只读模式,将MySQL的全量数据拉取到openGauss,使得该工具提供了初始全量数据的复制以及后续增量数据的实时在线复制功能。pg\_chameleon的特色包括: + +- 通过读取MySQL的binlog,提供实时在线复制的功能。 + +- 支持从多个MySQL schema读取数据,并将其恢复到目标openGauss数据库中。源schema和目标schema可以使用不同的名称。 + +- 通过守护进程实现实时复制,包含两个子进程,一个负责读取MySQL侧的日志,一个负责在openGauss侧重演变更。 + +使用pg\_chameleon将MySQL数据库迁移至openGauss,通过pg\_chameleon的实时复制能力,可大大降低系统切换数据库时的停服时间。 + +# pg\_chameleon在openGauss上的使用注意事项 + +1. pg\_chameleon依赖psycopg2,psycopg2内部通过pg\_config检查PostgreSQL版本号,限制低版本PostgreSQL使用该驱动。而openGauss的pg\_config返回的是openGauss的版本号(当前是 openGauss 2.0.0),会导致该驱动报版本错误,"Psycopg requires PostgreSQL client library (libpq) >= 9.1"。解决方案为通过源码编译使用psycopg2,并去掉源码头文件 **psycopg/psycopg.h** 中的相关限制。 + +2. pg\_chameleon通过设置 **LOCK\_TIMEOUT** GUC参数限制在PostgreSQL中的等锁的超时时间。openGauss不支持该参数(openGauss支持类似的GUC参数 **lockwait\_timeout** ,但是需要管理员权限设置)。需要将pg\_chameleon源码中的相关设置去掉。 + +3. pg\_chameleon用到了upsert语法,用来指定发生违反约束时的替换动作。openGauss支持的upsert功能语法与PostgreSQL的语法不同。openGauss的语法是 **ON DUPLICATE KEY UPDATE { column\_name = { expression | DEFAULT } } [, ...]**。PostgreSQL的语法是 **ON CONFLICT [conflict\_target] DO UPDATE SET { column\_name = { expression | DEFAULT } }**。两者在功能和语法上略有差异。需要修改pg\_chameleon源码中相关的upsert语句。 + +4. pg\_chameleon用到了CREATE SCHEMA IF NOT EXISTS、CREATE INDEX IF NOT EXISTS语法。openGauss不支持SCHEMA和INDEX的IF NOT EXISTS选项。需要修改成先判断SCHEMA和INDEX是否存在,然后再创建的逻辑。 + +5. penGauss对于数组的范围选择,使用的是 column\_name[start, end] 的方式。而PostgreSQL使用的是 column\_name[start : end] 的方式。需要修改pg\_chameleon源码中关于数组的范围选择方式。 + +6. pg\_chameleon使用了继承表(INHERITS)功能,而当前openGauss不支持继承表。需要改写使用到继承表的SQL语句和表。 + +接下来我们将演示如何使用pg\_chameleon迁移MySQL数据库至openGauss。 + +# 配置pg\_chameleon + +pg\_chameleon通过 **~/.pg\_chameleon/configuration** 下的配置文件config-example.yaml定义迁移过程中的各项配置。整个配置文件大约分成四个部分,分别是全局设置、类型重载、目标数据库连接设置、源数据库设置。全局设置主要定义log文件路径、log等级等。类型重载让用户可以自定义类型转换规则,允许用户覆盖已有的默认转换规则。目标数据库连接设置用于配置连接至openGauss的连接参数。源数据库设置定义连接至MySQL的连接参数以及其他复制过程中的可配置项目。 + +详细的配置项解读,可查看官网的说明: + +[https://pgchameleon.org/documents\_v2/configuration\_file.html](https://pgchameleon.org/documents_v2/configuration_file.html) + +下面是一份配置文件示例: +``` +# global settings +pid_dir: '~/.pg_chameleon/pid/' +log_dir: '~/.pg_chameleon/logs/' +log_dest: file +log_level: info +log_days_keep: 10 +rollbar_key: '' +rollbar_env: '' + +# type_override allows the user to override the default type conversion +# into a different one. 
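+# (补充示意)type_override按MySQL类型名批量改写,下面再给一个注释掉的
+# 例子:把datetime统一映射为timestamp(写法仅供参考,以所用版本的文档为准)
+# "datetime":
+#   override_to: timestamp
+#   override_tables:
+#     - "*"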
+type_override:
+  "tinyint(1)":
+    override_to: boolean
+    override_tables:
+      - "*"
+
+# postgres destination connection
+pg_conn:
+  host: "1.1.1.1"
+  port: "5432"
+  user: "opengauss_test"
+  password: "password_123"
+  database: "opengauss_database"
+  charset: "utf8"
+
+sources:
+  mysql:
+    db_conn:
+      host: "1.1.1.1"
+      port: "3306"
+      user: "mysql_test"
+      password: "password123"
+      charset: 'utf8'
+      connect_timeout: 10
+    schema_mappings:
+      mysql_database: sch_mysql_database
+    limit_tables:
+    skip_tables:
+    grant_select_to:
+      - usr_migration
+    lock_timeout: "120s"
+    my_server_id: 1
+    replica_batch_size: 10000
+    replay_max_rows: 10000
+    batch_retention: '1 day'
+    copy_max_memory: "300M"
+    copy_mode: 'file'
+    out_dir: /tmp
+    sleep_loop: 1
+    on_error_replay: continue
+    on_error_read: continue
+    auto_maintenance: "disabled"
+    gtid_enable: false
+    type: mysql
+    keep_existing_schema: No
+```
+以上配置文件的含义是,迁移数据时,MySQL侧使用的用户名密码分别是 **mysql\_test** 和 **password123** 。MySQL服务器的IP和port分别是 **1.1.1.1** 和 **3306**,待迁移的数据库是 **mysql\_database** 。
+
+openGauss侧使用的用户名密码分别是 **opengauss\_test** 和 **password\_123** 。openGauss服务器的IP和port分别是 **1.1.1.1** 和 **5432** ,目标数据库是 **opengauss\_database**,同时会在 **opengauss\_database** 下创建 **sch\_mysql\_database** schema,迁移的表都将位于该schema下。
+
+需要注意的是,这里使用的用户需要有远程连接MySQL和openGauss的权限,以及对对应数据库的读写权限。同时对于openGauss,运行pg\_chameleon所在的机器需要在openGauss的远程访问白名单中。对于MySQL,用户还需要有RELOAD、REPLICATION CLIENT、REPLICATION SLAVE的权限。
+
+下面开始介绍整个迁移的步骤。
+
+# 创建用户及database
+
+在openGauss侧创建迁移时需要用到的用户以及database。
+
+![](../images/chameleon_create_database.png)
+
+在MySQL侧创建迁移时需要用到的用户并赋予相关权限。
+
+![](../images/chameleon_create_mysql_user.png)
+
+# 开启MySQL的复制功能
+
+修改MySQL的配置文件,一般是/etc/my.cnf或者是 /etc/my.cnf.d/ 文件夹下的cnf配置文件。在[mysqld] 配置块下修改如下配置(若没有mysqld配置块,新增即可):
+```
+[mysqld]
+binlog_format= ROW
+log_bin = mysql-bin
+server_id = 1
+binlog_row_image=FULL
+expire_logs_days = 10
+```
+修改完毕后需要重启MySQL使配置生效。
+
+# 运行pg\_chameleon进行数据迁移
+
+创建python虚拟环境并激活:
+
+ **python3 -m venv venv**
+ **source venv/bin/activate**
+
+下载安装psycopg2和pg\_chameleon:
+
+更新pip: **pip install pip --upgrade**
+
+将openGauss的 pg\_config 工具所在文件夹加入到 $PATH 环境变量中。例如:
+
+ **export PATH={openGauss-server}/dest/bin:$PATH**
+
+下载psycopg2源码(https://github.com/psycopg/psycopg2 ),去掉检查PostgreSQL版本的限制,使用 **python setup.py install** 编译安装。
+
+下载pg\_chameleon源码(https://github.com/the4thdoctor/pg_chameleon ),修改前面提到的在openGauss上的问题,使用 **python setup.py install** 编译安装。
+
+创建pg\_chameleon配置文件目录:
+
+ **chameleon set\_configuration\_files**
+
+修改pg\_chameleon配置文件:
+
+ **cd ~/.pg\_chameleon/configuration**
+
+ **cp config-example.yml default.yml**
+
+根据实际情况修改 default.yml 文件中的内容。重点修改pg\_conn和mysql中的连接配置信息、用户信息、数据库信息、schema映射关系。前面已给出一份配置文件示例供参考。
+
+初始化复制流:
+
+**chameleon create\_replica\_schema --config default**
+
+**chameleon add\_source --config default --source mysql**
+
+此步骤将在openGauss侧创建用于复制过程的辅助schema和表。
+
+复制基础数据:
+
+**chameleon init\_replica --config default --source mysql**
+
+做完此步骤后,将把MySQL当前的全量数据复制到openGauss。
+
+可以在openGauss侧查看全量数据复制后的情况。
+
+![](../images/chameleon_init_replica.png)
+
+开启在线实时复制:
+
+**chameleon start\_replica --config default --source mysql**
+
+开启实时复制后,在MySQL侧插入一条数据:
+
+![](../images/chameleon_mysql_start_replica.png)
+
+在openGauss侧查看 test\_decimal 表的数据:
+
+![](../images/chameleon_start_replica.png)
+
+可以看到新插入的数据在openGauss侧成功被复制过来了。
+
+停止在线复制:
+
+**chameleon stop\_replica --config default --source mysql**
+
+**chameleon detach\_replica --config default --source mysql**
+
+**chameleon drop\_replica\_schema --config default**
\ No newline at end of file 
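+
+补充:在线复制运行期间,如需观察守护进程与复制批次的执行情况,可以尝试pg\_chameleon自带的状态查询命令(以下命令以pg\_chameleon官方文档为准,此处仅作示意):
+
+**chameleon show\_status --config default --source mysql**
+
+**chameleon show\_errors --config default**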
diff --git "a/content/zh/post/totaj/\345\246\202\344\275\225\344\275\277\347\224\250pgloader\350\277\201\347\247\273MySQL\346\225\260\346\215\256\345\272\223\350\207\263openGauss.md" "b/content/zh/post/totaj/\345\246\202\344\275\225\344\275\277\347\224\250pgloader\350\277\201\347\247\273MySQL\346\225\260\346\215\256\345\272\223\350\207\263openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..3b2694f72ec08000f8e097b28482a7535b48d1de --- /dev/null +++ "b/content/zh/post/totaj/\345\246\202\344\275\225\344\275\277\347\224\250pgloader\350\277\201\347\247\273MySQL\346\225\260\346\215\256\345\272\223\350\207\263openGauss.md" @@ -0,0 +1,128 @@ ++++ +title = "如何使用pgloader迁移MySQL数据库至openGauss" +date = "2021-06-15" +tags = ["openGauss数据迁移"] +archives = "2021-06" +author = "彭炯" +summary = "如何使用pgloader迁移MySQL数据库至openGauss" +img = "/zh/post/totaj/title/img.png" +times = "11:30" ++++ + +# pgloader介绍 + +pgloader是一个数据导入工具,使用COPY命令将数据导入到PostgreSQL。pgloader有两种工作模式,一种是从文件导入,一种是迁移数据库。pgloader在两种情况下都使用PostgreSQL的COPY协议高效的传输数据。 + +openGauss兼容PostgreSQL的通信协议以及绝大部分语法,可使用pgloader将MySQL数据库迁移至openGauss。 + +# pgloader在openGauss上的问题 + +由于openGauss 对原生PostgreSQL的通信协议进行了安全加固,这导致与PostgreSQL的默认通信协议互相不兼容了,因此,使用pgloader的PostgreSQL原生版本默认是不能连接openGauss的。会报类似下述错误: + +![](../images/connect_failed.png) + +处理方式是通过修改GUC进行规避,涉及的GUC参数是password\_encryption\_type,PostgreSQL默认的加密方式是md5,由于md5已经不安全了,为了提高openGauss的安全能力,openGauss支持sha256, 并且默认是sha256的加密方式,这就导致了上述报错。但是openGauss并没有删除md5的加密和验证逻辑,因此,是可以通过修改该GUC参数开启md5加密方式的。 + +开启方法: +```shell + gs_guc reload -D $PGDATA -c "password_encryption_type = 1" +``` + +**一定要在设置完上述参数后,再新建用户。** 然后就可以使用该新建用户登录数据库了。 + +接下来我们将演示如何使用pgloader迁移MySQL数据库至openGauss。 + +# 安装pgloader + +您可以直接从 [apt.postgresql.org](https://wiki.postgresql.org/wiki/Apt) 和官方 debian 存储库 [packages.debian.org/pgloader](https://packages.debian.org/search?keywords=pgloader) 安装 pgloader。 +```shell +$ apt-get install pgloader +``` + +同时,您也可以通过 docker image 使用pgloader。 +```shell +$ docker pull dimitri/pgloader +$ docker run --rm --name pgloader dimitri/pgloader:latest pgloader --version +$ docker run --rm --name pgloader dimitri/pgloader:latest pgloader --help +``` + +# 配置pgloader + +pgloader提供丰富的配置项,您可以自由定义迁移时的各类动作,如通过include drop,删除目标数据库中名称出现在MySQL数据库中的所有表,以允许连续多次使用同一命令,从干净的环境自动启动。 + +这里简单介绍几个常用的配置项。 + +**FROM** :源数据库的连接URL,格式如下: +``` +mysql://[user[:password]@][netloc][:port][/dbname][?option=value&...] +``` +**INTO** :目标数据库的连接URL,格式如下: +``` +postgresql://[user[:password]@][netloc][:port][/dbname][?option=value&...] 
+``` +**WITH** :从MySQL数据库加载时的选项。有 **include drop、create tables、create indexes** 等选项。 +**CAST** :用户自定义类型转换规则。允许用户覆盖已有的默认转换规则或者使用特殊情况修改它们。 + +部分迁移:用户可以通过 **including only table names matching** 和 **excluding table names matching** 实现只迁移特定的表或者在迁移过程中排除特定的表。 + +详细的配置项解读,可查看官网的说明: + +[https://pgloader.readthedocs.io/en/latest/ref/mysql.html](https://pgloader.readthedocs.io/en/latest/ref/mysql.html) + +下面是一份从MySQL迁移到openGauss的配置文件示例: + +``` +LOAD DATABASE +FROM mysql://mysql_test:password123@1.1.1.1:3306/mysql_database +INTO postgresql://opengauss_test:password_123@1.1.1.1:5432/opengauss_database +WITH include drop, create tables, create indexes, reset no sequences, + workers = 8, concurrency = 1, + multiple readers per thread, rows per range = 50000 +CAST +type varchar when(= 1 precision) to "boolean" drop typemod keep default keep not null; +``` +以上配置文件的含义是,迁移数据时,MySQL侧使用的用户名密码分别是 **mysql\_test** 和 **password123** 。MySQL服务器的IP和port分别是 **1.1.1.1** 和 **3306** ,待迁移的数据库是 **mysql\_database** 。 + +openGauss侧使用的用户名密码分别是 **opengauss\_test** 和 **password\_123** 。openGauss服务器的IP和port分别是 **1.1.1.1** 和 **5432** ,目标数据库是 **opengauss\_database** 。 + +需要注意的是,这里使用的用户需要有远程连接MySQL和openGauss的权限,以及对对应数据库的读写权限。同时对于openGauss,运行pgloader所在的机器需要在openGauss的远程访问白名单中。 + +# 创建用户及database + +在openGauss侧创建迁移时需要用到的用户以及database。 + +![](../images/create_database.png) + +# 运行pgloader进行数据迁移 + +以下演示基于使用docker image方式安装的pgloader。将前面准备好的配置文件命名为 openGauss.loader。 + +![](../images/opengauss.loader.png) + +启动docker: +```shell +docker run -tid --name pgloader_test dimitri/pgloader +``` + +复制配置文件到docker: +```shell +docker cp ./openGauss.loader pgloader_test:/ +``` + +进入docker环境: +```shell +docker exec -it pgloader_test /bin/bash +``` + +![](../images/docker_run.png) + +启动pgloader,等待数据迁移完成,查看迁移结果报告: +```shell +pgloader openGauss.loader +``` + +![](../images/pgloder.png) + +在openGauss侧查看迁移结果: + +![](../images/opengauss_results.png) \ No newline at end of file diff --git "a/content/zh/post/totaj/\345\246\202\344\275\225\350\267\221\345\242\236\351\207\217\344\273\243\347\240\201\350\246\206\347\233\226\347\216\207.md" "b/content/zh/post/totaj/\345\246\202\344\275\225\350\267\221\345\242\236\351\207\217\344\273\243\347\240\201\350\246\206\347\233\226\347\216\207.md" new file mode 100644 index 0000000000000000000000000000000000000000..ca39a61b817f6c7dbae525d091908bdce9855599 --- /dev/null +++ "b/content/zh/post/totaj/\345\246\202\344\275\225\350\267\221\345\242\236\351\207\217\344\273\243\347\240\201\350\246\206\347\233\226\347\216\207.md" @@ -0,0 +1,99 @@ ++++ +title = "如何跑增量代码覆盖率" +date = "2022-05-05" +tags = ["覆盖率"] +archives = "2022-05" +author = "彭炯" +summary = "如何跑增量代码覆盖率" +img = "/zh/post/totaj/title/img.png" +times = "17:30" ++++ + +全量代码覆盖率统计: +1. 下载安装lcov工具(版本 >= 1.14), http://ltp.sourceforge.net/coverage/lcov.php +```shell +[pengjiong@localhost ~]$ lcov -v +lcov: LCOV version 1.14 +``` +2. 
增加插桩函数用于收集结果:在代码根目录下执行:
+```shell
+sed -i '/NotifyProcessActive();/i __gcov_flush();' src/gausskernel/process/postmaster/postmaster.cpp
+sed -i 'N;256 i extern "C" void __gcov_flush();' src/gausskernel/process/postmaster/postmaster.cpp
+```
+执行完后可以通过git diff src/gausskernel/process/postmaster/postmaster.cpp查看插桩是否成功。
+```shell
+[pengjiong@localhost openGauss-server]$ git diff src/gausskernel/process/postmaster/postmaster.cpp
+diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp
+index ea2c611f2..61d044431 100755
+--- a/src/gausskernel/process/postmaster/postmaster.cpp
++++ b/src/gausskernel/process/postmaster/postmaster.cpp
+@@ -252,6 +252,7 @@ static bool isNeedGetLCName = true;
+ 
+ #define IS_FD_TO_RECV_GSSOCK(fd) \
+ ((fd) == t_thrd.postmaster_cxt.sock_for_libcomm || (fd) == t_thrd.libpq_cxt.listen_fd_for_recv_flow_ctrl)
++extern "C" void __gcov_flush();
+ 
+ /* These two are only here before of the SSL multithread initialization of OpenSSL component */
+ #include "ssl/gs_openssl_client.h"
+@@ -4546,6 +4547,7 @@ static void pmdie(SIGNAL_ARGS)
+ break;
+ }
+ 
++__gcov_flush();
+ NotifyProcessActive();
+ 
+ gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL);
+```
+3. configure时增加编译选项:其中CFLAGS中的`-O0 -fprofile-arcs -ftest-coverage`与`LDFLAGS='-lgcov'`是必须新增的覆盖率选项,其余选项请根据实际情况调整:
+```shell
+./configure CC=g++ CFLAGS='-O0 -fprofile-arcs -ftest-coverage' --prefix=/XXX --enable-debug --enable-cassert --enable-thread-safety --without-readline --without-zlib --gcc-version=7.3.0 --3rd=/XXX LDFLAGS='-lgcov'
+```
+4. 正常执行make && make install
+5. 然后正常执行fastcheck或者其他LLT用例即可
+6. 用例运行完后,执行 gs_ctl stop -D xxx 停止gaussdb,开始准备收集覆盖率信息
+7. 在代码根目录下执行:
+```shell
+lcov --capture --directory . --output-file coverage.info
+```
+然后会在根目录生成coverage.info文件
+```shell
+[pengjiong@localhost openGauss-server]$ ll coverage.info
+-rw-------. 1 pengjiong pengjiong 20M Apr 26 16:31 coverage.info
+```
+8. 编辑coverage.info文件,找到/xxx/src/test/whitebox/knl_whitebox_test.cpp这一行,将其修改为文件的正确路径
+9. 接下来,将生成的info文件转换为可视化的html文件,在代码根目录执行:
+```shell
+genhtml --no-prefix --no-sort coverage.info -o results
+```
+10. 执行成功显示覆盖率如下:
+```
+Writing directory view page.
+Overall coverage rate:
+ lines......: 15.1% (144084 of 957019 lines)
+ functions..: 18.9% (12059 of 63944 functions)
+```
+11. 同时在根目录下生成results文件夹,将results文件夹下载到windows上,用浏览器打开results目录下的index.html文件,即可看到此次覆盖率的全部情况。也可以点击文件夹或者文件查看到具体文件、具体函数的覆盖率。
+
+增量代码覆盖率统计:
+在全量代码覆盖率结果的基础上,通过增量代码的diff文件,生成增量代码覆盖率。
+1. 得到基线代码和修改后的新代码的diff文件,建议拉取两份完全干净的代码进行比较,以免其他文件影响最终结果。
+```
+diff -r -N -x ".git" -x "*.gcov" -u /home/workspace/openGauss-server_base /home/workspace/openGauss-server_increment >> diff.txt
+```
+-r 表示递归,子目录也产生输出
+-N 文件不存在当做空文件,比如新版本增加了一个文件,此选项会将每一行都输出
+-x 表示排除,比如.git不需要
+-u 是lcov需要的格式
+2. 由全量覆盖率结果(coverage.info)和diff文件,生成增量覆盖率结果。addlcov工具下载地址:https://github.com/Dragonliu2018/addlcov
+```
+addlcov --rc lcov_branch_coverage=1 --diff coverage.info diff.txt -o increment.info --strip $dep --path $new_Addr
+```
+coverage.info:前面得到的全量覆盖率文件路径
+increment.info:输出的增量覆盖率文件路径
+$dep:$new_Addr的代码路径深度,即路径中分隔符"/"的个数(包括结尾的"/"),可通过以下命令获取
+```shell
+dep=`echo $new_Addr |sed -e 's/\/*$//g' | awk -F"/" '{print NF}'`
+```
+注意,所有文件路径全部使用绝对路径,如果coverage.info文件中部分文件在增量修改后被删除了,在coverage.info里面把那一行删除。
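+为便于理解--strip和--path参数,下面给出一个按上文命令拼装的完整示例(其中的路径均为示意,请替换为实际环境中的绝对路径):
+```shell
+# 假设:全量结果为/home/workspace/coverage.info,diff文件为/home/workspace/diff.txt
+new_Addr=/home/workspace/openGauss-server_increment
+# 按上文方式计算路径深度,本例中dep=4
+dep=`echo $new_Addr |sed -e 's/\/*$//g' | awk -F"/" '{print NF}'`
+addlcov --rc lcov_branch_coverage=1 --diff /home/workspace/coverage.info /home/workspace/diff.txt -o /home/workspace/increment.info --strip $dep --path $new_Addr
+```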
+3. 收集增量覆盖率结果,同全量的操作一样:
+```shell
+genhtml --rc lcov_branch_coverage=1 --no-prefix --no-sort increment.info -o increment_results
+```
\ No newline at end of file 
diff --git "a/content/zh/post/tracy/MogDB\346\225\260\346\215\256\345\272\223\345\257\274\345\207\272\346\214\207\345\256\232schema\346\225\260\346\215\256\345\271\266\346\201\242\345\244\215\345\210\260\345\205\266\344\273\226\346\225\260\346\215\256\345\272\223.md" "b/content/zh/post/tracy/MogDB\346\225\260\346\215\256\345\272\223\345\257\274\345\207\272\346\214\207\345\256\232schema\346\225\260\346\215\256\345\271\266\346\201\242\345\244\215\345\210\260\345\205\266\344\273\226\346\225\260\346\215\256\345\272\223.md"
new file mode 100644
index 0000000000000000000000000000000000000000..e3637be4b38b897fa467a7fd4b2a2d1325070f89
--- /dev/null
+++ "b/content/zh/post/tracy/MogDB\346\225\260\346\215\256\345\272\223\345\257\274\345\207\272\346\214\207\345\256\232schema\346\225\260\346\215\256\345\271\266\346\201\242\345\244\215\345\210\260\345\205\266\344\273\226\346\225\260\346\215\256\345\272\223.md"
@@ -0,0 +1,104 @@
++++
+
+title = "MogDB数据库导出指定schema数据并恢复到其他数据库"
+
+date = "2022-04-12"
+
+tags = ["MogDB数据库导出指定schema数据并恢复到其他数据库"]
+
+archives = "2022-04"
+
+author = "tracy"
+
+summary = "MogDB数据库导出指定schema数据并恢复到其他数据库"
+
+img = "/zh/post/tracy/title/img20.png"
+
+times = "10:20"
++++
+
+# MogDB数据库导出指定schema数据并恢复到其他数据库
+
+本文出处:https://www.modb.pro/db/76290
+
+
+
+## 1.环境概述
+
+MogDB版本:1.1.0
+操作系统版本:CentOS 7.6
+
+## 2.备份及恢复步骤
+
+### 2.1. 备份OA业务数据
+
+**1.omm用户,使用gs_dump备份数据:**
+
+```sql
+--执行命令前,先确认omm用户对目录/opt/dump_oa/具有写权限
+$ ll /opt/|grep dump
+drwxr-xr-x 2 omm dbgrp 4096 May 28 15:16 dump_oa
+--执行如下命令进行数据导出:
+$ nohup gs_dump mogdb -n oa -n schema2 -F c -f /opt/dump_oa/schema_2.dmp > /opt/dump_oa/schema_2.log 2>&1 &
+```
+
+查看日志,出现如下字样,表示导出成功:
+
+```sql
+$ tail schema_2.log
+nohup: ignoring input
+gs_dump[port='5432'][mogdb][2021-05-28 15:07:56]: The total objects number is 1516.
+gs_dump[port='5432'][mogdb][2021-05-28 15:07:57]: [100.00%] 1516 objects have been dumped.
+gs_dump[port='5432'][mogdb][2021-05-28 15:12:39]: dump database mogdb successfully
+gs_dump[port='5432'][mogdb][2021-05-28 15:12:39]: total time: 285166 ms
+```
+
+**2.导出创建同义词SQL脚本**
+
+```sql
+$ gsql -d mogdb -t -c "select 'create synonym '||n.nspname||'.'||s.synname||' for '||s.synobjschema||'.'||s.synobjname||';' from pg_synonym s,pg_namespace n,pg_user u where u.usesysid=s.synowner and n.oid=s.synnamespace and n.nspname in('oa','schema2');" > /opt/dump_oa/create_synonym.sql
+```
+
+### 2.2. 创建恢复数据库
+
+```sql
+$ gsql -d mogdb -r
+mogdb=# CREATE DATABASE dump_oa DBCOMPATIBILITY 'PG' OWNER oa;
+CREATE DATABASE
+```
+
+### 2.3. 恢复数据
+
+**1.使用gs_restore命令导入数据:**
+
+```sql
+$ nohup gs_restore -d dump_oa -v /opt/dump_oa/schema_2.dmp > /opt/dump_oa/out_restore_schema.log 2>&1 &
+```
+
+查看日志,出现如下字样,表示恢复成功:
+
+```sql
+[omm@DC8VDJNK2-R730 dump_oa]$ tail out_restore_schema.log
+setting owner and privileges for FK CONSTRAINT "oa.fk_myoa_approve_rule_param_1"
+setting owner and privileges for FK CONSTRAINT "oa.fk_myoa_approve_task_node"
+setting owner and privileges for FK CONSTRAINT "oa.fk_myoa_cussystem_bus_type_1"
+setting owner and privileges for FK CONSTRAINT "oa.fk_myoa_handle_group_1"
+setting owner and privileges for FK CONSTRAINT "oa.fk_myoa_movement_1"
+setting owner and privileges for FK CONSTRAINT "oa.fk_myoa_procedure_1"
+setting owner and privileges for FK CONSTRAINT "oa.fk_um_organ__parent_id"
+setting owner and privileges for FK CONSTRAINT "oa.groupid"
+restore operation successful
+total time: 703605 ms
+```
+
+**2.导入同义词:**
+
+```sql
+$ gsql -d dump_oa -U oa -f /ulic/soft/mogdb/mtk/oa_create_synonym.sql
+Password for user oa:
+…
+CREATE SYNONYM
+CREATE SYNONYM
+total time: 481 ms
+```
+
diff --git "a/content/zh/post/tracy/openGauss\346\225\260\346\215\256\345\272\223\345\257\271\350\261\241\345\261\236\344\270\273\345\217\230\346\233\264\345\220\216\344\274\232\350\207\252\345\212\250\350\260\203\346\225\264\345\257\271\350\261\241\346\235\203\351\231\220\345\220\227.md" "b/content/zh/post/tracy/openGauss\346\225\260\346\215\256\345\272\223\345\257\271\350\261\241\345\261\236\344\270\273\345\217\230\346\233\264\345\220\216\344\274\232\350\207\252\345\212\250\350\260\203\346\225\264\345\257\271\350\261\241\346\235\203\351\231\220\345\220\227.md"
new file mode 100644
index 0000000000000000000000000000000000000000..156df16b820a457244e801bd7fd013fec1a8d8b5
--- /dev/null
+++ "b/content/zh/post/tracy/openGauss\346\225\260\346\215\256\345\272\223\345\257\271\350\261\241\345\261\236\344\270\273\345\217\230\346\233\264\345\220\216\344\274\232\350\207\252\345\212\250\350\260\203\346\225\264\345\257\271\350\261\241\346\235\203\351\231\220\345\220\227.md"
@@ -0,0 +1,75 @@
++++
+
+title = "openGauss数据库对象属主变更后会自动调整对象权限吗?"
+
+date = "2022-04-06"
+
+tags = ["openGauss数据库对象属主变更后会自动调整对象权限吗?"]
+
+archives = "2022-04"
+
+author = "tracy"
+
+summary = "openGauss数据库对象属主变更后会自动调整对象权限吗?"
+
+img = "/zh/post/tracy/title/img20.png"
+
+times = "11:37"
+
++++
+
+# openGauss数据库对象属主变更后会自动调整对象权限吗?
+
+openGauss数据库创建了数据库对象之后,可以使用alter命令修改对象的属主。
+以表为例,修改属主的命令如下:
+
+```sql
+ALTER TABLE <table_name> OWNER TO <new_owner>;
+```
+
+接下来就测试一下修改表的属主,观察一下表的权限调整情况:
+创建测试用户:
+
+```sql
+create user test password 'xxxx';
+create user test1 password 'xxxx';
+create user test5 password 'xxxx';
+```
+
+
+使用test用户登录数据库创建表t:
+
+```sql
+create table t (id int);
+```
+
+将表test.t的查询权限赋给test5用户:
+
+```sql
+grant usage on schema test to test5;
+grant select on test.t to test5;
+```
+
+查看表t的权限分配情况:
+
+```sql
+\dp test.t
+```
+
+修改表t的属主为test1:
+
+```sql
+ALTER TABLE test.t OWNER TO test1;
+```
+
+再次查看表t的权限分配情况:
+
+```sql
+\dp test.t
+```
+
+在openGauss中修改表属主的测试结果:
+
+ ![image.png](https://oss-emcsprod-public.modb.pro/image/editor/20220111-9e35cdfa-1cf6-467f-a17b-80c4b6559869.png)
+
+**这里我们注意到,表修改属主前后,表访问权限中的被赋权者/赋权者会被自动调整:如果被赋权者/赋权者是之前的属主,则会改为当前的属主。**
diff --git "a/content/zh/post/tracy/openGauss\346\225\260\346\215\256\345\272\223\345\260\206\347\243\201\347\233\230\350\241\250\350\275\254\346\215\242\344\270\272MOT.md" "b/content/zh/post/tracy/openGauss\346\225\260\346\215\256\345\272\223\345\260\206\347\243\201\347\233\230\350\241\250\350\275\254\346\215\242\344\270\272MOT.md"
new file mode 100644
index 0000000000000000000000000000000000000000..422001287820663ea8300dd902d1981be2d44ed3
--- /dev/null
+++ "b/content/zh/post/tracy/openGauss\346\225\260\346\215\256\345\272\223\345\260\206\347\243\201\347\233\230\350\241\250\350\275\254\346\215\242\344\270\272MOT.md"
@@ -0,0 +1,150 @@
++++
+
+title = "openGauss数据库将磁盘表转换为MOT"
+
+date = "2022-04-06"
+
+tags = ["openGauss数据库将磁盘表转换为MOT"]
+
+archives = "2022-04"
+
+author = "tracy"
+
+summary = "openGauss数据库将磁盘表转换为MOT"
+
+img = "/zh/post/tracy/title/img20.png"
+
+times = "11:37"
+
++++
+
+# openGauss数据库将磁盘表转换为MOT
+
+一、将磁盘表转换为MOT方法
+
+磁盘表直接转换为MOT尚不能实现,这意味着尚不存在将基于磁盘的表转换为MOT的ALTER TABLE语句。目前MOT表也不支持rename、create as select以及insert select(普通表)的操作。要将基于磁盘的表转换为MOT,可以使用gs_dump工具导出数据,再使用gs_restore工具导入数据。
+
+步骤如下:
+1.暂停应用程序活动。
+2.使用gs_dump工具将表数据转储到磁盘的物理文件中。请确保使用data only。
+3.重命名原始基于磁盘的表。
+4.创建同名同模式的MOT。
+5.使用gs_restore将磁盘文件的数据加载/恢复到数据库表中。
+6.浏览或手动验证所有原始数据是否正确导入到新的MOT中。
+7.恢复应用程序活动。
+
+二、操作示例:将表enmo.customer_t1转换为MOT表
+
+1.确认MOT表支持表customer_t1所有列的数据类型
+
+```sql
+enmo=> \d
+List of relations
+Schema | Name | Type | Owner | Storage
+--------+--------------+-------+-------+----------------------------------
+enmo | all_data | table | enmo | {orientation=row,compression=no}
+enmo | customer_t1 | table | enmo | {orientation=row,compression=no}
+enmo | cux_setting | table | enmo | {orientation=row,compression=no}
+enmo | data_studio1 | table | enmo | {orientation=row,compression=no}
+enmo | table2 | table | enmo | {orientation=row,compression=no}
+public | table1 | table | enmo | {orientation=row,compression=no}
+(6 rows)
+
+enmo=> \d+ customer_t1
+Table "enmo.customer_t1"
+Column | Type | Modifiers | Storage | Stats target | Description
+-----------------+-----------------------+-----------+----------+--------------+-------------
+c_customer_sk | integer | | plain | |
+c_customer_name | character varying(32) | | extended | |
+Has OIDs: no
+Options: orientation=row, compression=no
+
+enmo=>
+```
+
+2.暂停表customer_t1相关的应用程序操作后,使用gs_dump命令导出表数据(仅数据):
+
+```sql
+$ gs_dump -U enmo -h 192.168.229.52 -p 15400 enmo -a --table customer_t1 -F c -f /home/omm/dump/customer_t1_data_only.bak
+Password:
+gs_dump[port='15400'][enmo][2021-03-28 10:11:42]: dump database enmo successfully
+gs_dump[port='15400'][enmo][2021-03-28 10:11:42]: total time: 8732 ms
+```
+
+3.重命名原表customer_t1为customer
+
+```sql
+enmo=> alter table 
customer_t1 rename to customer; +ALTER TABLE +``` + +4.创建与原表相同数据格式的MOT表customer_t1 + +```sql +--首先给enmo用于赋予创建和访问MOT(DDL、DML、SELECT)权限: +enmo=> GRANT USAGE ON FOREIGN SERVER mot_server TO enmo; +GRANT +--创建外部表 +enmo=> CREATE foreign TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32)); +CREATE FOREIGN TABLE +Time: 9.408 ms +enmo=> \d+ +List of relations +Schema | Name | Type | Owner | Size | Storage | Description +--------+--------------+---------------+-------+------------+----------------------------------+------------- +enmo | all_data | table | enmo | 8192 bytes | {orientation=row,compression=no} | +enmo | customer | table | enmo | 8192 bytes | {orientation=row,compression=no} | +enmo | customer_t1 | foreign table | enmo | 16 kB | | +enmo | cux_setting | table | enmo | 160 kB | {orientation=row,compression=no} | +enmo | data_studio1 | table | enmo | 8192 bytes | {orientation=row,compression=no} | +enmo | table2 | table | enmo | 8192 bytes | {orientation=row,compression=no} | +public | table1 | table | enmo | 8192 bytes | {orientation=row,compression=no} | +(7 rows) + +enmo=> \d+ customer_t1 +Foreign table "enmo.customer_t1" +Column | Type | Modifiers | FDW Options | Storage | Stats target | Description +-----------------+-----------------------+-----------+-------------+----------+--------------+------------- +c_customer_sk | integer | | | plain | | +c_customer_name | character varying(32) | | | extended | | +Server: mot_server +FDW permition: read/write +Has OIDs: no + +enmo=> select * from customer_t1; +c_customer_sk | c_customer_name +---------------+----------------- +(0 rows) + +Time: 0.782 ms +enmo=> +``` + +5.使用gs_restore将磁盘文件的数据加载/恢复到数据库表中 + +```sql +$ gs_restore -U enmo -h 192.168.229.52 -p 15400 -d enmo /home/omm/dump/customer_t1_data_only.bak +Password: +start restore operation ... +table customer_t1 complete data imported ! +Finish reading 3 SQL statements! +end restore operation ... 
+restore operation successful +total time: 3697 ms +``` + +6.浏览或手动验证所有原始数据是否正确导入到新的MOT中 + +```sql +enmo=> select * from customer_t1; +c_customer_sk | c_customer_name +---------------+----------------- +0 | data 0 +2 | data 2 +1 | new Data +(3 rows) + +Time: 0.587 ms +enmo=> +``` + diff --git "a/content/zh/post/tracy/\344\275\277\347\224\250PGLOADER\345\267\245\345\205\267\345\220\221MogDB\346\225\260\346\215\256\345\272\223\345\257\274\345\205\245csv\346\240\274\345\274\217\346\225\260\346\215\256.md" "b/content/zh/post/tracy/\344\275\277\347\224\250PGLOADER\345\267\245\345\205\267\345\220\221MogDB\346\225\260\346\215\256\345\272\223\345\257\274\345\205\245csv\346\240\274\345\274\217\346\225\260\346\215\256.md" new file mode 100644 index 0000000000000000000000000000000000000000..d7f82b7c6d9b436153e0302c242e8f9f58979abc --- /dev/null +++ "b/content/zh/post/tracy/\344\275\277\347\224\250PGLOADER\345\267\245\345\205\267\345\220\221MogDB\346\225\260\346\215\256\345\272\223\345\257\274\345\205\245csv\346\240\274\345\274\217\346\225\260\346\215\256.md" @@ -0,0 +1,235 @@ ++++ + +title = "使用PGLOADER工具向mogdb数据库导入csv格式数据" + +date = "2022-04-18" + +tags = ["使用PGLOADER工具向mogdb数据库导入csv格式数据"] + +archives = "2022-04" + +author = "tracy" + +summary = "使用PGLOADER工具向mogdb数据库导入csv格式数据" + +img = "/zh/post/tracy/title/img20.png" + +times = "10:20" ++++ + +# 使用PGLOADER工具向mogdb数据库导入csv格式数据 + +本文出处:https://www.modb.pro/db/81349 + +操作系统版本:centos7.6 +数据库版本:mogdb2.0.1 + +## 一、安装pgloader工具 + +### 0.准备工作 + +下载pgloader安装包: +[pgloader-3.6.2.tar.gz](https://github.com/dimitri/pgloader/releases) + +修改数据库参数: + +``` +gs_guc reload -D $PGDATA -c "password_encryption_type=1" +``` + +设置数据库白名单: + +``` +gs_guc reload -D $PGDATA -h "host all all 192.168.0.0/16 md5" +``` + +### 1.生成rmp包 + +a.Install the EPEL repo. 
+
+```
+ # yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+```
+
+b.Install rpmbuild dependencies:
+
+```
+ # yum -y install yum-utils rpmdevtools @"Development Tools"
+```
+
+c.Install pgloader build dependencies:
+
+```
+ # tar -zxvf pgloader-3.6.2.tar.gz
+
+ # cd pgloader-3.6.2
+
+ # yum-builddep pgloader.spec
+```
+
+d.Download pgloader source:
+
+```
+ # spectool -g -R pgloader.spec
+```
+
+此步骤如果下载文件失败,参照报错信息,手动创建目录,手动下载文件并改名:
+
+```
+# mkdir -p /root/rpmbuild/SOURCES/
+# cd /root/rpmbuild/SOURCES/
+# wget https://github.com/dimitri/pgloader/archive/v3.6.1.tar.gz
+# mv pgloader-3.6.1.tar.gz v3.6.1.tar.gz
+```
+
+e.Build the source and binary RPMs (see rpmbuild --help for other build options):
+
+```
+ # rpmbuild -ba pgloader.spec
+```
+
+### 2.安装pgloader RPM包
+
+```
+# rpm -ivh /root/rpmbuild/RPMS/x86_64/pgloader-3.6.1-22.el7.x86_64.rpm
+```
+
+### 3.安装完成,查看pgloader版本:
+
+```
+[root@og201 pgloader-3.6.2]# pgloader --version
+pgloader version "3.6.1"
+```
+
+## 二、导入CSV格式数据
+
+### 1.准备pgloader配置文件和csv数据文件
+
+csv.load
+
+```sql
+LOAD CSV
+   FROM 'path/to/file.csv' (x, y, a, b, c, d)
+   INTO postgresql://{user}:{password}@{host}:{port}/{dbname}?csv (a, b, d, c)
+
+   WITH truncate,
+        skip header = 1,
+        fields optionally enclosed by '"',
+        fields escaped by double-quote,
+        fields terminated by ','
+
+    SET client_encoding to 'latin1',
+        work_mem to '12MB',
+        standard_conforming_strings to 'on'
+
+   BEFORE LOAD DO
+    $$ drop table if exists csv; $$,
+    $$ create table csv (
+       a bigint,
+       b bigint,
+       c char(2),
+       d text
+      );
+    $$;
+
+```
+
+file.csv
+
+```sql
+Header, with a © sign
+"2.6.190.56","2.6.190.63","33996344","33996351","GB","United Kingdom"
+"3.0.0.0","4.17.135.31","50331648","68257567","US","United States"
+"4.17.135.32","4.17.135.63","68257568","68257599","CA","Canada"
+"4.17.135.64","4.17.142.255","68257600","68259583","US","United States"
+"4.17.143.0","4.17.143.15","68259584","68259599","CA","Canada"
+"4.17.143.16","4.18.32.71","68259600","68296775","US","United States"
+
+```
+
+### 2.导入:
+
+```sql
+# pgloader csv.load
+2021-07-09T16:50:22.013000+08:00 LOG pgloader version "3.6.1"
+2021-07-09T16:50:22.026000+08:00 WARNING pgloader always talk to PostgreSQL in utf-8, client_encoding has been forced to 'utf8'.
+2021-07-09T16:50:22.543000+08:00 LOG report summary reset
+             table name     errors       rows      bytes      total time
+-----------------------  ---------  ---------  ---------  --------------
+                  fetch          0          0                     0.012s
+            before load          0          2                     0.038s
+-----------------------  ---------  ---------  ---------  --------------
+             "pg"."csv"          0          6     0.2 kB          0.079s
+-----------------------  ---------  ---------  ---------  --------------
+        Files Processed          0          1                     0.036s
+COPY Threads Completion          0          2                     0.099s
+-----------------------  ---------  ---------  ---------  --------------
+      Total import time          ✓          6     0.2 kB          0.135s
+
+```
+
+### 3.登录数据库查看导入数据:
+
+```sql
+pgloader=# \c - pg
+Password for user pg:
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "pgloader" as user "pg".
+pgloader=> \dt + List of relations + Schema | Name | Type | Owner | Storage +--------+------+-------+-------+---------------------------------- + pg | csv | table | pg | {orientation=row,compression=no} +(1 row) + +pgloader=> table csv; + a | b | c | d +----------+----------+----+---------------- + 33996344 | 33996351 | GB | United Kingdom + 50331648 | 68257567 | US | United States + 68257568 | 68257599 | CA | Canada + 68257600 | 68259583 | US | United States + 68259584 | 68259599 | CA | Canada + 68259600 | 68296775 | US | United States +(6 rows) + +pgloader=> + +``` + +### 4.导入数据存在错误时,错误信息会记录在/tmp/pgloader/csv目录下 + +``` +# cat path/to/file.csv +Header, with a © sign +"2.6.190.56","2.6.190.63","33996344","33996351","GB","United Kingdom" +"3.0.0.0","4.17.135.31","50331648","68257567","US","United States" +"4.17.135.32","4.17.135.63","68257568","68257599","CA","Canada" +"4.17.135.64","4.17.142.255","68257600","68259583","US","United States" +"4.17.143.0","4.17.143.15","68259584","68259599","CA","Canada" +"4.17.143.16","4.18.32.71","68259600","68296775","US","United States" +"4.18.32.71","68259600","68296775","US","United States" +# pgloader csv.load +2021-07-14T08:25:45.025000+08:00 LOG pgloader version "3.6.1" +2021-07-14T08:25:45.123000+08:00 WARNING pgloader always talk to PostgreSQL in utf-8, client_encoding has been forced to 'utf8'. +2021-07-14T08:25:45.657000+08:00 ERROR PostgreSQL ["\"public\".\"csv\""] Database error 22P02: invalid input syntax for integer: "US" +CONTEXT: COPY csv, line 7, column b: "US" +2021-07-14T08:25:45.667000+08:00 LOG report summary reset + table name errors rows bytes total time +----------------------- --------- --------- --------- -------------- + fetch 0 0 0.010s + before load 0 2 0.052s +----------------------- --------- --------- --------- -------------- + "public"."csv" 1 6 0.2 kB 0.072s +----------------------- --------- --------- --------- -------------- + Files Processed 0 1 0.028s +COPY Threads Completion 0 2 0.087s +----------------------- --------- --------- --------- -------------- + Total import time 1 6 0.2 kB 0.115s +[root@centos-7-pg12 pgloader]# ls -l /tmp/pgloader/csv/ +total 8 +-rw-r--r-- 1 root root 30 Jul 14 08:25 csv.dat +-rw-r--r-- 1 root root 103 Jul 14 08:25 csv.log +[root@centos-7-pg12 pgloader]# cat /tmp/pgloader/csv/csv.dat +68296775 US \N United States + +[root@centos-7-pg12 pgloader]# cat /tmp/pgloader/csv/csv.log +Database error 22P02: invalid input syntax for integer: "US" +CONTEXT: COPY csv, line 7, column b: "US" +``` diff --git "a/content/zh/post/tracy/\344\275\277\347\224\250gs_probackup\345\267\245\345\205\267\350\277\234\347\250\213\345\244\207\344\273\275.md" "b/content/zh/post/tracy/\344\275\277\347\224\250gs_probackup\345\267\245\345\205\267\350\277\234\347\250\213\345\244\207\344\273\275.md" new file mode 100644 index 0000000000000000000000000000000000000000..837f09497898544c096e03ba98dcccfb19189948 --- /dev/null +++ "b/content/zh/post/tracy/\344\275\277\347\224\250gs_probackup\345\267\245\345\205\267\350\277\234\347\250\213\345\244\207\344\273\275.md" @@ -0,0 +1,166 @@ ++++ + +title = "使用gs_probackup工具远程备份" + +date = "2022-04-19" + +tags = ["使用gs_probackup工具远程备份"] + +archives = "2022-04" + +author = "tracy" + +summary = "使用gs_probackup工具远程备份" + +img = "/zh/post/tracy/title/img20.png" + +times = "10:20" ++++ + +# 使用gs_probackup工具远程备份 + +本文出处:[https://www.modb.pro/db/336103](https://www.modb.pro/db/336103) + +
+
+## gs_probackup简介
+
+gs_probackup是一个用于管理openGauss数据库备份和恢复的工具。它对openGauss实例进行定期备份,以便在数据库出现故障时能够恢复服务器。
+
+- 可用于备份单机数据库或者主节点数据库,为物理备份。
+- 可备份外部目录的内容,如脚本文件、配置文件、日志文件、dump文件等。
+- 支持增量备份、定期备份和远程备份。
+- 可设置备份的留存策略。
+
+**前提条件**
+
+- 可以正常连接openGauss数据库。
+- 若要使用PTRACK增量备份,需在postgresql.conf中手动添加参数"enable_cbm_tracking = on"。
+- 为了防止xlog在传输结束前被清理,请适当调高postgresql.conf文件中wal_keep_segments的值。
+
+**限制说明**
+
+- 备份必须由运行数据库服务器的用户执行。
+- 备份和恢复的数据库服务器的主版本号必须相同。
+- 如果要通过ssh在远程模式下备份数据库,需要在本地和远程主机安装相同主版本的数据库,并通过ssh-copy-id remote_user@remote_host命令设置本地主机备份用户和远程主机数据库用户的无密码ssh连接。
+- 远程模式下只能执行add-instance、backup、restore子命令。
+- 使用restore子命令前,应先停止gaussdb进程。
+- 当存在用户自定义表空间时,备份的时候要加上 --external-dirs 参数,否则,该表空间不会被备份。
+- 当备份的规模比较大时,为了防止备份过程中timeout发生,请适当调整postgresql.conf文件的参数 session_timeout、wal_sender_timeout,并且在备份的命令行参数中适当调整参数--rw-timeout的值。
+- 恢复时,使用-T选项把备份中的外部目录重定向到新目录时,请同时指定参数--external-mapping。
+- 增量备份恢复后,之前创建的逻辑复制槽不可用,需删除重建。
+
+本文主要介绍如何使用gs_probackup工具进行远程备份。
+
+## gs_probackup远程备份相关参数
+
+进行远程备份时,主要需要设置两个远程连接参数(其他远程连接参数使用默认值即可):
+
+- --remote-host=destination:指定要连接的远程主机的IP地址或主机名。
+- --remote-user=username:指定SSH连接的远程主机用户。如果省略此参数,则使用当前发起SSH连接的用户。默认值:当前用户
+
+## gs_probackup远程备份举例
+
+### 初始化备份目录
+
+```
+$ gs_probackup init -B /home/omm2/backup1
+INFO: Backup catalog '/home/omm2/backup1' successfully inited
+```
+
+### 添加一个新的备份实例
+
+```sql
+$ /u01/mogdb2.1.0/app/bin/gs_probackup add-instance -B /home/omm2/backup1 --instance=dn_6001 --remote-host=192.168.2.150 --remote-user=omm2 -D /u01/mogdb2.1.0/data/db1 -Urepl -p30000 -dpostgres
+LOG: Start SSH client process, pid 10597
+INFO: Instance 'dn_6001' successfully inited
+
+LOG: Start SSH client process, pid 28068
+INFO: Instance 'dn_6001' successfully inited
+```
+
+### 数据库全备
+
+```
+$ gs_probackup backup -B /home/omm2/backup1 --instance=dn_6001 -b FULL --remote-host=192.168.2.150 --remote-user=omm2
+INFO: Backup start, gs_probackup version: 2.4.2, instance: dn_6001, backup ID: R80P6X, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1
+LOG: Backup destination is initialized
+Password for user repl:
+WARNING: This openGauss instance was initialized without data block checksums. gs_probackup have no way to detect data block corruption without them. Reinitialize PGDATA with option '--data-checksums'.
+LOG: Start SSH client process, pid 10732 +LOG: Database backup start +LOG: started streaming WAL at 0/D000000 (timeline 1) +[2022-02-28 21:52:14]: check identify system success +[2022-02-28 21:52:14]: send START_REPLICATION 0/D000000 success +[2022-02-28 21:52:14]: keepalive message is received +[2022-02-28 21:52:14]: keepalive message is received +LOG: SSH process 10732 is terminated with status 0 +INFO: PGDATA size: 619MB +INFO: Start transferring data files +LOG: Start SSH client process, pid 10736 +LOG: Creating page header map "/home/omm2/backup1/backups/dn_6001/R80P6X/page_header_map" +[2022-02-28 21:52:17]: keepalive message is received +[2022-02-28 21:52:20]: keepalive message is received +[2022-02-28 21:52:20]: keepalive message is received +[2022-02-28 21:52:23]: keepalive message is received +[2022-02-28 21:52:26]: keepalive message is received +[2022-02-28 21:52:26]: keepalive message is received +LOG: SSH process 10736 is terminated with status 0 +INFO: Data files are transferred, time elapsed: 14s +[2022-02-28 21:52:29]: keepalive message is received +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +LOG: stop_lsn: 0/D0001E8 +LOG: Looking for LSN 0/D0001E8 in segment: 00000001000000000000000D +LOG: Found WAL segment: /home/omm2/backup1/backups/dn_6001/R80P6X/database/pg_xlog/00000001000000000000000D +LOG: Thread [0]: Opening WAL segment "/home/omm2/backup1/backups/dn_6001/R80P6X/database/pg_xlog/00000001000000000000000D" +LOG: Found LSN: 0/D0001E8 +LOG: finished streaming WAL at 0/E000000 (timeline 1) +LOG: Getting the Recovery Time from WAL +LOG: Thread [0]: Opening WAL segment "/home/omm2/backup1/backups/dn_6001/R80P6X/database/pg_xlog/00000001000000000000000D" +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 0 +INFO: Validating backup R80P6X +INFO: Backup R80P6X data files are valid +INFO: Backup R80P6X resident size: 635MB +INFO: Backup R80P6X completed +``` + +### 显示备份信息 + +``` +$ gs_probackup show -B /home/omm2/backup1 + +BACKUP INSTANCE 'dn_6001' +================================================================================================================================= + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +================================================================================================================================= + dn_6001 9.2 R80P6X 2022-02-28 21:52:28+08 FULL STREAM 1/0 24s 619MB 16MB 1.00 0/D000028 0/D0001E8 OK + +$ gs_probackup show -B /home/omm2/backup1 --instance dn_6001 -i R80P6X +#Configuration +backup-mode = FULL +stream = true +compress-alg = none +compress-level = 1 +from-replica = false + +#Compatibility +block-size = 8192 +xlog-block-size = 8192 +checksum-version = 0 +program-version = 2.4.2 +server-version = 9.2 + +#Result backup info +timelineid = 1 +start-lsn = 0/D000028 +stop-lsn = 0/D0001E8 +start-time = '2022-02-28 21:52:09+08' +end-time = '2022-02-28 21:52:33+08' +recovery-xid = 42848 +recovery-time = '2022-02-28 21:52:28+08' +recovery-name = 'backup R80P6X' +data-bytes = 649266705 +wal-bytes = 16777216 +uncompressed-bytes = 649218794 +pgdata-bytes = 649218794 +status = OK +content-crc = 4089751389 +``` diff --git "a/content/zh/post/vector524/\345\215\216\344\270\272OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" 
"b/content/zh/post/vector524/\345\215\216\344\270\272OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..1c95a4e35f0cded095bce8c786792541a8d8e174 --- /dev/null +++ "b/content/zh/post/vector524/\345\215\216\344\270\272OpenGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205\344\270\216\344\275\277\347\224\250.md" @@ -0,0 +1,829 @@ ++++ + +title = "华为OpenGauss数据库安装与使用" +date = "2021-12-01" +tags = ["华为OpenGauss数据库安装与使用"] +archives = "2021-12" +author = "vector" +summary = "华为OpenGauss数据库安装与使用" +times = "17:30" + ++++ + +主要参考博客:[opengauss单机部署-墨天轮](https://www.modb.pro/doc/4705) + +      [企业版安装 | openGauss](https://opengauss.org/zh/docs/2.0.0/docs/installation/企业版安装.html) + +## 1. 虚拟机安装 + +​ 先做安装准备,点击链接[下载](https://download3.vmware.com/software/wkst/file/VMware-workstation-full-16.1.2-17966106.exe)VMware Workstation Pro16,此处为Windows 10使用。 + +### 1.1 VMware安装 + +​ 打开下载好的exe文件,即开始安装: + + + +​ 安装位置默认在C盘,点击**更改**可以修改安装位置,我安装到了`E:\VMware\`下,安装路径尽量不要有中文,记得**勾选**PATH按钮,这样不用自己再添加环境变量,可勾选增强型键盘驱动程序,此功能可更好地处理国际键盘和带有额外按键的键盘: + + + +​ 一直点击下一步: + + + +![0.png](https://pic.imgdb.cn/item/615c11832ab3f51d914222dd.png) + +![1.png](https://pic.imgdb.cn/item/615c11832ab3f51d914222e9.png) + +![2.png](https://pic.imgdb.cn/item/615c11832ab3f51d91422301.png) + +​ 点击输入许可证,密钥可以自己购买,或者百度搜索以下,多尝试几个,下面是我当时安装使用的密钥,不知道现在失效没有: + + + +​ 安装后可能要求重启系统,重启后进入软件。依次点击导航栏中的 `帮助 -> 关于 VMware Workstation` ,查看许可证信息的状态,如下图所示即为激活成功。 + + + +### 1.2 虚拟机部署centos + +​ 可以在官方网站下载centos7,只有centos7.6支持安装opengauss,如果找不到7.6版本的centos,也可安装稍高版本的centos,安装完之后需要在系统文件中做相关修改,我下载的是[centos7.9](https://mirrors.aliyun.com/centos/7.9.2009/isos/x86_64/),文件太大了,需要下一段时间,记得更改下载保存的位置,我放在了`E:\Linux\`下。`我第一次安装时不知道必须安装centos7,安装成了centos8,而重新安装时部分截图忘记保存,所以下面部分截图出现的centos8,大家视为centos7就好`。 + + + +​ 下载完成,打开VMware选择新建虚拟机: + + + +![](https://pic.imgdb.cn/item/615c191d2ab3f51d914d3f25.png) + +​ 浏览文件,选择centos7的下载目录,选择镜像文件: + + + +​ 设置虚拟机的名称和账户名,以及密码: + + + +​ 选择虚拟机的安装位置: + + + +​ 设置磁盘的容量,默认为20GB,我修改为了40GB,点击下一步即可: + + + +​ 自定义硬件可以根据自己的需求,修改centos的设置: + + + +​ 内存大小默认为1GB,我设置为了2GB: + + + +​ 网络适配器选择NAT模式,设置完成之后点击确定: + + + + + +​ 等待安装: + + + + + + + + + + + +​ 中间会出现这个页面让你设置,如果你没赶快进行操作,就跳过去了,设置不设置都没有关系,安装完成之后也可以设置: + + + +​ 如下是,点击各个按钮进行时间、显示、输入法的设置: + + + + + + + + + +​ 设置完成之后继续安装,安装完毕,输入设置的密码之后,回车: + + + +​ 安装成功! 
+
+### 1.3 centos配置
+
+#### 1.3.1 设置系统版本
+
+因为openGauss要求的CentOS版本是7.6,因此我们需要修改`/etc/redhat-release`文件:
+
+```shell
+#进入管理员模式
+su
+#打开文件,进行编辑
+vi /etc/redhat-release
+```
+
+![](https://pic.imgdb.cn/item/615c15c42ab3f51d91484ed6.png)
+
+修改成如下内容`CentOS Linux release 7.6 (Core)`:
+
+#### 1.3.2 网络设置
+
+使用`ifconfig`或者`ip addr`可以查看自己的ip地址。
+
+我的网卡的名字为ens33,接下来,给网卡增加DNS:`echo 'DNS1=114.114.114.114'>>/etc/sysconfig/network-scripts/ifcfg-ens33`
+
+重启网卡:`systemctl restart network`,测试是否可以访问:`ping www.baidu.com`
+
+如上图所示,则可以访问。
+
+#### 1.3.3 修改主机名
+
+```shell
+echo "vector" > /etc/hostname
+echo "192.168.48.128 vector" >> /etc/hosts
+```
+
+最后系统重启后记得查看主机名是否修改成功:
+
+```
+cat /etc/hostname
+```
+
+#### 1.3.4 配置YUM源
+
+- 删除系统自带的yum源
+
+  ```shell
+  rm -rf /etc/yum.repos.d/*
+  ```
+
+- 下载阿里云yum源
+
+  ```shell
+  wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos7.repo
+  ```
+
+- 生成仓库缓存
+
+  ```shell
+  yum makecache
+  ```
+
+- 安装python3.6,一定要装3.6版本
+
+  ```shell
+  sudo yum install epel-release
+  sudo yum install python36
+  ```
+
+#### 1.3.5 关闭SELinux
+
+```shell
+sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
+```
+
+#### 1.3.6 设置字符集
+
+```shell
+cat >>/etc/profile<
+```
+
+在该目录下获取XML文件`script/gspylib/etc/conf/cluster_config_template.xml`,重命名为cluster_config.xml放在`/opt/software/openGauss/`下,并将以下模板修改为自己的信息放入xml文件,第37行的**15400**表示设置了数据库的端口号,以下模板只需要更改两点:**ip地址**和**主机名**:
+
+```xml
+
+```
+
+根据我的ip地址192.168.48.128和我的主机名vector更改之后文件内容如下:
+
+```xml
+
+```
+
+执行以下命令准备安装环境:
+
+```shell
+cd /opt/software/openGauss/script
+./gs_preinstall -U omm -G dbgrp -L -X /opt/software/openGauss/cluster_config.xml
+```
+
+如上表示预安装成功!
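+
+预安装成功后,也可以顺手确认前置脚本创建的用户和用户组是否已经生成(以下为示意性的检查命令):
+
+```shell
+# 确认gs_preinstall已创建omm用户和dbgrp组
+id omm
+grep dbgrp /etc/group
+```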
+
+### 2.2 执行安装
+
+#### 2.2.1 检查
+
+检查安装包和openGauss配置文件在规划路径下是否已存在,如果没有,重新执行预安装,确保预安装成功,再执行以下步骤。
+
+#### 2.2.2 切换用户
+
+登录到openGauss的主机,并切换到omm用户:
+
+```shell
+su omm
+```
+
+- omm指的是前置脚本gs_preinstall中-U参数指定的用户。
+- 安装脚本gs_install必须以前置脚本中指定的omm执行,否则,脚本执行会报错。
+
+#### 2.2.3 安装
+
+使用gs_install安装openGauss。
+
+```shell
+gs_install -X /opt/software/openGauss/cluster_config.xml
+```
+
+`/opt/software/openGauss/cluster_config.xml`为openGauss配置文件的路径。在执行过程中,用户需根据提示输入数据库的密码,密码具有一定的复杂度,为保证用户正常使用该数据库,请记住输入的数据库密码。
+
+设置的密码要符合复杂度要求:
+
+- 最少包含8个字符。
+- 不能和用户名、当前密码(ALTER)、或当前密码反序相同。
+- 至少包含大写字母(A-Z),小写字母(a-z),数字,非字母数字字符(限定为~!@#$%^&*()-_=+\|[{}];:,<.>/?)四类字符中的三类字符。
+
+执行如下命令检查数据库状态是否正常:
+
+```shell
+gs_om -t status
+```
+
+`cluster_state` 显示"Normal"表示数据库可正常使用。
+
+如首次安装数据库不成功,则卸载后重新安装,卸载方式如下:
+
+```shell
+gs_uninstall --delete-data
+```
+
+#### 2.2.4 初始化数据库
+
+使用SQL语句**创建数据库**(database)时,指定数据库的字符集为GBK。
+
+```shell
+#后面跟的是端口号,我的是15400
+gsql -d postgres -p 15400
+```
+
+```sql
+CREATE DATABASE mydb WITH ENCODING 'GBK' template = template0;
+```
+
+显示如下信息:
+
+```sql
+CREATE DATABASE
+```
+
+创建**schema**:
+
+```sql
+CREATE SCHEMA tpcds;
+```
+
+创建表:
+
+```sql
+CREATE TABLE tpcds.warehouse_t1
+(
+    W_WAREHOUSE_SK    INTEGER     NOT NULL,
+    W_WAREHOUSE_ID    CHAR(16)    NOT NULL,
+    W_WAREHOUSE_NAME  VARCHAR(20) ,
+    W_WAREHOUSE_SQ_FT INTEGER     ,
+    W_STREET_NUMBER   CHAR(10)    ,
+    W_STREET_NAME     VARCHAR(60) ,
+    W_STREET_TYPE     CHAR(15)    ,
+    W_SUITE_NUMBER    CHAR(10)    ,
+    W_CITY            VARCHAR(60) ,
+    W_COUNTY          VARCHAR(30) ,
+    W_STATE           CHAR(2)     ,
+    W_ZIP             CHAR(10)    ,
+    W_COUNTRY         VARCHAR(20) ,
+    W_GMT_OFFSET      DECIMAL(5,2)
+);
+```
+
+查看表信息:
+
+```sql
+insert into tpcds.warehouse_t1(w_warehouse_sk,w_warehouse_id) values(12,'000001');
+insert into tpcds.warehouse_t1(w_warehouse_sk,w_warehouse_id) values(25,'000002');
+select w_warehouse_sk, w_warehouse_id from tpcds.warehouse_t1;
+```
+
+向数据库中添加数据之后查看:
+
+如果不知道自己的端口号,可根据以下方式查看:
+
+- 查看自己的cluster_config.xml文件,查看自己将端口号设置为了多少。
+- 使用如下命令查看:
+
+```shell
+gs_om -t status --detail
+cd /opt/huawei/install/data/dn
+```
+
+### 2.3 JDBC连接数据库
+
+#### 2.3.1 准备java环境
+
+查看centos的java环境,centos自带java1.8,需要安装配套的javac,注意要是1.8.0版。
+
+```shell
+yum install java-1.8.0-openjdk-devel.x86_64
+```
+
+下载驱动包2.0.0版本[postgresql.jar](https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86/openGauss-2.0.0-JDBC.tar.gz),放在路径`/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/lib/ext`下:
+
+```shell
+cp postgresql.jar /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/lib/ext
+```
+
+测试是否具备运行java代码的环境:
+
+```shell
+java -version
+javac -version
+```
+
+已具备运行环境!
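+
+若不想把驱动jar包复制到jre/lib/ext目录,也可以在编译、运行后文的test.java示例时,通过-cp显式指定驱动路径(示意写法,jar路径请按实际位置替换):
+
+```shell
+javac -cp .:/path/to/postgresql.jar test.java
+java -cp .:/path/to/postgresql.jar test
+```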
+ +#### 2.3.2 准备好连接的java代码 + +​ 记得替换成你设置的用户名、密码、端口号,如果你是按照我前面的操作,用户名应该是omm, + +```java +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.CallableStatement; + +public class test{//keep + public static Connection getConnect(String username, String passwd) + { + //驱动类。 + String driver = "org.postgresql.Driver"; + //数据库连接描述符。将15400替换为自己的端口号 + String sourceURL = "jdbc:postgresql://127.0.0.1:15400/postgres"; + Connection conn = null; + + try + { + //加载驱动。 + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + //创建连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; + + //try to connect + public static void main(String[] args) + { + // TODO Auto-generated method stub + Connection conn = getConnect("user", "password");//replace by my user and password + //BatchInsertData(conn); + try + { + conn.close(); + } + catch (SQLException e) + { + e.printStackTrace(); + } + } +} +``` + +#### 2.3.3 配置服务端远程连接 + +- 以操作系统用户omm登录数据库。 + +- 配置listen_addresses,即远程客户端连接使用的数据库主节点ip或者主机名。 + + 使用如下命令查看数据库主节点目前的listen_addresses配置。 + + ```shell + gs_guc check -I all -c "listen_addresses" + ``` + +- 使用如下命令把要查询出的ip追加到listen_addresses后面,多个配置项之间用英文逗号分隔。例如,追加ip地址10.11.12.13。 + + ```shell + gs_guc set -I all -c "listen_addresses='localhost,10.11.12.13'" + ``` + +- 执行如下命令重启openGauss + + ```shell + gs_om -t stop && gs_om -t start + ``` + + + +#### 2.3.4 连接 + +- 首先需要启动数据库 + + ```shell + su omm + gs_om -t start + ``` + +- 运行java代码 + + ```shell + javac test.java + java test + ``` + + + +#### 2.3.5 操纵数据 + +​ 使用如下java代码访问并对表中数据进行查询(记得替换用户、密码和端口): + +```java +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.CallableStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +public class gausstest{//keep + public static Connection getConnect(String username, String passwd) + { + //驱动类。 + String driver = "org.postgresql.Driver"; + //数据库连接描述符。 + String sourceURL = "jdbc:postgresql://127.0.0.1:15400/postgres"; + Connection conn = null; + + try + { + //加载驱动。 + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + //创建连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; + + //try to connect + public static void main(String[] args) throws SQLException + { + // TODO Auto-generated method stub + Connection conn = getConnect("user", "password");//replace by my user and password + //BatchInsertData(conn); + Statement st = conn.createStatement(); + String sql = "select w_warehouse_sk,w_warehouse_id from tpcds.warehouse_t1"; + ResultSet rs = st.executeQuery(sql); + while(rs.next()) { + int w_warehouse_sk = rs.getInt("w_warehouse_sk"); + String w_warehouse_id = rs.getString("w_warehouse_id"); + System.out.println("w_warehouse_sk = " + w_warehouse_sk + "; w_warehouse_id = " + w_warehouse_id); + } + try + { + conn.close(); + st.close(); + rs.close(); + } + catch (SQLException e) + { + e.printStackTrace(); + } + } +} +``` + +​ + +## 3. 
遇到的问题 + +​ 我感觉我把所有能遇到的问题都遇到了,最后成功是重装一遍,什么问题没遇到。 + +### 3.1 使用gs_ctl提示找不到命令 + +​ 如下图所示: + + 参看博客[Linux下解决命令未找到的问题 - ML。 - 博客园 (cnblogs.com)](https://www.cnblogs.com/mnote/p/8832806.html),对于本问题主要使用的命令是: + +```shell +#进入管理员模式 +su +which gs_ctl +``` + + + +​ 接下来需要做的是把查找出的路径直接链接到/usr/bin下。操作如下: + +```shell +ln -s xxx/xxx /usr/bin +``` + +​ 以上xxx代表你查出来的路径。 + + + +### 3.2 gs_om命令找不到 + +​ 不得不说极简版安装包下没有gs_om文件,我搜遍了也没有,在企业版中,我因为懒得重装把我同学下载的企业版中的gs_之类的文件全拷过来了,但是后来遇到了其他问题,我又重装了,不知道我这个操作最终会带来什么影响。 + +### 3.3 sudo和su都用不了 + +​ sudo chmod -R 777 / 修改根目录权限问题修复,参考了[ 关于不小心777导致没法sudo权限后的修改解决办法_空木格子的博客-CSDN博客](https://blog.csdn.net/qq_39543212/article/details/84107240) + +​ 我应该是因为sudo用不了提示sudo: must be setuid root,然后我进入根目录下修改了某个文件为777,直接导致su也用不了。这下好了,要用su让我先用sudo修改相关文件,要用sudo让我先用su修改文件! + +​ 解决这个问题需要先进入安全模式,进入方法为:在开机的过程中按shift或ESC键,好像在系统中按F1还是F2也可以。 + +​ 此时,已经进入到具有root权限的字符界面,输入以下命令解决了。 + +```shell +ls -l /usr/bin/sudo +chown root:root /usr/bin/sudo +chmod 4755 /usr/bin/sudo +``` + +### 3.4 预安装失败 + +​ ![](https://pic.imgdb.cn/item/615c53892ab3f51d91a7b1e6.png) + +​ 本问题先参考了链接[openGaussDB 初体验(上) - 云+社区 - 腾讯云 (tencent.com)](https://cloud.tencent.com/developer/article/1675265)以下内容,但是没有解决。 + +​ ![](https://pic.imgdb.cn/item/615c14052ab3f51d9145c37e.png) + +​ 我解决这个问题的过程是这样的: + +​ 找到虚拟网络编辑器,电脑连了自己的热点(我听我同学说她的用校园网就不行),然后还原默认设置: + + + + + +​ 然后配置了静态的ip地址,参考了[ CentOS 7 连接不到网络解决方法(设置静态ip)_gaokcl的博客-CSDN博客_centos7无法连接网络](https://blog.csdn.net/gaokcl/article/details/82834925?utm_medium=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.no_search_link&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.no_search_link)。但是神奇的是,这样就可以了。不过后来还是重装了。 + +### 3.5 重装openGauss时端口被占用 + +​ 报错:[GAUSS-50601] : The port [15400] is occupied or the ip address is incorrectl,有两种方法: + +- 修改xml文件中的端口号 +- 杀掉占用端口的进程 + +### 3.6 右上角网络连接图标消失 + +​ 参考了[centos7右上角网络连接图标消失_shuest的博客-CSDN博客_centos7右上角没有网络图标](https://blog.csdn.net/zs391077005/article/details/106885104?utm_medium=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-1.no_search_link&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-1.no_search_link) + +``` +chkconfig network off +chkconfig network on +service NetworkManager stop +service NetworkManager start +``` + +​ 但是有可能遇到后两条命令用不了,然后又去查怎么办,最后也没解决,我重装了。累了累了。 + +### 3.7 循环显示登录界面无法进入 + +​ 看图吧,我最后又进安全模式解决的,最后修改/etc/selinux/config配置,将SELINUX选项由SELINUX=enforcing改成SELINUX=disabled,重启系统后发现就可以正常登陆系统了: + + + +### 3.8 Connection refused + +- ​ 首先需要启动数据库,不启动数据库会出现如下错误:![](https://pic.imgdb.cn/item/615c15482ab3f51d9147a2aa.png) + +- 未设置服务端远程连接也会出现以上问题,见2.3.3 + + + +### 3.9 加载驱动出现问题 + +​ 以下是开发流程: +​ ![img](https://opengauss.org/zh/docs/2.0.0/docs/Developerguide/figures/%E9%87%87%E7%94%A8JDBC%E5%BC%80%E5%8F%91%E5%BA%94%E7%94%A8%E7%A8%8B%E5%BA%8F%E7%9A%84%E6%B5%81%E7%A8%8B.png) + +驱动需要按照2.3.1所说,放在指定文件夹下,不然在加载驱动的时候会出现问题。 + +### 3.10 unreported exception SQLException + +​ 在本地编译java服务的时候,编译报错:未报告的异常错误; 必须对其进行捕获或声明以便抛出。 + +​ 添加代码throw SQLException即可: + diff --git "a/content/zh/post/vector524/\345\215\216\344\270\272OpenGauss\346\225\260\346\215\256\345\272\223\350\241\214\345\255\230\345\202\250\346\272\220\344\273\243\347\240\201\350\247\243\346\236\220.md" "b/content/zh/post/vector524/\345\215\216\344\270\272OpenGauss\346\225\260\346\215\256\345\272\223\350\241\214\345\255\230\345\202\250\346\272\220\344\273\243\347\240\201\350\247\243\346\236\220.md" new file mode 100644 index 0000000000000000000000000000000000000000..96461cdb5bb323b64df8470d809d2de44ea50efe --- 
/dev/null +++ "b/content/zh/post/vector524/\345\215\216\344\270\272OpenGauss\346\225\260\346\215\256\345\272\223\350\241\214\345\255\230\345\202\250\346\272\220\344\273\243\347\240\201\350\247\243\346\236\220.md" @@ -0,0 +1,306 @@ ++++ + +title = "华为OpenGauss数据库行存储源代码解析" +date = "2021-12-01" +tags = ["华为OpenGauss数据库行存储源代码解析"] +archives = "2021-12" +author = "vector" +summary = "华为OpenGauss数据库行存储源代码解析" +times = "17:30" + ++++ + +# 华为OpenGauss数据库行存储源代码解析 + +​ 根据存储介质和并发控制机制,存储引擎分为磁盘引擎和内存引擎两大类。磁盘引擎主要面向通用的、大容量的业务场景,内存引擎主要面向容量可控的、追求极致性能的业务场景。在磁盘引擎中,为了满足不同业务场景对于数据不同的访问和使用模式,openGauss进一步提供了astore(append-store,追加写优化格式)、cstore(column store,列存储格式)以及可拓展的数据元组和数据页面组织格式。astore为行存储格式,向上提供元组形式的读、写。 + +## 1. 数据库表的创建 + +​ 查找到了关于数据库创建表的内容:在opengauss数据库源码中位于`src\gausskernel\optimizer\commands\tablecmds.cpp`,DefineRelation此函数是最终创建表结构的函数,最主要的参数是CreateStmt这个结构,该结构如下: + +```c++ +typedef struct CreateStmt { + NodeTag type; + RangeVar *relation; /* relation to create */ + List *tableElts; /* column definitions (list of ColumnDef) */ + List *inhRelations; /* relations to inherit from (list of + * inhRelation) */ + TypeName *ofTypename; /* OF typename */ + List *constraints; /* constraints (list of Constraint nodes) */ + List *options; /* options from WITH clause */ + List *clusterKeys; /* partial cluster key for table */ + OnCommitAction oncommit; /* what do we do at COMMIT? */ + char *tablespacename; /* table space to use, or NULL */ + bool if_not_exists; /* just do nothing if it already exists? */ + bool ivm; /* incremental view maintenance is used by materialized view */ + int8 row_compress; /* row compression flag */ + PartitionState *partTableState; /* the PartitionState */ +#ifdef PGXC + DistributeBy *distributeby; /* distribution to use, or NULL */ + PGXCSubCluster *subcluster; /* subcluster of table */ +#endif + + List *tableEltsDup; /* Used for cstore constraint check */ + char *internalData; /* Used for create table like */ + + List *uuids; /* list of uuid, used for create sequence(like 'create table t(a serial))' */ + Oid oldBucket; /* bucketoid of resizing table */ + List *oldNode; /* relfilenode of resizing table */ + List *oldToastNode; /* toastnode of resizing table */ + char relkind; /* type of object */ +} CreateStmt; +``` + +​ 结构中relation中包含了catalogname,schemaname,relname此时的relname就能够顺利的拿到。`DefineRelation`函数中用到的函数的功能和执行流程如下(参考了postgre数据库的执行流程): + +DefineRelation-> + +| Permission check | 进行权限检査,确定当前用户是否有权限创建表。 | +| ------------------------------ | ------------------------------------------------------------ | +| transformRelOptions() | 对表创建语句中的WITH子句进行解析 | +| heap_reloptions() | 调用heap_reloptions对参数进行合法性验证。 | +| MergeAttributes() | 使用MergeAttributes,将继承的属性合并到表属性定义中。 | +| BuildDescForRelation() | 调用BuildDescForRelation利用合并后的属性定义链表创建tupleDesc结构(这个结构用于描述元组各属性结构等信息)。 | +| interpretOidsOption() | 决定是否使用系统属性OID (interpretOidsOption)。 | +| CONSTR_DEFAULT or CONSTR_CHECK | 对属性定义链表中的每一个属性进行处理,査看是否有默认值、表达式或约束检査。 | +| heap_create_with_catalog() | 使用heap_create_with_catalog创建表的物理文件并在相应的系统表中注册。 | +| StoreCatalogInheritance() | 用StoreCataloglnheritance存储表的继承关系。 | +| AddRelationNewConstraints() | 处理表中新增的约束与默认值 | +| ObjectAddressSet() | | + +​ + +## 2. 
页面组织和元组结构 + + astore的设计遵从段页式,存储结构以页面为单位,页面大小一般默认为8KB。 + +### 2.1 页面组织结构 + + + +​ `\src\include\storage\buf\bufpage.h` + +```c++ +typedef struct { + PageXLogRecPtr pd_lsn; /* 页面最新一次修改的日志lsn */ + uint16 pd_checksum; /* 页面CRC */ + uint16 pd_flags; /* 标志位 */ + LocationIndex pd_lower; /* 空闲位置开始出(距离页头) */ + LocationIndex pd_upper; /* 空闲位置结尾处(距离页头) */ + LocationIndex pd_special; /* 特殊位置起始处(距离页头) */ + uint16 pd_pagesize_version; + ShortTransactionId pd_prune_xid; + TransactionId pd_xid_base; + TransactionId pd_multi_base; + ItemIdData pd_linp[FLEXIBLE_ARRAY_MEMBER]; +} HeapPageHeaderData; +``` + +1. pd_lsn:该页面最后一次修改操作的预写日志结束位置的下一个字节,用于检查点推进和保持恢复操作的幂等性(幂等指对接口的多次调用所产生的结果和调用一次是一致的)。 +2. pd_checksum:页面的CRC校验值。 +3. pd_flags:页面标记位,用于保存各类页面相关的辅助信息,如页面是否有空闲的元组指针、页面是否已满、页面元组是否都可见、页面是否被压缩、页面是否是批量导入的、页面是否加密、页面采用的CRC校验算法等。 +4. pd_lower:页面中间空洞的起始位置,即当前已使用的元组指针数组的尾部。 +5. pd_upper:页面中间空洞的结束位置,即下一个可以插入元组的起始位置。 +6. pd_special:页面尾部特殊区域的起始位置。该特殊位置位于第一条元组记录和页面结尾之间,用于存储一些变长的页面级元信息,如采用的压缩算法信息、索引的辅助信息等。 +7. pd_pagesize_version:页面的大小和版本号。 +8. pd_prune_xid:页面清理辅助事务号(32位),通常为该页面内现存最老的删除或更新操作的事务号,用于判断是否要触发页面级空闲空间整理。实际使用的64位prune事务号由“pd_prune_xid”字段和“pd_xid_base”字段相加得到。 +9. pd_xid_base:该页面内所有元组的基准事务号(64位)。该页面所有元组实际生效的64位xmin/xmax事务号由“pd_xid_base”(64位)和元组头部的“t_xmin/t_xmax”字段(32位)相加得到。 +10. pd_multi_base:类似“pd_xid_base”字段,当对元组加锁时,会将持锁的事务号写入元组中,该64位事务号由“pd_multi_base”字段(64位)和元组头部的“t_xmax”字段(32位)相加得到。 +11. pd_linp:元组指针变长数组。 + +​ 页面头部分对应HeapPageHeaderData结构体。其中,pd_multi_base以及之前的部分对应定长成员,存储了整个页面的重要元信息;pd_multi_base之后的部分对应元组指针变长数组,其每个数组成员存储了页面中从后往前的、每个元组的起始偏移和元组长度。如图所示,真正的元组内容从页面尾部开始插入,向页面头部扩展;相应的,记录每条元组的元组指针从页面头定长成员之后插入,往页面尾部扩展;整个页面中间形成一个空洞,供后续插入的元组和元组指针使用。 +​ 对于一个一条具体元组,有一个全局唯一的逻辑地址,即元组头部的t_ctid,其由元组所在的页面号和页面内元组指针数组下标组成;该逻辑地址对应的物理地址,则由ctid和对应的元组指针成员共同给出。通过页面、对应元组指针数组成员、页面内偏移和元组长度的访问顺序,就可以完整获取到一条元组的完整内容。t_ctid结构体和元组指针结构体的定义代码如下。 + +​ `src\include\storage\item\itemid.h` + +```c++ +/* t_ctid结构体*/ +typedef struct ItemPointerData { + BlockIdData ip_blkid; /* 页号 */ + OffsetNumber ip_posid; /* 页面偏移,即对应的页内元组指针下标 */ +} ItemPointerData; +/* 页面内元组指针结构体 */ +typedef struct ItemIdData { + unsigned lp_off : 15, /* 元组起始位置(距离页头) */ + lp_flags : 2, /* 元组指针状态 */ + lp_len : 15; /* 元组长度 */ +} ItemIdData; +``` + +​ 如上两级的元组访问设计,主要有两个优点。 + +- 在索引结构中,只需要保存元组的t_ctid值即可,无须精确到具体字节偏移,从而降低了索引元组的大小(节约两个字节),提升索引查找效率; +- 将页面内元组的地址查找关系自封闭在页面内部的元组指针数组中,和外部索引解耦,从而在某些场景下可以让页面级空闲空间整理对外部索引数据没有影响,降低空闲空间回收的开销和设计复杂度。 + +### 2.2 元组数据部分结构 + + + +​ `src\include\access\htup.h` + +``` +typedef struct HeapTupleFields { + ShortTransactionId t_xmin; /* 插入元组事务的事务号 */ + ShortTransactionId t_xmax; /* 删除元组事务的事务号 */ + union { + CommandId t_cid; /* 插入或删除命令在事务中的命令号 */ + ShortTransactionId t_xvac; + } t_field3; +} HeapTupleFields; + +typedef struct HeapTupleHeaderData { + union { + HeapTupleFields t_heap; + DatumTupleFields t_datum; + } t_choice; + ItemPointerData t_ctid; /* 当前元组或更新后元组的行号 */ + uint16 t_infomask2; /* 字段个数和标记位 */ + uint16 t_infomask; /* 标记位 */ + uint8 t_hoff; /* 包括NULL字段位图、对齐填充在内的元组头部大小 */ + bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; /* NULL字段位图 */ + /* 实际元组数据再该元组头部结构体之后,距离元组头部处偏移t_hoff字节 */ +} HeapTupleHeaderData; +``` + +下面是元组头部结构体定义: + +1. 插入元组的事务号 +2. t_xmax,如果元组还没有被删除,那么为零。 +3. t_cid,插入或删除元组的命令号。 +4. t_ctid,当前元组的页面和页面内元组指针下标。如果该元组被更新,为更新后元组的页面号和页面内元组指针下标。 +5. t_hoff,元组数据距离元组头部结构体起始位置的偏移。 +6. 
### 2.2 Tuple Data Structure

`src\include\access\htup.h`

```c++
typedef struct HeapTupleFields {
    ShortTransactionId t_xmin; /* ID of the inserting transaction */
    ShortTransactionId t_xmax; /* ID of the deleting transaction */
    union {
        CommandId t_cid;       /* command ID of the insert or delete within its transaction */
        ShortTransactionId t_xvac;
    } t_field3;
} HeapTupleFields;

typedef struct HeapTupleHeaderData {
    union {
        HeapTupleFields t_heap;
        DatumTupleFields t_datum;
    } t_choice;
    ItemPointerData t_ctid; /* row ID of this tuple, or of the updated tuple */
    uint16 t_infomask2;     /* number of attributes plus flag bits */
    uint16 t_infomask;      /* flag bits */
    uint8 t_hoff;           /* header size, including the NULL bitmap and alignment padding */
    bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; /* NULL bitmap */
    /* the actual tuple data follows this header, at offset t_hoff from its start */
} HeapTupleHeaderData;
```

The fields of the tuple header are:

1. t_xmin: transaction ID of the inserting transaction.
2. t_xmax: zero if the tuple has not been deleted.
3. t_cid: command ID of the insert or delete within its transaction.
4. t_ctid: the current tuple's page number and line-pointer index; if the tuple has been updated, the page number and line-pointer index of the updated version.
5. t_hoff: offset of the tuple data from the start of the tuple header structure.
6. t_bits: the NULL bitmap covering all attributes. Each attribute corresponds to one bit in t_bits, hence the variable-length array.

Note that the structure has no member holding the tuple's actual data: through a programming trick, the actual data is placed in the space immediately following the HeapTupleHeaderData structure.

### 2.3 Tuple Structure

When used in memory, the tuple structure above is embedded in a larger tuple structure, defined below. Besides the t_data member that points at the tuple content, the other members carry additional system information about the tuple, which makes up its remaining system fields:

`src\include\access\htup.h`

```c++
typedef struct HeapTupleData {
    uint32 t_len;           /* total tuple size, header plus data */
    ItemPointerData t_self; /* tuple row ID */
    Oid t_tableOid;         /* OID of the table the tuple belongs to */
    TransactionId t_xid_base;
    TransactionId t_multi_base;
    HeapTupleHeader t_data; /* points to the tuple header */
} HeapTupleData;
```

HeapTupleData is the in-memory copy of a tuple; it is the form an on-disk tuple takes once read into memory.

## 3. Tuple Insertion

The code lives in `src\gausskernel\storage\access\heap\heapam.cpp`.

Before a tuple is inserted, a HeapTuple structure is first initialized from the column data and the tuple descriptor. The function heap_form_tuple implements this:

```c++
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
```

The values parameter is the array of attribute values for the tuple to be inserted, and the isnull array marks which attributes are NULL. From these two arrays, heap_form_tuple calls `tableam_tops_computedatasize_tuple` to compute the memory needed to form the tuple, then allocates enough space for it.

After the necessary header setup, `tableam_heap_fill_tuple` is called to fill the tuple with the actual data.

Once the tuple has been built in memory, it can be inserted into the table. The insertion interface is `tableam_heap_insert`, but the function that ultimately does the work is heap_insert. Its flow is as follows (see the sketch of the values/isnull convention after this list):

1. First, call the newoid function to allocate an OID for the new tuple (tup).
2. Initialize tup: set t_xmin and t_cmin to the current transaction ID and current command ID, set t_xmax to invalid, and set tableOid (the OID of the table containing this tuple).
3. Find a block belonging to the table whose free space exceeds the size of the new tuple, and load it into a buffer for the insertion (by calling RelationGetBufferForTuple).
4. With the tuple tup and the target buffer in hand, call RelationPutHeapTuple to insert the new tuple into the chosen buffer.
5. Write an XLog record to the transaction log.
6. Finally, unlock and release the buffer, and return the OID of the inserted tuple.
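The sketch below illustrates the values/isnull convention used by heap_form_tuple. It is hypothetical driver code with a simplified Datum alias, not the openGauss implementation: one Datum per column, with a parallel array marking the NULL columns.

```c++
#include <cstdint>
#include <cstdio>

// openGauss/PostgreSQL pass column values around as Datum; a pointer-sized
// integer is a reasonable stand-in for this sketch.
using Datum = uintptr_t;

int main() {
    // A two-column tuple: (id = 42, name = NULL).
    Datum values[2] = {static_cast<Datum>(42), 0};
    bool isnull[2] = {false, true};
    // heap_form_tuple(tupleDescriptor, values, isnull) would first compute the
    // data size from these two arrays, allocate the tuple, set up the header,
    // and then fill the data area (via tableam_heap_fill_tuple).
    for (int i = 0; i < 2; ++i)
        std::printf("column %d: %s\n", i, isnull[i] ? "NULL" : "has a value");
    return 0;
}
```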
## 4. Tuple Deletion

`src\gausskernel\storage\access\heap\heapam.cpp`

```c++
TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid,
    Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool allow_delete_self)
```

Deleting a tuple is done by heap_delete. Compared with the insertion flow just described, the deletion flow (which matches the PostgreSQL deletion flow) has one extra step: checking whether the tuple is being modified by the current transaction, in which case the tuple's ctid must be pointed at the physical location of the modified version before the buffer is unlocked and released. The main flow of heap_delete is:

1. From the tid of the tuple to be deleted, obtain the corresponding buffer and take an exclusive lock on it.
2. Call HeapTupleSatisfiesUpdate to check the tuple's visibility to the current transaction. If the tuple is invisible to the current transaction (HeapTupleSatisfiesUpdate returns HeapTupleInvisible), unlock and release the buffer and return an error.
3. If the tuple is being modified by this transaction (HeapTupleSatisfiesUpdate returns HeapTupleSelfUpdated) or has already been modified (HeapTupleSatisfiesUpdate returns HeapTupleUpdated), point the tuple's ctid field at the physical location of the modified version, unlock and release the buffer, and return HeapTupleSelfUpdated or HeapTupleUpdated respectively.
4. If the tuple is being modified by another transaction (HeapTupleSatisfiesUpdate returns HeapTupleBeingUpdated), wait for that transaction to finish and re-check. If modification is allowed (HeapTupleSatisfiesUpdate returns HeapTupleMayBeUpdated), heap_delete continues.
5. Set t_xmax to the current transaction ID. At this point the tuple is marked deleted; one can say it has been deleted.
6. Write the XLog record.
7. If the tuple has out-of-line data, i.e., data stored via TOAST (the oversized-attribute storage technique), the corresponding data in the TOAST table must also be deleted, by calling toast_delete.
8. If it is a system catalog tuple, send invalidation messages.
9. Update the free-space value in the FSM for the block containing the tuple.

## 5. Multi-Version Tuple Mechanism

`src\gausskernel\storage\access\heap\heapam.cpp`

```c++
TM_Result heap_update(Relation relation, Relation parentRelation, ItemPointer otid, HeapTuple newtup,
    CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool allow_update_self)
```

Now consider tuple updates. Because openGauss does not update tuples in place, a page may hold multiple versions of the same tuple. The multi-version tuple mechanism keeps several historical physical versions of a record to resolve read/write concurrency conflicts on it (read and write transactions work on different physical versions of the tuple). The following example inserts a tuple and then updates it twice:

1. First, transaction 10 inserts a new record with value value1. The page change: at the position indicated by the first line pointer of physical page 0, a physical tuple is inserted with xmin = 10, xmax = 0, ctid = (0,1), and data = value1. When the transaction commits, the CSN advances from 3 to 4, and that CSN is recorded in the CSN log at the slot for transaction 10.
2. Then transaction 12 changes the record's value from value1 to value2. The page change: at the position indicated by the second line pointer of physical page 0, another physical tuple is inserted with xmin = 12, xmax = 0, ctid = (0,2), and data = value2. The first tuple is kept, but its xmax is changed from 0 to 12 and its ctid to (0,2), the physical location of the new version. When the transaction commits, the CSN advances from 7 to 8, and that CSN is recorded in the CSN log at the slot for transaction 12.
3. Finally, transaction 15 changes the record's value from value2 to value3. The page change: (assuming page 0 is full) at the position indicated by the first line pointer of physical page 1, a physical tuple is inserted with xmin = 15, xmax = 0, ctid = (1,1), and data = value3; the first two physical tuples are kept, but the second tuple's xmax is changed from 0 to 15 and its ctid to (1,1), the physical location of the newest version. When the transaction commits, the CSN advances from 9 to 10, and that CSN is recorded in the CSN log at the slot for transaction 15.
4. A concurrent read transaction takes the current global CSN as its snapshot CSN when query execution starts. Among the three physical versions of the record above, the read query can see only the version satisfying both of the following conditions:

- The CSN corresponding to the tuple's xmin is less than or equal to the query's snapshot CSN.
- The tuple's xmax is 0, or the CSN corresponding to its xmax is greater than the query's snapshot CSN.

How does a reader know which version of the record to read? This is determined by the CSN, a global variable that grows in steps of 1 and is obtained at commit time. Every non-read-only transaction acquires an xid while running; when it commits it advances the CSN, and the mapping between the current CSN and the transaction's xid is saved (the CSNLOG).

A concurrent read transaction thus uses its own query snapshot to select the appropriate version among the multiple historical versions of a record. Even under the repeatable read isolation level, reusing the same snapshot always selects the same historical version of the tuple. Throughout, the reader never blocks concurrent writes (updates and deletes) to the record.
Note that under repeatable read, phantom reads can still occur: a transaction (with the same read view) querying the same range twice may, on the second query, see rows it did not see the first time.

As the example shows, the detailed flow of updating a tuple combines the two flows above: mark the old version (set its xmax and redirect its ctid to the new version) and insert the new version.
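To close, here is a minimal sketch of the snapshot visibility test just described, using the CSN values from the example above. The types and driver code are simplified and hypothetical, not the openGauss implementation: a version is visible when its xmin committed at or before the snapshot CSN and it is not yet deleted as of that snapshot.

```c++
#include <cstdint>
#include <cstdio>

using Csn = uint64_t;

struct TupleVersion {
    Csn xmin_csn; // CSN at which the inserting transaction committed
    Csn xmax_csn; // 0 if not deleted, else the commit CSN of the deleter
    const char *data;
};

// The two visibility conditions stated in the text.
bool visible(const TupleVersion &v, Csn snapshot) {
    return v.xmin_csn <= snapshot && (v.xmax_csn == 0 || v.xmax_csn > snapshot);
}

int main() {
    // The three versions from the example, committed at CSN 4, 8, and 10.
    TupleVersion versions[] = {{4, 8, "value1"}, {8, 10, "value2"}, {10, 0, "value3"}};
    Csn snapshot = 9; // a query that started between the second and third commits
    for (const TupleVersion &v : versions)
        if (visible(v, snapshot))
            std::printf("snapshot %llu sees %s\n",
                        (unsigned long long)snapshot, v.data); // prints value2
    return 0;
}
```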
diff --git a/content/zh/post/wangrui/PowerDesigner_for_openGauss.md b/content/zh/post/wangrui/PowerDesigner_for_openGauss.md new file mode 100644 index 0000000000000000000000000000000000000000..6ce0d62f7c195265fbd378e9d626990ec445cdf4 --- /dev/null +++ b/content/zh/post/wangrui/PowerDesigner_for_openGauss.md @@ -0,0 +1,92 @@ ++++ +title = "Guide to Connecting PowerDesigner to openGauss via JDBC" +date = "2021-03-16" +tags = ["Guide to Connecting PowerDesigner to openGauss via JDBC"] +archives = "2021-03-16" +author = "wangrui" +summary = "Guide to Connecting PowerDesigner to openGauss via JDBC" +img = "/zh/post/xingchen/title/img1.png" +times = "9:30" ++++



# About PowerDesigner


PowerDesigner is Sybase's enterprise modeling and design solution. Using a model-driven approach that aligns business with IT, it helps deploy effective enterprise architectures and provides powerful analysis and design techniques for lifecycle management.
In some business scenarios there is no PDM file for the database, so the relationships between tables cannot be seen intuitively. PowerDesigner can then be used for reverse engineering, generating a PDM file from the existing database.


# Environment Preparation


(1) Download and install PowerDesigner 16.6, go to {PowerDesigner_install_dir}/Resource Files/DBMS, and import the opengauss.xdb file;

[Download the openGauss configuration file](../images/opengauss.xdb)

(2) Run the cmd command `java -version` to confirm that the local JDK is version 1.5 or later; if it is older than 1.5, configure JAVA_HOME and CLASSPATH in the system environment variables;

(3) Visit the openGauss website https://opengauss.org/ and download the matching JDBC driver;


# PowerDesigner Configuration


(1) Open PowerDesigner and choose "File" -> "Reverse Engineer" -> "Database..."
![](../images/image002.jpg)


(2) On the "General" tab, enter a custom model name and select openGauss as the database type in the DBMS drop-down list;
![](../images/image003.jpg)


(3) Select "Using a data source" and click the button on the right to edit the database connection information;
![](../images/image004.jpg)


(4) Select "Connection profile"; for the first connection, click "Configure" to set it up;
![](../images/image005.jpg)


(5) Add a connection profile with the following settings:
Connection profile name: name of the profile
Directory: local path where the profile is saved
Description: description of the profile, filled in according to its actual purpose
Connection type: connection method; choose JDBC here
DBMS type: database type; most mainstream databases are offered; choose PostgreSQL here
User name: user name for logging in to the database; may be left empty here
JDBC driver class: the driver class; use the default org.postgresql.Driver
JDBC connection URL: connection URL in the format jdbc:postgresql://{host}:{port}/{database}
JDBC driver jar files: path to the jar file; click on the right and select the downloaded opengauss driver
![](../images/image006.jpg)


(6) Click "Test Connection" at the bottom left to test the connection; note that the client IP must be added to the whitelist on the database server, and an ordinary user should be used for the connection;
![](../images/image007.jpg)


(7) When the message "Connection test successful" appears, the connection test has passed.


# Common Problems


(1) Non SQL Error : Could not load class com.postgresql.jdbc.Drive.
Cause: the JDBC driver cannot be found;
Solution: configure CLASSPATH in the system environment variables, add the jar path to it, and restart PowerDesigner;
![](../images/image008.jpg)


(2) Fatal Error. Unable to initialize DatabaseMetaData class.
Cause: faulty local Java environment;
Solution: the Task Manager shows that PowerDesigner is a 32-bit program and therefore needs a 32-bit JDK; reinstall a 32-bit JDK and restart PowerDesigner;
![](../images/image009.jpg)


(3) Connection to 90.90.52.60:5432 refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.
Cause: the database refused the connection;
Solution:
1. Confirm that the IP and port in the URL are correct;
2. Confirm that the database listen address is correct;
3. Confirm that the server firewall is disabled. diff --git a/content/zh/post/wangrui/images/image001.jpg b/content/zh/post/wangrui/images/image001.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90374efd2da76d9530654ae2b5313b82559c8481 Binary files /dev/null and b/content/zh/post/wangrui/images/image001.jpg differ diff --git a/content/zh/post/wangrui/images/image002.jpg b/content/zh/post/wangrui/images/image002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b65e5852921076389dd025e2dadac26ec075b528 Binary files /dev/null and b/content/zh/post/wangrui/images/image002.jpg differ diff --git a/content/zh/post/wangrui/images/image003.jpg b/content/zh/post/wangrui/images/image003.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bc845f3956e212a3c9db0bfeaad5614d5f48491 Binary files /dev/null and b/content/zh/post/wangrui/images/image003.jpg differ diff --git a/content/zh/post/wangrui/images/image004.jpg b/content/zh/post/wangrui/images/image004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9777b98ff6348020236814ad7ae1145f8fdb47e0 Binary files /dev/null and b/content/zh/post/wangrui/images/image004.jpg differ diff --git a/content/zh/post/wangrui/images/image005.jpg b/content/zh/post/wangrui/images/image005.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04541677aefb03706ab36bd98977eae60c90d7af Binary files /dev/null and b/content/zh/post/wangrui/images/image005.jpg differ diff --git a/content/zh/post/wangrui/images/image006.jpg b/content/zh/post/wangrui/images/image006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..057a3496b1adca88885373e1a7f534114e6d100b Binary files /dev/null and b/content/zh/post/wangrui/images/image006.jpg differ diff --git a/content/zh/post/wangrui/images/image007.jpg b/content/zh/post/wangrui/images/image007.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b957091aaabede1c6bb87d5245075283aa3de95e Binary files /dev/null and b/content/zh/post/wangrui/images/image007.jpg differ diff --git a/content/zh/post/wangrui/images/image008.jpg b/content/zh/post/wangrui/images/image008.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c07d89a420b6380566a96943514dadda9d745afd Binary files /dev/null and b/content/zh/post/wangrui/images/image008.jpg differ diff --git a/content/zh/post/wangrui/images/image009.jpg b/content/zh/post/wangrui/images/image009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b4dbf1369cb1e1a9ac8aaa63ef1238b0b3d3cef Binary files /dev/null and b/content/zh/post/wangrui/images/image009.jpg differ diff --git a/content/zh/post/wangrui/images/opengauss.xdb b/content/zh/post/wangrui/images/opengauss.xdb new file mode 100644 index 0000000000000000000000000000000000000000..92f1e498f80955601a0a5ee76f0d69441a98840e --- /dev/null +++ b/content/zh/post/wangrui/images/opengauss.xdb @@ -0,0 +1,4432 @@ + + + + + + + +83063C86-4E0A-4954-BF00-620C0D0F02D8 +openGauss 1.1.0 +PGSQL9 +0 + +1341906213 +mledier +ANSI +'****************************************************************************** +'* Purpose: This VB-Script holds global definitions shared by all the
custom- +'* checks scripts of the model extension. +'****************************************************************************** + +Option Explicit ' This is to ensure all used variables are defined + + + + +F1A020C1-477C-4FE6-B187-7FC7D46FE484 +General +0 + +0 + +Target DBMS identification + + +EA3C706C-F552-4CC5-8A17-AB0A2D0F4BDE +SqlSupport +0 + +0 + +SQL syntax allowed. This does not impact the script generation, but it impacts the SQL Preview +1 + + +7868A7BA-E474-4E27-BDF3-1104AFABF7C0 +EnableCheck +0 + +0 + +Determines if the generation of check parameters is authorized or not +1 + + +714677D5-B508-4B87-AB65-AF6E66C5C151 +Enableconstname +0 + +0 + +Determines if constraint names are used during the generation +1 + + +AB1F411F-11C4-4EDD-8A76-7A79B81D042D +UniqConstName +0 + +0 + +Determines if unique constraint names for objects are authorized or not + + +68C5494A-C8AB-4FC7-A431-3EAF5B4C44D6 +EnableIntegrity +0 + +0 + +Allows integrity constraints in the DBMS. This controls whether generation parameters for primary, foreign, and alternate keys are grayed or available +1 + + +09CC7C62-0030-4BD4-BB4D-24373F684A76 +EnableMultiCheck +0 + +0 + +Determines if the generation of multiple check parameters is authorized or not +1 + + + + +CC013A88-7C22-4315-9C88-0B17EC54F8D0 +Script +0 + +0 + +DBMS characteristics, command definition, and data type translations for the Script generation and reverse engineering + + +5FD38E36-8DC0-4299-974F-F28B32CCF6C1 +Sql +0 + +0 + +Contains sub-categories Syntax, Format, File and Keywords. Each sub-category contains entries whose values define general syntax for the database + + +5C4F8EE0-FFDE-4901-A42E-08330F43BE4E +Syntax +0 + +0 + +Contains general parameters for SQL syntax + + +1207496B-6D26-4E86-B310-FC0B82E8E494 +Terminator +0 + +0 + +End of command character +; + + +30DA977E-0C12-41F8-A871-734B9C92752B +BlockTerminator +0 + +0 + +End of block character + + +E5E60CF7-3FDB-4F4A-ACFA-E61497BB94F8 +Delimiter +0 + +0 + +Field separation character. Example: col1, col2, col3 +, + + +02FCF451-A3F8-4443-854F-39B46B4E90D0 +Quote +0 + +0 + +Character used to enclose string values +' + + +53DCF79F-F968-4CFB-8081-100F8BE9598E +SqlContinue +0 + +0 + +Continuation character + + +941E4818-62E6-468B-AF8F-682C2B802D75 +LineComment +0 + +0 + +Characters used to enclose a single line comment +-- + + +EE966B5D-7C62-4EAA-837C-BEEC61F23DD8 +BlockComment +0 + +0 + +Characters used to enclose a multi-line comment +/* */ + + +7A611A4A-F0EF-4AF8-9A14-CACDA6F94A83 +UseBlockTerm +0 + +0 + +Use end of block character by default + + + + +B9BD47AF-E067-4888-91D0-369F4FC676FD +Format +0 + +0 + +Contains entries that define script formatting + + +BE0A4657-577C-40DA-823D-DFE9D65EA0FE +IllegalChar +0 + +0 + +Invalid characters for names +" +-*/!=<>'"()[]". 
+ + +884832B1-A23F-4D99-9D32-3300C16BEFCD +UpperCaseOnly +0 + +0 + +Uppercase only + + +410153CC-039A-4BCC-A9DA-73A14758BB96 +LowerCaseOnly +0 + +0 + +Lowercase only + + +270DB985-97B4-4796-B3AC-8E008529FBC5 +EnableOwnerPrefix +0 + +0 + +Object codes can have a prefix made of the object owner code +1 + + + + +3025ED8F-2ED3-44A9-824D-5BCD6D6FD1B9 +File +0 + +0 + +Contains header, footer and usage text entries used during the generation + + +F615AB51-5D07-436F-8C4D-F43C8B7C5FDD +Header +0 + +0 + +Header text for a database generation script + + +71AC941F-F3B7-4278-BC51-DF34D6EF42CF +Footer +0 + +0 + +Footer text for a database generation script + + +F43D615E-3EBD-4C14-9199-5914CBEAA5DD +EnableMultiFile +0 + +0 + +Multi-script allowed +1 + + +FA22FD3F-AB4C-4DC2-86E9-10ED4592C386 +ScriptExt +0 + +0 + +Main script extension in database generation +sql + + +E532FCE4-F3B1-4CD0-BBD8-6006C09F4ACB +TableExt +0 + +0 + +Other scripts extension in database generation +tab + + +7AE42F8F-8544-4AB1-B396-D843BE10CE7E +StartCommand +0 + +0 + +Command for executing a script + + +8391BA96-63E7-43A6-B37C-7C65B9525CC9 +Usage1 + (1) Go to the directory %PATHSCRIPT% + (2) Start the SQL interpreter + (3) Run the database creation script: + %NAMESCRIPT% +0 + +0 + +Usage for a single script in database generation + + +29E46CF1-DB88-4860-ABD2-09D9933D3FC6 +Usage2 + (1) Go to the directory %PATHSCRIPT% + (2) Start the SQL interpreter + (3) Run the database creation scripts +0 + +0 + +Usage for multiple scripts in database generation + + + + +CF7B9B8A-86B0-42F1-883E-BB792C02DFE1 +Keywords +0 + +0 + +Contains the list of reserved words and functions available in SQL + + +C65EFCC0-604A-4E47-B059-EA77B400FCDE +ReservedDefault +CURRENT_USER +SESSION_USER +USER +CURRENT_DATE +CURRENT_TIME +CURRENT_TIMESTAMP +NULL +0 + +0 + +Reserved default values + + +B8C734A3-0407-4828-A3AF-9EC38A4A7084 +GroupFunc +avg() +count() +max() +min() +sum() +0 + +0 + +List of SQL functions to use with group keywords. + + +1997444A-6E38-441F-BB1A-01CFFAC96EC4 +NumberFunc +abs() +degrees() +exp() +ln() +log() +pi() +pow() +radians() +round() +sqrt() +cbrt() +trunc() +float() +float4() +integer() + +0 + +0 + +List of SQL functions used on numbers + + +08A5913D-0845-4676-B284-5E02B34DFC49 +CharFunc +char() +char_length() +character_length() +initcap() +lower() +lpad() +ltrim() +octet_length() +position() +rpad() +rtrim() +substr() +substring() +text() +textpos() +translate() +trim() +upper() +varchar() + +0 + +0 + +List of SQL functions for characters and strings + + +4E9B8DD2-C3C8-431A-83B1-84AEDB98B887 +DateFunc +abstime() +age() +date_part() +date_trunc() +interval() +isfinite() +reltime() +timestamp() + +0 + +0 + +List of SQL functions for dates + + +50363314-58C8-4960-BA9D-4F69F83815FB +ConvertFunc +to_char() +to_date() +to_timestamp() +to_number() +0 + +0 + +List of SQL functions used to convert values between hex and integer and handling strings + + +E0B49A16-9386-4F77-ABD8-B00A609C8F41 +OtherFunc +coalesce() +nullif() +area() +box() +center() +circle() +diameter() +height() +isclosed() +isoldpath() +isopen() +length() +lseg() +npoint() +path() +pclose() +point() +polygon() +popen() +radius() +revertpoly() +upgradepath() +upgradepoly() +width() +broadcast() +host() +masklen() +netmask() + +0 + +0 + +List of other SQL functions + + +8763140B-F7FB-4D02-9792-2F532B3C42DE +ListOperators +- +! +!! 
+# +% +& +* +*= +/ +@ +^ +| +|/ +|| +||/ +~ ++ +< +< all +< any +<< +<= +<= all +<= any +<> +<> all +<> any += += all += any +=* +> +> all +> any +>= +>= all +>= any +>> +and +between +exists +in +is +is not +like +not +not between +not exists +not in +not like +or +0 + +0 + +List of operators for comparing values, boolean, and various semantic operators + + +6B334218-ED13-4F55-93CC-9B462934CDCD +Commit +commit +0 + +0 + +Command for validating the transaction by OBDC + + +F9D5F8F9-A693-4FD9-897D-A61D7150C44E +ReservedWord +ALL +ANALYSE +ANALYZE +AND +ANY +AS +ASC +AUTHORIZATION +BETWEEN +BIGINT +BINARY +BIT +BOOLEAN +BOTH +CASE +CAST +CHAR +CHARACTER +CHECK +COALESCE +COLLATE +COLUMN +CONSTRAINT +CONVERT +CREATE +CROSS +CURRENT_DATE +CURRENT_TIME +CURRENT_TIMESTAMP +CURRENT_USER +DEC +DECIMAL +DEFAULT +DEFERRABLE +DESC +DISTINCT +DO +ELSE +END +EXCEPT +EXISTS +EXTRACT +FALSE +FLOAT +FOR +FOREIGN +FREEZE +FROM +FULL +GRANT +GROUP +HAVING +ILIKE +IN +INITIALLY +INNER +INT +INTEGER +INTERSECT +INTERVAL +INTO +IS +ISNULL +JOIN +LEADING +LEFT +LIKE +LIMIT +LOCALTIME +LOCALTIMESTAMP +NATURAL +NCHAR +NEW +NONE +NOT +NOTNULL +NULL +NULLIF +NUMERIC +OFF +OFFSET +OLD +ON +ONLY +OR +ORDER +OUTER +OVERLAPS +OVERLAY +PLACING +POSITION +PRIMARY +REAL +REFERENCES +RIGHT +ROW +SELECT +SESSION_USER +SETOF +SIMILAR +SMALLINT +SOME +SUBSTRING +TABLE +THEN +TIME +TIMESTAMP +TO +TRAILING +TREAT +TRIM +TRUE +UNION +UNIQUE +USER +USING +VARCHAR +VERBOSE +WHEN +WHERE +0 + +0 + +Reserved words + + + + + + +E16BAC33-A1FB-4D41-A754-3AB61E287A6A +Objects +0 + +0 + +Contains sub-categories for each type of object in the database, for example: Table, or Reference. Each sub-category contains entries whose values define database commands and object-related characteristics + + +5787687A-ED79-4140-AFDB-1BC438B4E1B2 +Table +TABL +0 + +0 + +The following system variables are available: + "TABLE" // generated code of the table + "TNAME" // name of the table + "TCODE" // code of the table + "TLABL" // comment of the table + "PKEYCOLUMNS" // list of primary key columns. Eg: A, B + "TABLDEFN" // complete body of the table definition. Contains definition of columns, checks and keys + + + +D25D7DD8-E18F-4E6C-8EDF-9ECED15275D7 +Maxlen +0 + +0 + +Maximum object code length +31 + + +4B9BC540-BD54-44B9-99E6-999BD9EBF5E3 +ConstName +CKT_%.U26:TABLE% +0 + +0 + +Constraint name template for check of table + + +F5049524-C037-4724-9519-F2C985F2BFD3 +Create +create [%Temporary% ]table [%QUALIFIER%]%TABLE% ( + %TABLDEFN% +) +[%OPTIONS%] +0 + +0 + +Command for creating a table. Example: create table %TABLE% + + +D3C15873-790A-4ACD-BC92-859B7C7C1F0E +Options +inherits : composite=yes, parenthesis=yes, separator=yes +{ + <parent_table> %s : multiple=yes +} +<special_columns> %s : list=with oids|without oids +on commit %s : list=preserve rows|delete rows|drop +tablespace %s : category=tablespace +0 + +0 + +Available options for creating a table + + +BCC29DEE-6F1F-45EE-B38B-41EFD1FFF636 +TableComment +comment on table [%QUALIFIER%]%TABLE% is +%.q:COMMENT% +0 + +1244729550 +mledier +Command for adding a table comment + + +71513402-ADFB-4CE2-A73C-56D3D8E26CC8 +Drop +drop table [%QUALIFIER%]%TABLE% +0 + +0 + +Command for dropping a table. 
Example: drop table %TABLE% + + +5A2D175A-FB8B-4182-A24B-3A863BEB2A49 +Rename +alter table [%QUALIFIER%]%OLDTABL% + rename to %NEWTABL% +0 + +1244729550 +mledier +Command for renaming a table + + +9E059BBF-C0DD-4A20-85E0-FEBA2D8A9435 +AlterTableHeader +0 + +0 + +Alter table header + + +080E3432-3F41-470A-B7A0-C83594DE3E52 +AlterTableFooter +0 + +0 + +Alter table footer + + +7524AF71-E973-4D57-98BF-86C82A19CF11 +DefineTableCheck +[constraint %CONSTNAME% ]check (%.A:CONSTRAINT%) +0 + +0 + +Allows to customize the script of table check constraints + + +DC6A6F24-AE59-494D-986E-7727698D9C48 +Enable +0 + +0 + +Table allowed +1 + + +8B726C93-8587-4D14-89DD-AB4744CF7677 +AddTableCheck +alter table [%QUALIFIER%]%TABLE% + add [constraint %CONSTNAME% ]check (%.A:CONSTRAINT%) +0 + +1244729550 +mledier +Allows to customize the script for modifying table constraints within an alter table statement + + +BA26AAF4-1E2B-40AF-BE30-F3B8F1BD6E95 +DropTableCheck +alter table [%QUALIFIER%]%TABLE% + drop constraint %CONSTNAME% +0 + +1244729550 +mledier +Command for dropping a table check in an alter table statement + + +B99A5700-27CC-45D8-B3C9-04218133C0D9 +SqlListQuery +{OWNER, TABLE, TABLE_TYPE, COMMENT} + +select + pg_get_userbyid(t.relowner), + t.relname, + case substring(t.relname from 1 for 3) + when 'pg_' then 'SYSTEM TABLE' else 'TABLE' + end::varchar, + obj_description(t.oid, 'pg_class') +from + pg_class t +where + t.relkind = 'r' +[ and pg_get_userbyid(t.relowner) = %.q:SCHEMA%] +order by 1, 2 +0 + +1341908013 +mledier +SQL query to list objects + + +81F8F744-2727-402B-BC1A-5B056B1E571E +AfterCreate +[%OWNER%?%SetOwnership%] +0 + +1257761737 +mledier +Commands executed after create statement + + +D46478F3-F5A2-45CA-811E-BCE8CF3780F0 +SqlChckQuery +{OWNER, TABLE, CONSTNAME, CONSTRAINT} + +select + pg_get_userbyid(t.relowner), + t.relname, + k.conname, + k.consrc +from + pg_constraint k + join pg_class t on (t.oid = k.conrelid) +where + k.contype = 'c' + and k.conrelid != 0 +[ and pg_get_userbyid(t.relowner) = %.q:OWNER%] +[ and t.relname = %.q:TABLE%] + and array_dims(k.conkey) != '[1:1]' -- a table constraint applies on more than one column ! 
+ +0 + +1341908522 +mledier +SQL query to reverse object check constraints + + +3B8C614F-A6CB-47E9-A1C9-4AD0464951C1 +SetOwnership +.ifnot(%Owner.Schema%) +[-- set table ownership +]alter table [%QUALIFIER%]%TABLE% owner to [%R%?%NEWOWNER%:%OWNER%] +; +.endif() +0 + +1257761811 +mledier +1 + + +070CDD2B-026F-46B2-B53A-F06EB48A65C4 +ReversedStatements +SetOwnership +0 + +0 + +Additional statements to be reversed by script + + +DCBEC306-5F49-4294-B8B8-CFA0610D68B7 +SqlOptsQuery +{OWNER, TABLE, OPTIONS} + +select + pg_get_userbyid(t.relowner), + t.relname, + case (t.relhasoids) when '1' then 'with oids' else 'without oids' end + ||case when (t.reltablespace = 0) then '' else ' tablespace ' || (select s.spcname from pg_tablespace s where s.oid = t.reltablespace) end as coln +from + pg_class t +where + t.relkind = 'r' +[ and pg_get_userbyid(t.relowner) = %.q:SCHEMA%] +order by 1, 2 +0 + +1341908522 +mledier +SQL query to reverse object physical options + + + + +D469C1EC-3699-4486-B3C1-22A547A4D775 +Index +INDX +0 + +0 + +The following system variables are available: +(parent table items are also available for indexes) + "INDEX" // generated code of the index + "INDEXNAME" // index name + "INDEXCODE" // index code + "UNIQUE" // keyword "unique" when the index is unique + "INDEXTYPE" // index type (available only for a few DBMS) + "INDEXKEY" // keywords "primary", "unique" or "foreign" depending on the index origin + "CIDXLIST" // list of index columns. Eg: A asc, B desc, C asc + "CLUSTER" // keyword "cluster" when the index is cluster +For index columns, the following system variables are available: + "ASC" // keywords "ASC" or "DESC" depending on sort order + "ISASC" // TRUE if the index column sort is ascending + + + +6A266B2C-8C89-4333-B581-01FE4683BD49 +Maxlen +0 + +0 + +Maximum object code length +31 + + +158351C4-310E-4E60-AD98-A8AE9C963ABF +MaxColIndex +0 + +0 + +Maximum number of columns in an index +16 + + +A6311922-FFDD-48A8-BFF1-99B7FA752D4D +Enable +0 + +0 + +Index allowed +1 + + +63D26606-B484-4A2C-AC86-C366BF8E2124 +EnableAscDesc +0 + +0 + +ASC, DESC keywords allowed + + +E67A82C2-24E0-4C5E-AB7C-CAEE31E7DDCF +UniqName +0 + +0 + +Unique index name in the database +1 + + +C26CDC7E-654B-40BD-AB1E-A55CD2DF68D3 +Create +create [%UNIQUE%] index %INDEX% on %TABLE%[ using %INDEXTYPE%] ( +%CIDXLIST% +) +[%OPTIONS%] +0 + +0 + +Command for creating an index. Example: create index %INDEX% + + +AA8102D0-D4B0-4812-B4F4-25318309F35B +AddColIndex +%COLUMN% +0 + +0 + +Command for defining an index column + + +A496C837-C471-4255-A9A1-E7ADA47099A2 +Options +tablespace %s : category=tablespace +where %s +0 + +0 + +Default options for creating an index + + +89E10A37-4899-40B7-BDEF-EF62CC4EEA5A +Drop +drop index %INDEX% +0 + +0 + +Command for dropping an index. 
Example: drop index %INDEX% + + +A3892D84-13EC-4B8D-88D6-C7F04A4F2FD7 +IndexType +BTREE +RTREE +HASH +0 + +0 + +List of types available for an index + + +19D48614-2538-48BD-80E4-E9E932CEEDE5 +SqlListQuery +{OWNER, TABLE, INDEX, UNIQUE, INDEXTYPE, CIDXLIST} + +select + pg_get_userbyid(t.relowner), + t.relname, + i.relname, + case when (x.indisunique) then 'unique' else '' end, + (select c.amname from pg_am c where c.oid = i.relam), + pg_get_indexdef(x.indexrelid, 1, True) + || case when (x.indnatts > 1) then ', ' || pg_get_indexdef(x.indexrelid, 2, True) else '' end + || case when (x.indnatts > 2) then ', ' || pg_get_indexdef(x.indexrelid, 3, True) else '' end + || case when (x.indnatts > 3) then ', ' || pg_get_indexdef(x.indexrelid, 4, True) else '' end + || case when (x.indnatts > 4) then ', ' || pg_get_indexdef(x.indexrelid, 5, True) else '' end + || case when (x.indnatts > 5) then ', ' || pg_get_indexdef(x.indexrelid, 6, True) else '' end + || case when (x.indnatts > 6) then ', ' || pg_get_indexdef(x.indexrelid, 7, True) else '' end + || case when (x.indnatts > 7) then ', ' || pg_get_indexdef(x.indexrelid, 8, True) else '' end as coln +from + pg_class t + join pg_index x on (x.indrelid = t.oid and x.indisprimary is false) + join pg_class i on (i.oid = x.indexrelid) +where 1=1 +[ and pg_get_userbyid(t.relowner)=%.q:OWNER%] +[ and t.relname=%.q:TABLE%] +order by 1, 2, 3 + +0 + +1341908522 +mledier +SQL query to list objects + + +690BA164-502C-4CD7-87D2-9D103EC03A9D +EnableFunction +0 + +0 + +Function-based indexes allowed +1 + + +F6066718-C8EF-4569-9A40-03BC0A51237D +SqlOptsQuery +{OWNER, TABLE, INDEX, OPTIONS} + +select + pg_get_userbyid(t.relowner), + t.relname, + i.relname, + case when (i.reltablespace = 0) then '' else 'tablespace ' || (select s.spcname from pg_tablespace s where s.oid = i.reltablespace) end + ||case when (x.indpred is null) then '' else ' where ' || pg_get_expr(x.indpred, t.oid, false) end as coln +from + pg_class t + join pg_index x on (x.indrelid = t.oid and x.indisprimary is false) + join pg_class i on (i.oid = x.indexrelid) +where (i.reltablespace != 0) or (x.indpred is not null) +[ and pg_get_userbyid(t.relowner) = %.q:OWNER%] +[ and t.relname = %.q:TABLE%] +[ and i.relname = %.q:INDEX%] +0 + +1341908522 +mledier +SQL query to reverse object physical options + + + + +12961F66-AA6A-46EF-B73E-0429D5753729 +Sequence +SQNC +0 + +0 + +The following system variables are available: + "SQNC" // generated code of the sequence + + + +2590B828-ED12-472D-BD57-6C94ACB0018D +Enable +0 + +0 + +Sequence allowed +1 + + +E8D096B7-6265-4F0A-827E-AA7D09CD2D75 +Create +create sequence %SQNC% +[%OPTIONS%] +0 + +0 + +Command to create a sequence + + +04BD7462-001C-4E2A-9549-28DB02D36F04 +Drop +drop sequence %SQNC% +0 + +0 + +Command to drop a sequence + + +15DD78FF-1E58-44E3-8F0F-3B3B308D4E0F +Options +increment %d +minvalue %d +maxvalue %d +start %d +cache %d +cycle + +0 + +0 + +Options for creating a sequence + + +E063B299-96AF-4761-8BAF-F0C68AEBF420 +DefOptions +0 + +0 + +Default values for sequence options + + +1BE49214-419D-45F0-A7C4-5317B5191708 +SqlListQuery +{SQNC} + +select + s.relname +from + pg_class s +where + s.relkind = 'S' +order by 1 +0 + +0 + +SQL query to list objects + + +089823CE-5C3C-4E6C-8ADC-853E7EFEB8A5 +SqlAttrQuery +{SQNC, OPTIONS} + +select + s.sequence_name, + ('increment ' || (increment_by::varchar))::varchar +from + [%QUALIFIER%]%SQNC% s + +0 + +0 + +SQL query to reverse object attributes + + +780D038C-ECD5-4745-8A4A-101DB3C07F68 +Rename +rename %OLDNAME% 
to %NEWNAME% +0 + +0 + +Command for renaming a sequence + + + + +FF3844ED-6B0B-40E6-937B-917D5CB424A8 +Column +COLN +0 + +0 + +The following system variables are available: +(parent table items are also available for columns) + "COLUMN" // generated code of the column + "COLNNO" // position of the column in the list of columns of the table + "COLNNAME" // name of the column + "COLNCODE" // code of the column + "PRIMARY" // keyword "primary" if the column is primary + "ISPKEY" // TRUE if the column is part of the primary key + "FOREIGN" // TRUE if the column is part of one foreign key + + + +828D9749-94BF-484F-A06C-5C7B1F960903 +Maxlen +0 + +0 + +Maximum object code length +31 + + +CB82A9E9-FDD3-49DA-BA2F-8DFF3D31F8B8 +EnableDefault +0 + +0 + +Default values allowed +1 + + +09695073-87B8-48F8-98FF-93E405631B92 +ConstName +CKC_%.U17:COLUMN%_%.U8:TABLE% +0 + +0 + +Constraint name template for a column check parameter + + +8AC0DE6C-5EC7-4F0D-A25F-9446CC0E9101 +Add +%20:COLUMN% [%IDENTITY%?SERIAL:%20:DATATYPE%][.Z:[ %NULL%][ %NOTNULL%][ default %DEFAULT%] + [%CONSTDEFN%]] +0 + +0 + +Command for defining a table column + + +1A0A97C8-7902-4E80-85DE-03EB9F1C78B9 +ColumnComment +comment on column %TABLE%.%COLUMN% is +%.q:COMMENT% + +0 + +0 + +Command for adding a column comment + + +2F9C3E97-9D17-4AA8-A23F-B766F9C7EE8E +Rename +alter table %TABLE% + rename %OLDCOLN% to %NEWCOLN% +0 + +0 + +Command for renaming a column + + +6484FA2C-0C2D-4297-B18B-C4CE8E6E33DB +Create +alter table %TABLE% + add[ column] %COLUMN% %DATATYPE%[%DEFAULT%? default %DEFAULT%] +0 + +1370501484 +I063968 +Command for adding a column + + +69CDADE9-BDB7-45A9-9992-0DA3A03A3A37 +Drop +0 + +0 + +Command for dropping a column +Not yet supported. Should be +alter table %TABLE% drop[ column] %COLUMN% [restrict | cascade] + + + +827206FF-8FAC-4D38-A0A3-BEA2E85EBDCC +Enable +0 + +0 + +Column allowed +1 + + +A0B859F4-3181-4865-9A5B-80B41DA085CA +ModifyColnDflt +alter table %TABLE% + alter[ column] %COLUMN% [%DEFAULT%?set default %DEFAULT%:drop default] +0 + +0 + +Command for modifying a column default in an alter table statement + + +B18B751A-E445-443A-B4C6-99B933BFD22A +EnableIdentity +0 + +0 + +Identity keyword entry support. 
Identity columns are serial counters maintains by the database +1 + + +73CB8FE3-BCD6-4A0A-9A77-3370E9690128 +SqlListQuery +{OWNER, TABLE, COLUMN, DTTPCODE, LENGTH, PREC, ISMAND, DEFAULT, COMMENT} + +select + pg_get_userbyid(t.relowner), + t.relname, + c.attname, + case(c.attndims) when 0 then d.typname else (select x.typname from pg_type x where x.oid = d.typelem) || '[]' end, + case(c.atttypmod >> 16) when 0 then (c.atttypmod - ((c.atttypmod >> 16)<<16) - 4) else (c.atttypmod >> 16) end, + case(c.atttypmod >> 16) when 0 then 0 else (c.atttypmod - 65536 * (c.atttypmod / 65536) - 4) end, + CAST(NULLIF(c.attnotnull,false) as VARCHAR(10)), + case(c.atthasdef) + when true then (select adsrc::varchar from pg_attrdef f where f.adrelid = t.oid and f.adnum = c.attnum)::varchar + else '' end::varchar as coln, + col_description(t.oid, c.attnum) +from + pg_class t + join pg_attribute c on (c.attrelid = t.oid and c.attnum > 0 and c.attisdropped is false) + join pg_type d on (d.oid = c.atttypid) +where t.relkind = 'r' +[ and pg_get_userbyid(t.relowner) = %.q:OWNER%] +[ and t.relname = %.q:TABLE%] +order by 1, 2, c.attnum +0 + +1614343383 +w00484080 +SQL query to list objects + + +32224D8A-DF82-45F5-B446-39F2272E16EA +SqlChckQuery +{OWNER, TABLE, COLUMN, CONSTNAME, CONSTRAINT} + +select + pg_get_userbyid(t.relowner), t.relname, c.attname, k.conname, k.consrc +from + pg_constraint k + join pg_class t on (t.oid = k.conrelid) + join pg_attribute c on (c.attrelid = t.oid and c.attnum > 0 and c.attnum = k.conkey[1]) +where + k.contype = 'c' + and k.conrelid != 0 + and array_dims(k.conkey) = '[1:1]' -- a column constraint applies on only one column ! +[ and pg_get_userbyid(t.relowner) = %.q:OWNER%] +[ and t.relname = %.q:TABLE%] +[ and c.attname = %.q:COLUMN%] +0 + +1341909104 +mledier +SQL query to reverse object check constraints + + +A7FBC232-1BEB-4789-8E22-0148B2ABA21E +DefineColnChck +[%R%?[[foreign key (%S%) ]references %S%[(%S%)][ with no index]][%ISPKEY%?[constraint %PKNAME% ]primary key ][%ISAKEY%?[constraint %AKNAME% ]unique ]][[constraint %CONSTNAME%] check (%.A:CONSTRAINT%)] + +0 + +0 + +Allows to customize the script of column check constraints + + + + +4F5CF591-46F1-489C-BB52-8456FC8F6387 +MaxConstLen +0 + +0 + +Maximum constraint name length +31 + + +162F26E6-8FF1-42AD-B4E8-F671074E05DF +EnableOption +0 + +0 + +Physical options allowed YES/NO +1 + + +00D9D8AE-E5D1-456C-9E00-6413C2F27450 +Reference +REFR +0 + +0 + +The following system variables are available: + "REFR" // generated code of the reference + "REFNAME" // reference name + "PARENT" // generated code of the parent table + "PNAME" // name of the parent table + "PCODE" // code of the parent table + "CHILD" // generated code of the reference + "CNAME" // name of the child table + "CCODE" // code of the child table + "PQUALIFIER" // qualifier of the parent table. See QUALIFIER + "CQUALIFIER" // qualifier of the child table. See QUALIFIER + "REFRNAME" // Reference name + "REFRCODE" // Reference code + "FKCONSTRAINT" // Reference constraint name (foreign key) + "PKCONSTRAINT" // constraint name of the parent key used to reference object + "CKEYCOLUMNS" // list of parent key columns. Eg: C1, C2, C3 + "FKEYCOLUMNS" // list of child foreign key columns. Eg: + "UPDCONST" // Update declarative constraint. Keywords "restrict", "cascade", "set null" or "set default" + "DELCONST" // Delete declarative constraint. 
Keywords "restrict", "cascade", "set null" or "set default" + "MINCARD" // Min cardinality + "MAXCARD" // Max cardinality + "POWNER" // Parent table owner + "COWNER" // child table owner + "CHCKONCMMT" // TRUE when check on commit is selected on the reference (ASA 6.0 specific) + +For reference joins (couple of column in a reference), +the following system variables are available: + "CKEYCOLUMN" // generated code of the parent table column (primary key) + "FKEYCOLUMN" // generated code of the child table column (foreign key) + "PK" // primary key column generated code + "PKNAME" // primary key column name + "FK" // foreign key column generated code + "FKNAME" // foreign key column name + "AK" // alternate key column code (same as PK) + "AKNAME" // alternate key column name (same as PKNAME) + "COLTYPE" // primary column column datatype + "DEFAULT" // foreign key column default value + + + +EB1D6CAE-4F39-40A9-A085-7E520D794CC3 +Enable +0 + +0 + +Foreign key allowed +1 + + +9CFA076E-382C-4E94-BD87-C8B9A0279178 +FKAutoIndex +0 + +0 + +Foreign key is auto-indexed + + +2C3A75D2-AF8F-4B73-8811-088384CF5768 +ConstName +FK_%.U8:CHILD%_%.U9:REFR%_%.U8:PARENT% +0 + +0 + +Constraint name template for foreign keys + + +EC2AEA60-0853-4A0D-A257-CF973D2C1422 +CheckOnCommit +0 + +0 + +Referential integrity test differed after the COMMIT + + +D2D10DAE-E158-45A1-A6C6-494BE005B975 +Add +[constraint %CONSTNAME% ]foreign key (%FKEYCOLUMNS%) + references %PARENT%[ (%CKEYCOLUMNS%)] + [ on delete %DELCONST%][ on update %UPDCONST%] +0 + +0 + +Command for defining a foreign key + + +78ED96D5-7585-4E89-91E2-BAA53B8B5A28 +DclUpdIntegrity +RESTRICT +CASCADE +SET NULL +SET DEFAULT +NONE +0 + +0 + +Declarative referential integrity constraint allowed for update + + +317E3894-CA93-4320-8BDD-0B71E3C76394 +DclDelIntegrity +RESTRICT +CASCADE +SET NULL +SET DEFAULT +NONE +0 + +0 + +Declarative referential integrity constraint allowed for delete + + +D60B0ACA-00FB-4AB4-809C-38AD4FEADB2D +Create +alter table %TABLE% + add [constraint %CONSTNAME% ]foreign key (%FKEYCOLUMNS%) + references %PARENT%[ (%CKEYCOLUMNS%)] + [ on delete %DELCONST%][ on update %UPDCONST%][%Deferrable%? deferrable[%ForeignKeyConstraintDeferred%? 
initially deferred: initially immediate]] + +0 + +0 + +Command for adding a foreign key + + +CCBCF292-6A8A-496A-AB10-81DC9B1D60A0 +SqlListQuery +{POWNER, PARENT, COWNER, CHILD, CONSTNAME, Deferrable, ForeignKeyConstraintDeferred, UPDCONST, DELCONST, FKEYCOLUMNS, CKEYCOLUMNS} + +select + pg_get_userbyid(p.relowner), + p.relname, + pg_get_userbyid(c.relowner), + c.relname, + k.conname, + case when (k.condeferrable) then 'true' else 'false' end, + case when (k.condeferred) then 'true' else 'false' end, + case(k.confupdtype) when 'r' then 'restrict' when 'c' then 'cascade' when 'n' then 'set null' when 'd' then 'set default' else 'no action' end::varchar as colnA, + case(k.confdeltype) when 'r' then 'restrict' when 'c' then 'cascade' when 'n' then 'set null' when 'd' then 'set default' else 'no action' end::varchar as colnB, + substring(substring(pg_get_constraintdef(k.oid) from 'FOREIGN KEY (.*) REFERENCES') from 2 for char_length(substring(pg_get_constraintdef(k.oid) from 'FOREIGN KEY (.*) REFERENCES')) - 2) as colnC, + substring(substring(pg_get_constraintdef(k.oid) from 'REFERENCES '||p.relname||'(.*)') from 2 for position(')' in substring(pg_get_constraintdef(k.oid) from 'REFERENCES '||p.relname||'(.*)') ) - 2) as colnD +from + pg_constraint k + join pg_class p on (p.oid = k.confrelid) + join pg_class c on (c.oid = k.conrelid) +where + k.contype = 'f' +[ and pg_get_userbyid(c.relowner) = %.q:OWNER%] +[ and c.relname=%.q:TABLE%] +order by 1, 2, 3, 4, 5 +0 + +1341908522 +mledier +SQL query to list objects + + + + +394D6E77-60DA-41C7-89D4-FB9783581615 +PKey +PKEY +0 + +0 + +The following system variables are available: +(parent table items are also available for keys) + "PKEYCOLUMNS" // list of primary key columns. Eg: A, B + "ISPKEY" // TRUE when the key is the primary key of the table + "KEY" // constraint name + "PKEY" // constraint name for primary key + "AKEY" // constraint name for alternate key + "ISMULTICOLN" // TRUE if key has more than one column + "CLUSTER" // keyword cluster + + +FD4344E9-E9CC-4B7C-9BB0-4E4EED0CEE72 +Enable +0 + +0 + +Primary key allowed +1 + + +35F0CA00-8A94-4D42-A911-F9BCFF8C206D +PkAutoIndex +0 + +0 + +Primary key is auto-indexed + + +9E64641C-FC08-4E73-9433-D69D614EC61D +ConstName +PK_%.U27:TABLE% +0 + +0 + +Constraint name template for primary keys + + +7F2E72C1-2E4C-4159-8942-AAFA742A4131 +Add +[constraint %CONSTNAME% ]primary key (%PKEYCOLUMNS%) +0 + +0 + +Command for defining a primary key + + +303ED984-E4BB-4FAA-BF86-95DBBBCCF697 +Create +alter table %TABLE% + add [constraint %CONSTNAME% ]primary key (%PKEYCOLUMNS%) + +0 + +0 + +Command for adding a primary key + + +C02EC114-6CBB-45B1-B4B5-6B7E3CEBED89 +Options +using index tablespace %s +0 + +0 + +Available options for creating a primary key + + +6CBAA878-3E66-4E06-B35F-9BCA1250C13A +Drop +alter table %TABLE% + drop constraint %CONSTNAME% +0 + +0 + +Command for dropping a primary key + + + + +5CA1DDBD-771A-4DEE-99A2-325F82A15288 +Key +KEY +0 + +0 + +The following system variables are available: +(parent table items are also available for keys) + "COLUMNS" // List of columns of the key. 
Eg: "A, B, C" + "ISPKEY" // TRUE when the key is the primary key of the table + "KEY" // constraint name + "PKEY" // constraint name for primary key + "AKEY" // constraint name for alternate key + "ISMULTICOLN" // TRUE if key has more than one column + "CLUSTER" // keyword cluster + + + +B6AA8161-11E3-42C4-AD8C-5B8E0804508A +Enable +0 + +0 + +UNIQUE constraint allowed for tables +1 + + +29F6C46C-6412-4DE9-97FC-6BF9ED12C4F6 +UniqConstAutoIndex +0 + +0 + +UNIQUE constraint is auto-indexed +1 + + +FE710D18-9664-4853-860B-089C2AC56D43 +ConstName +AK_%.U18:AKEY%_%.U8:TABLE% +0 + +0 + +Constraint name template for alternate keys + + +3E8D0AD3-B254-4556-9885-B6D603C45759 +Add +[constraint %CONSTNAME% ]unique (%COLUMNS%) +0 + +0 + +Command for defining an alternate key + + +B946017B-7EB2-4E25-8864-27BFB6774ED0 +Create +alter table %TABLE% + add [constraint %CONSTNAME% ]unique (%COLUMNS%) +0 + +0 + +Command for adding an alternate key + + +BDE692C1-B51D-48EB-81FB-0BFCF3B49572 +Options +using index tablespace %s + +0 + +0 + +Available options for an alternate key + + +619ECBA7-A1D2-4EE3-B8C9-F811E7CC0290 +SqlListQuery +{OWNER ID, TABLE ID, CONSTNAME ID, ISPKEY ID, COLUMNS ...} + +select + pg_get_userbyid(t.relowner), + t.relname, + i.relname, + CAST(NULLIF(x.indisprimary,false) as VARCHAR(10)), + a.attname || ','::varchar +from + pg_index x + join pg_class t on (t.oid = x.indrelid and t.relkind = 'r') + join pg_class i on (i.oid = x.indexrelid) + join pg_attribute a on (a.attrelid = i.oid) +where x.indisprimary is true +[ and pg_get_userbyid(t.relowner)=%.q:OWNER%] +[ and t.relname=%.q:TABLE%] +order by 1, 2, 3, a.attnum + +0 + +1614343383 +w00484080 +SQL query to list objects + + +C32D12FB-648F-4281-B1D7-0BE9540242C9 +Drop +alter table %TABLE% + drop constraint %CONSTNAME% +0 + +0 + +Command for dropping an alternate key + + + + +5C93D4F0-AD9E-4ACC-90D2-F4A90002BE81 +Database +DTBS +0 + +0 + +The following system variables are available: + "DATABASE" // generated code of the database + + + +D00C6FC9-2B52-4462-B5B2-C46605197C05 +Create +create database %DATABASE% +[ [%R%?with ][.Z:[ template[ =] %Template%][ encoding[ =] %.q:Encoding%][ tablespace[ =] %OPTIONS%]]] +0 + +0 + +Command for creating a database. Example: create database %DATABASE% + + +1347A7DC-CC8D-4365-B8B7-AC815C936A87 +OpenDatabase +0 + +0 + +Command for opening a database. Example: open database %DATABASE% + + +883CDA80-551C-43E7-A0FA-19468A55E254 +CloseDatabase +0 + +0 + +Command for closing a database. Example: close database + + +527489B6-A8CA-491E-82A9-FB300D10904D +Drop +drop database %DATABASE% +0 + +0 + +Command for dropping a database. 
Example: drop database %DATABASE% + + +6271B61C-5571-4923-AD35-50AE6044E279 +Enable +0 + +0 + +Database allowed +1 + + +7B94D113-E137-4FF2-B383-A8FB0E129E1D +EnableManyDatabases +0 + +0 + +Many databases allowed +1 + + +43028201-52E6-4DC1-B48D-EBECCDC7EE94 +Options +<tablespace> %s : category=TABLESPACE +0 + +0 + +Available options for creating a database + + +E616EC6E-2767-4410-BC15-764AA6B67647 +SqlListQuery +{DATABASE, COMMENT, Encoding, OPTIONS} + +select + d.datname, + obj_description(d.oid, 'pg_database'), + pg_encoding_to_char(d.encoding), + (select t.spcname from pg_tablespace t where t.oid = d.dattablespace) +from + pg_database d +order by 1 +0 + +0 + +SQL query to list objects + + + + +6D6DC36E-2660-47B5-9731-E97BB1AFC19C +View +VIEW +0 + +0 + +The following system variables are available: + "VIEW" // generated code of the view + "VIEWNAME" // view name + "VIEWCODE" // view code + "VIEWCOLN" // List of columns of the view. Eg: "A, B, C" + "SQL" // SQL text of the view. Eg: Select * from T1 + "VIEWCHECK" // Keyword "with check option" if selected on the view + "SCRIPT" // complete view creation order. Eg: create view V1 as select * from T1 + + + +60C4C986-06CB-4FB0-80C1-7444D0E2DD0F +Create +create[ or replace] view %VIEW%[ (%VIEWCOLN%)] as +%SQL% +0 + +0 + +Command for creating a view. Example: create view %VIEW% + + +D7F80660-A729-463E-8715-6602F92DAA4A +Drop +drop view %VIEW% +0 + +0 + +Command for dropping a view. Example: drop view %VIEW% + + +A2DF1F7F-1FE8-4053-898C-741F30BD0E32 +ViewComment +comment on view %VIEW% is +%.q:COMMENT% + +0 + +0 + +Command for adding a view comment. + + +A65C3C8D-4FF8-4BAC-B525-7B7C0A85BA0E +SqlListQuery +{OWNER, VIEW} + +select + pg_get_userbyid(t.relowner), + t.relname +from + pg_class t +where t.relkind = 'v' +[ and pg_get_userbyid(t.relowner) = %.q:SCHEMA%] +order by 1, 2 +0 + +1341908522 +mledier +SQL query to list objects + + +9E32900C-BF73-416C-8A2B-EF1770CB4D13 +Enable +0 + +0 + +View allowed +1 + + +FAED4860-650D-4088-B0E6-574FD05B32A5 +SqlAttrQuery +{OWNER, VIEW, SQL} + +select + pg_get_userbyid(v.relowner), + v.relname, + pg_get_viewdef(v.oid) +from + pg_class v +where v.relkind = 'v' +[ and pg_get_userbyid(v.relowner) = %.q:OWNER%] +[ and v.relname = %.q:VIEW%] + +0 + +1341908522 +mledier +SQL query to reverse object attributes + + +7B673B74-71BD-423E-80F6-670FFCDA0C25 +SetOwnership +[-- set view ownership +]alter table %VIEW% owner to %OWNER% +; +0 + +0 + +1 + + +4AD2B5CD-4B14-41DE-AA40-2A9977165E28 +AfterCreate +[%OWNER%?%SetOwnership%] +0 + +0 + +Commands executed after create statement + + +B5AABC8F-D6D9-49DE-8200-FC6813E9259F +ReversedStatements +SetOwnership +0 + +0 + +Additional statements to be reversed by script + + + + +315292D9-F98A-4EE5-B761-6515CB98B37C +Domain +PDMDOMN +0 + +0 + +The following system variables are available: + "DOMAIN" // generated code of the domain (also available for columns) +SQL Server specific domain system variables: + "RULENAME" // name of the rule object associated with the domain + "DEFAULTNAME" // name of the default object associated with the domain + + + +609E7A72-CC2A-4D04-A138-92795CBEACC9 +Create +[%Stereotype%=CompositeType?create type [%QUALIFIER%]%DOMAIN% as (%CompositeDefinition%):[%Stereotype%=BaseType?create type [%QUALIFIER%]%DOMAIN% ([.Z:[ + input=%ExtTypeInput%,][ + output=%ExtTypeOutput%,][ + internallength=%ExtTypeLength%][, + default=%DEFAULT%][, + element=%ExtTypeElement%][, + delimiter=%.q:ExtTypeDelimiter%][, + send=%ExtTypeSend%][, + 
receive=%ExtTypeReceive%][%ExtTypePassedByValue%?, passedbyvalue]] +):create domain [%QUALIFIER%]%DOMAIN%[ as] %DATATYPE%]] + +0 + +0 + +Command for creating a user defined data type + + +C4EF9C30-D684-4430-AE35-A4EE0A89648B +Maxlen +0 + +0 + +Maximum object code length +30 + + +EB4A3477-D194-4ADC-8684-4E398F454C2E +Enable +0 + +0 + +User defined data types allowed +1 + + +46CB428B-DC23-4DBE-98CB-EA7BD9DA1E64 +Drop +[%Stereotype%=BaseType?drop type %DOMAIN%:drop domain %DOMAIN%] +0 + +0 + +Command for dropping a user defined data type + + +F776AAE4-1B7D-4417-ADA0-925918FA8C46 +SqlListQuery +{OWNER, DOMAIN, DTTPCODE, Stereotype} + +select + pg_get_userbyid(d.typowner), + d.typname, + case (d.typtype) when 'd' then (select t.typname from pg_type t where t.oid = d.typBaseType) else '' end, + case (d.typtype) when 'b' then 'BaseType' when 'c' then 'CompositeType' else '' end +from + pg_type d + left outer join pg_class r on (r.oid = d.typrelid) +where + (d.typtype in ('b', 'd') or (d.typtype = 'c' and r.relkind = 'c')) +[ and pg_get_userbyid(d.typowner) = %.q:SCHEMA%] +order by 1, 2 +0 + +1341908522 +mledier +SQL query to list objects + + +AFA53FAE-E05C-4C4B-BB73-A8EC404CFFE6 +EnableOwner +0 + +0 + +Owner allowed +1 + + +E2BAA09E-4664-4153-A28C-A206743C8BD5 +EnableCheck +0 + +0 + +Allows the creation of checks on the domain +1 + + +DA4BAC34-7D11-4D74-9A58-5142566F9D88 +SqlCompositeTypeColnList +{OWNER ID, DOMAIN ID, CompositeDefinition ...} + +select + pg_get_userbyid(r.relowner), + r.relname, + c.attname || ' ' || t.typname || ', ' +from + pg_class r + join pg_attribute c on (c.attrelid = r.oid) + join pg_type t on (t.oid = c.atttypid) +where + r.relkind = 'c' +[ and r.relname = %.q:DOMAIN%] +[ and pg_get_userbyid(r.relowner) = %.q:OWNER%] +order by 1, 2, c.attnum +0 + +0 + +1 + + +F67508D3-3C78-4C24-BC4B-55D90C64689A +ReversedQueries +SqlCompositeTypeColnList +0 + +0 + +Additional attributes queries to be called by ODBC + + +330B7CDB-EF27-4ADE-BBFB-56329E4AE80A +UddtComment +comment on [%Stereotype%=BaseType?type:[%Stereotype%=CompositeType?type:domain]] %DOMAIN% is +%.q:COMMENT% +0 + +0 + +command for adding a user-defined data type comment + + + + +2D803C02-6E8E-42E3-884C-99D8D9D11B0A +Procedure +PROC +0 + +0 + +The following system variables are available: + "PROC" // generated code of the procedure (also available for trigger when the trigger is implemented with a procedure) + "FUNC" // generated code of the procedure if the procedure is a function (with return value) + "PROCPRMS" // list of parameters of the procedure (also available for function) + + + +4975CC75-CAB7-4FAC-9989-EC181498F4EB +Enable +0 + +0 + +Procedure allowed +1 + + +B227AD93-0969-43CE-B311-735CF6FA3BA7 +EnableFunc +0 + +0 + +Function allowed +1 + + +FDA1A9E3-3694-48FD-A788-228D53B19829 +Maxlen +0 + +0 + +Maximum object code length +31 + + +567B59B0-D391-4E8B-B6EB-F9280D8458C8 +MaxFuncLen +0 + +0 + +Function name length +31 + + +93FB86F2-AFA3-40FB-A122-704F47E14567 +DropFunc +drop function %FUNC% ( <type> ) +0 + +0 + +Command for dropping a function. 
Example: drop function %FUNC% + + +68DA6237-E801-4609-8DF3-9913541D763C +CustomFunc +CREATE FUNCTION %FUNC% ( <type> ) + RETURNS <type> + AS '%SCRIPT%' + LANGUAGE '%ProcLanguage%' +; +0 + +0 + +Command for creating a function + + +251982F3-DE0C-4FAA-A131-7DBFE0759575 +SqlListQuery +{OWNER, PROC, FUNC} + +select + pg_get_userbyid(p.proowner), + p.proname, + case(pronargs) when 0 then '' else p.proname end +from + pg_proc p + join pg_language l on (l.oid = p.prolang and l.lanname <> 'internal') +[where pg_get_userbyid(p.proowner) = %.q:SCHEMA%] +order by 1, 2, 3 +0 + +0 + +SQL query to list objects + + +192AC9A1-15B6-4DF7-BC4C-B516BDAF88FB +SqlAttrQuery +{OWNER, PROC, ProcLanguage, TRGDEFN} + +select + pg_get_userbyid(p.proowner), + p.proname, + l.lanname, + p.prosrc::varchar +from + pg_proc p + join pg_language l on (l.oid = p.prolang and l.lanname <> 'internal') +where 1=1 +[ and pg_get_userbyid(p.proowner) = %.q:OWNER%] +[ and p.proname = %.q:PROC%] +0 + +0 + +SQL query to reverse object attributes + + +40FF2B3A-67ED-44EF-A265-8A9E228E7687 +Create +create procedure %PROC%[(%PROCPRMS%)] +as %.q:TRGDEFN% +0 + +0 + +Command for creating a procedure. Example: create procedure %PROC% %TRGDEFN% + + +AAEB8CBB-C876-4B6D-BE50-E6E73BC964D8 +CreateFunc +create[ or replace] function %FUNC%[(%PROCPRMS%)] +%TRGDEFN% +0 + +0 + +Command for creating a function. Example: create function %FUNC% %TRGDEFN% + + + + +8FF2CB5D-C3CB-4D91-BE5B-64EE3E717750 +User +USER +0 + +0 + +The following system variables are available: + "USER" // generated code of the user + + + +E6F5A60D-CA9F-44E1-8E1C-126237EE8264 +Maxlen +0 + +0 + +Maximum object code length +31 + + +D9017676-BDD5-453E-924C-0D0D36CC2448 +Enable +0 + +0 + +User allowed +1 + + +F8617015-368F-4D56-9D87-9FE3DADBC19A +SqlListQuery +{USER, Schema, SysId, CreateDB, CreateUser, Validity, Owner} + +select + usename, 'false', + usesysid, + case (usecreatedb) when true then 'true' else 'false' end, + case (usesuper) when true then 'true' else 'false' end, + to_char(valuntil, 'DD/MM/YYYY HH24:MI:SS'), + '' +from + pg_user +[where + UPPER(usename) = %.qU:SCHEMA%] +union select + nspname, 'true', + 0, + '', + '', + '', + pg_get_userbyid(nspowner) +from + pg_namespace +[where UPPER(nspname) = %.qU:SCHEMA%] +order by 1 +0 + +0 + +SQL query to list objects + + +AE3804C2-2F0E-498E-AE13-BFE9F75A1DB7 +Create +[%CanCreate%?[%Schema%?create schema %USER%[ authorization %Owner.Code%]:create user %USER%[%R%? with][.Z:[ sysid %SysId%][%CreateDB%? createdb:[%R%? nocreatedb]][%CreateUser%? createuser:[%R%? nocreateuser]][ + [%EncryptedPassword%? encrypted: unencrypted] password %.q:PASSWORD%][ valid until %.q:Validity%]] +]] + +0 + +1246366826 +mledier +Command for creating a user + + +0C08C855-C137-4B0C-91DD-5C84925F17C8 +Drop +[%CanCreate%?drop [%Schema%?schema:user] %USER%] +0 + +1246367139 +mledier +Command for dropping a user + + + + +3F101A42-CA85-4BEA-A518-927051455323 +Trigger +TRGR +0 + +0 + +The following system variables are available: +(parent table items are also available for indexes) + trigger object specific items + "ORDER" // order number of the trigger (to sort the triggers when the DBMS supports more than one trigger of one type) + "TRIGGER" // generated code of trigger + "TRGTYPE" // trigger type (keywords "beforeinsert", "afterupdate", ...) 
+ "TRGEVENT" // trigger event (keywords "insert", "update", "delete") + "TRGTIME" // trigger time (keywords NULL, "before", "after") + inside the body of the trigger, macros redefine variables on objects + "REFNO" // reference position in the list of references of the table + customized error messages management + "ERRNO" // error number for standard error + "ERRMSG" // error message for standard error + "MSGTAB" // name of the table containing user-defined error messages + "MSGNO" // in the user-defined error table, name of the column containing the error numbers + "MSGTXT" // in the user-defined error table, name of the column containing the error messages + "SCRIPT" // body of trigger or procedure + "TRGDEFN" // complete body of the trigger definition. Contains variable declarations and body of trigger or procedure + + + +E19DFCA9-7B9C-4C66-94E1-6219E6D60F2A +EnableMultiEvent +0 + +0 + +Multiple event allowed +1 + + +7F592BF2-1A0B-45C8-85FC-65AB6BF27D7A +Drop +drop trigger %TRIGGER% on [%QUALIFIER%]%TABLE% +0 + +0 + +Command for dropping a trigger. Example: drop trigger %TRIGGER% + + +6D51FD27-F0DA-4172-B79C-881FD8691D28 +SqlListQuery +{OWNER, TABLE, TRIGGER, TRGEVENT, TRGTIME} + +select + pg_get_userbyid(t.relowner), + t.relname, + g.tgname, + case (g.tgtype & 28) + when 4 then 'insert' when 8 then 'delete' when 16 then 'update' + when 12 then 'insert or delete' when 20 then 'insert or update' when 24 then 'delete or update' + when 28 then 'insert or delete or update' else '' end::varchar as coln, + case (g.tgtype & 2) + when 2 then 'before' when 0 then 'after' else '' end::varchar +from + pg_trigger g + join pg_class t on (t.oid = g.tgrelid and t.relkind = 'r') +where g.tgconstraint = 0 +[ and pg_get_userbyid(t.relowner) = %.q:SCHEMA%] +order by 1, 2 +0 + +1341907281 +mledier +SQL query to list objects + + +1A74C1FB-8DBF-49E1-8CC1-C218CBB058A6 +SqlAttrQuery +{OWNER, TABLE, TRIGGER, SCRIPT} + +select + pg_get_userbyid(t.relowner), + t.relname, + g.tgname, + pg_get_triggerdef(g.oid) +from + pg_trigger g + join pg_class t on (t.oid = g.tgrelid and t.relkind = 'r') +where g.tgconstraint = 0 +[ and pg_get_userbyid(t.relowner) = %.q:SCHEMA%] +[ and t.relname = %.q:TABLE%] +[ and g.tgname = %.q:TRIGGER%] +order by 1, 2 +0 + +1341909260 +mledier +SQL query to reverse object attributes + + +CE4F0440-B491-4676-9435-73737B55A8FF +UseErrorMsgTable +let errno = %ERRNO%; +select %MSGTXT% +into errmsg +from %MSGTAB% +where %MSGNO% = %ERRNO%; +raise exception -746, 0, errmsg; +0 + +0 + +Errors handling using an error messages table + + +5989F402-A130-4D39-AA84-E13023A49577 +UseErrorMsgText +let errno = %ERRNO%; +let errmsg = "%ERRMSG%"; +raise exception -746, 0, errmsg; +0 + +0 + +Errors handling using fixed error messages + + +00CC31F8-67B8-419E-82C9-CB8BDBE3C7ED +Create +create trigger %TRIGGER% %TRGTIME% %TRGEVENT% on %TABLE% +%TRGDEFN% +0 + +0 + +Command for creating a trigger. Example: create trigger on [%QUALIFIER%]%TABLE% %TRGDEFN% + + +BCE86A5F-400D-4EF5-AFEA-7A66DE9E65D1 +Maxlen +0 + +0 + +Maximum object code length +30 + + +5D052D24-6CFE-4A8F-8662-E5599DBEB469 +DefaultTriggerName +%TEMPLATE%_%.L:TABLE% +0 + +0 + +Default trigger name + + +802E3B4B-7A6E-451C-B065-37C52B07E693 +Enable +0 + +0 + +Trigger allowed +1 + + +34C945EE-91C3-44A0-990B-52391394D184 +EventDelimiter +0 + +0 + +Events separation character. +or + + +DE6F260F-ADD8-4CE8-8FC3-58D9D68CE6C0 +Time +before +after +0 + +0 + +Extended trigger times list. 
+ + +A19C9B0A-3FF2-43AE-8340-BF77D513A2E9 +Event +insert +delete +update +0 + +0 + +Extended trigger events list. + + + + +7D1D2CDF-E58B-42B2-9016-5108F01CE18C +Tablespace +TSPC +0 + +0 + +The following system variables are available: + "TABLESPACE" // generated code of the tablespace + + + +0000D97B-6EF3-4C81-AC59-0AC769EF1889 +Enable +0 + +0 + +Tablespace allowed +1 + + +2BE7A654-B74D-4D5B-AA49-D6D2A546B6FE +Create +create tablespace %TABLESPACE% +[ owner %TbspOwner% +] location %.q:TbspLocation% +0 + +0 + +Command for creating a tablespace. Example: create tablespace %TABLESPACE% + + +0E7909E4-9E5E-4A10-80C0-CC8E1F8DD69C +Drop +drop tablespace %TABLESPACE% +0 + +0 + +Command for dropping a tablespace. Example: drop tablespace %TABLESPACE% + + +CD05E302-8958-41BF-9042-81185E8C6753 +SqlListQuery +{TABLESPACE, TbspOwner, TbspLocation} + +select + t.spcname, + pg_get_userbyid(t.spcowner), + t.spclocation +from + pg_tablespace t +order by 1 +0 + +0 + +SQL query to list objects + + + + +7837BA9D-D967-437A-95C3-C44DA43469A8 +Group +GRPE +0 + +0 + +Manages the group object. + + +B2F69FF7-ECB8-4F16-82E5-E8BE6E333B20 +Enable +0 + +0 + +Group allowed +1 + + +92FE7ABE-3D9C-4D59-BBB1-0AE7F543E719 +Create +create group %GROUP% +[ [%R%?with ] sysid %SysId%] +0 + +0 + +Command for creating a group. + + +6B19B14B-B135-4310-9025-3783B7F72E25 +Drop +drop group %GROUP% +0 + +0 + +Command for dropping a group. + + +BCE6A1C0-AD38-4AF6-8918-112A9109D421 +SqlListQuery +{GROUP} + +select + groname +from + pg_group +order by 1 +0 + +0 + +SQL query to list objects + + +44322879-5000-42F5-AB39-4DE022512CA5 +Bind +alter group %GROUP% add user %USER% +0 + +0 + +Command for adding a member to a group + + +880E598B-C369-4F64-8BEF-85689387E12A +Unbind +alter group %GROUP% drop user %USER% +0 + +0 + +Command for removing a member from a group + + +B29585AC-4B80-456A-97F0-5FBCD48641A5 +SqlListChildrenQuery +{GROUP, MEMBER} +select + g.groname, + u.usename +from + pg_group g + join pg_user u on (u.usesysid = any(g.grolist)) +[where g.groname = %.q:GROUP%] + +0 + +0 + +SQL query to list members of the group + + + + +EDE2756E-AD00-4A2F-B677-CF1436597E81 +GenerationOrder +<Order> +<Metaclass Name="Storage" /> +<Metaclass Name="Tablespace" /> +<Metaclass Name="Database"> + <Metaclass Name="Database::Permission" /> +</Metaclass> +<Metaclass Name="Role" /> +<Metaclass Name="Group" /> +<Metaclass Name="User" /> +<Metaclass Name="BusinessRule" /> +<Metaclass Name="Sequence"> + <Metaclass Name="Sequence::Permission" /> +</Metaclass> +<Metaclass Name="AbstractDataType"> + <Metaclass Name="AbstractDataType::Permission" /> +</Metaclass> +<Metaclass Name="PhysicalDefault" /> +<Metaclass Name="PhysicalDomain" /> +<Metaclass Name="Table"> + <Metaclass Name="Table::Key" /> + <Metaclass Name="Table::Index" /> + <Metaclass Name="Table::Permission" /> +</Metaclass> +<Metaclass Name="View"> + <Metaclass Name="View::ViewIndex" /> + <Metaclass Name="View::Permission" /> +</Metaclass> +<Metaclass Name="Reference" /> +<Metaclass Name="JoinIndex" /> +<Metaclass Name="Synonym"> + <Metaclass Name="Synonym::Permission" /> +</Metaclass> +<Metaclass Name="Dimension" /> +<Metaclass Name="Procedure"> + <Metaclass Name="Procedure::Permission" /> +</Metaclass> +<Metaclass Name="DatabasePackage"> + <Metaclass Name="DatabasePackage::Permission" /> +</Metaclass> +<Metaclass Name="Table::Trigger" /> +<Metaclass Name="View::Trigger" /> +<Metaclass Name="WebService" /> +</Order> +0 + +1263300423 +mledier +This list defines the objects generation order. 
+ + + + +1C9264F0-50A8-45F8-BAB8-A6810D27D506 +DataType +0 + +0 + +Contains data type translation entries. These entries list the correspondence between internal data types and the target database data types + + +0BBE373C-5AAD-4A7A-853A-7BE50EA05FC0 +AmcdDataType +0 + +1240989401 +mledier +Data types translation table from internal data types to target database data types. + %n is the length of the data type + %s is the size of the data type + %p is the precision of the data type +<UNDEF>=<Undefined> +A=CHAR(1) +A%n=CHAR(%n) +VA%n=VARCHAR(%n) +LA=VARCHAR(1) +LA%n=VARCHAR(%n) +LVA=VARCHAR(1) +LVA%n=VARCHAR(%n) +BT=INT2 +BT%n=INT2 +SI=INT2 +I=INT4 +LI=INT8 +N=NUMERIC +N%n=NUMERIC(%n) +N%s,%p=NUMERIC(%s,%p) +DC=DECIMAL +DC%n=DECIMAL(%n) +DC%s,%p=DECIMAL(%s,%p) +SF=FLOAT4 +F=FLOAT8 +F%n=FLOAT8 +LF=FLOAT8 +MN=MONEY +MN%n=MONEY +MN%s,%p=MONEY +D=DATE +T=TIME +DT=DATE +TS=DATE +BL=BOOL +NO=SERIAL +NO%n=SERIAL +BIN%n=CHAR(%n) +LBIN=CHAR +LBIN%n=CHAR(%n) +TXT=TEXT +TXT%n=TEXT +MBT=CHAR +MBT%n=CHAR(%n) +VMBT=VARCHAR(254) +VMBT%n=VARCHAR(%n) +PIC=CHAR(254) +PIC%n=CHAR(%n) +BMP=CHAR(254) +BMP%n=CHAR(%n) +OLE=CHAR(254) +OLE%n=CHAR(%n) +*=CHAR(10) +POINT=POINT +LINE=LINE +SEGMENT=LSEG +RECTANGLE=BOX +POLYGON=POLYGON +CIRCLE=CIRCLE +<UNDEF> +A +A%n +VA%n +LA +LA%n +LVA +LVA%n +BT +BT%n +SI +I +LI +N +N%n +N%s,%p +DC +DC%n +DC%s,%p +SF +F +F%n +LF +MN +MN%n +MN%s,%p +D +T +DT +TS +BL +NO +NO%n +BIN%n +LBIN +LBIN%n +TXT +TXT%n +MBT +MBT%n +VMBT +VMBT%n +PIC +PIC%n +BMP +BMP%n +OLE +OLE%n +POINT +LINE +SEGMENT +RECTANGLE +POLYGON +CIRCLE + + + +8152888C-EC21-4CE9-BE44-BB0185B56336 +PhysDataType +0 + +1240989401 +mledier +Data types translation table from target database data types to internal data types. +<Undefined>=<UNDEF> +DECIMAL=DC +DECIMAL(%n)=DC%n +DECIMAL(%s,%p)=DC%s,%p +FLOAT4=F7 +FLOAT8=F16 +INT2=SI +INT4=I +INT8=LI +NUMERIC=N +NUMERIC(%n)=N%n +NUMERIC(%s,%p)=N%s,%p +SERIAL=NO +MONEY=MN +CHAR=A1 +CHAR(%n)=A%n +TEXT=TXT +VARCHAR(%n)=VA%n +TIMESTAMP=TS +TIMESTAMP WITH TIME ZONE=TS +INTERVAL=DT +DATE=D +TIME=T +TIME WITH TIME ZONE=T +BOOL=BL +POINT=POINT +LINE=LINE +LSEG=SEGMENT +BOX=RECTANGLE +PATH=A248 +POLYGON=POLYGON +CIRCLE=CIRCLE +CIDR=A10 +INET=A10 +*=A10 +<Undefined> +DECIMAL +DECIMAL(%n) +DECIMAL(%s,%p) +FLOAT4 +FLOAT8 +INT2 +INT4 +INT8 +NUMERIC +NUMERIC(%n) +NUMERIC(%s,%p) +SERIAL +MONEY +CHAR +CHAR(%n) +TEXT +VARCHAR(%n) +TIMESTAMP +TIMESTAMP WITH TIME ZONE +INTERVAL +DATE +TIME +TIME WITH TIME ZONE +BOOL +POINT +LINE +LSEG +BOX +PATH +POLYGON +CIRCLE +CIDR +INET + + + +7743713B-3320-4EE4-9E87-3AFB729355D7 +PhysDttpSize +0 + +0 + +Table of storage sizes of target database data type + + +87D4DC23-7078-4D7D-94A4-B3210DA8CC3E +OdbcPhysDataType +0 + +0 + +Data types translation table from ODBC data types to target database data types. +bpchar(%n)=char(%n) +bpchar(%n) + + + + + + + +CDC2B06C-5AC1-4D4C-88AF-2C88898BDB26 +Profile +0 + +0 + +1 + + +815871BB-A068-41E2-BA52-205B50A0AD52 +Shared +Shared +0 + +0 + +1 + + +ExtendedAttributeTypeTargetItem +8995054D-72B6-4201-B86A-98DB680FB1BD +Extended Attribute Types +0 + +0 + +1 + + +94C33C46-EC2F-4743-8D37-18C5B6666938 +ProcLanguageList +0 + +0 + +Predefined list of language that the function is implemented in. (See also Extended Attribute ProcLanguage.) +1 +sql +sql +c +internal + + +9984F847-6977-4648-826F-78BC26C6C4CD +TemporaryState +0 + +0 + +This list stores all possible states for Temporary attribute. +Optionally, GLOBAL or LOCAL can be written before TEMPORARY or TEMP. This makes no difference in opengauss. 
+1 +temp +temporary +local temp +local temporary +global temp +global temporary + + + + + + +1C98415B-16D6-425F-8303-C2FF48130BBE +PhysicalDomain +0 + +0 + +1 + + +StereotypeTargetItem +ADAC425E-EB22-4640-8A4F-D9CD71687A79 +Stereotypes +0 + +0 + +1 + + +0F50EE1C-651C-4508-B05E-AA4FB19D3130 +BaseType +0 + +0 + +1 + + +ExtendedAttributeTargetItem +7C56205A-F82D-4439-9C47-AA7E39B8C1B9 +Extended Attributes +0 + +0 + +1 + + +3954A652-B44B-4642-88E7-CE8099392324 +ExtTypeInput +0 + +0 + +The name of a function, created by CREATE FUNCTION, which converts data from its external form to the type's internal form. +1 +12 +0 + + +D596DD04-D85D-4A04-A7EC-349BC723BD2F +ExtTypeOutput +0 + +0 + +The name of a function, created by CREATE FUNCTION, which converts data from its internal form to a form suitable for display. +1 +12 +0 + + +B5F0DA75-D422-4AE5-97E2-7379ACA74F9D +ExtTypeLength +0 + +0 + +A literal value, which specifies the internal length of the new type. +1 +10 +0 + + +BE7B22DF-64C1-4C0B-B2EA-AF35795600FA +ExtTypeElement +0 + +0 + +The type being created is an array; this specifies the type of the array elements. +1 +12 +0 + + +DE189C84-B4B4-44F5-80D7-794AE7E5FAA8 +ExtTypeDelimiter +0 + +0 + +The delimiter character for the array. +1 +12 +0 + + +00878568-A073-4D54-A848-DD34908584D5 +ExtTypeSend +0 + +0 + +The name of a function, created by CREATE FUNCTION, which converts data of this type into a form suitable for transmission to another machine. +1 +12 +0 + + +A557019E-E970-424C-8772-69236830584E +ExtTypeReceive +0 + +0 + +The name of a function, created by CREATE FUNCTION, which converts data of this type from a form suitable for transmission from another machine to internal form. +1 +12 +0 + + +DA5B44D4-E590-4A53-828F-EC8963669BB2 +ExtTypePassedByValue +0 + +0 + +indicates that operators and functions which use this data type should be passed an argument by value rather than by reference. 
+1 +FALSE +0 + + + + +FormTargetItem +EBF9D66B-A877-4182-902E-FDD810E82BF1 +Forms +0 + +0 + +1 + + +1402B726-1615-42A2-9D4D-DFD6811689A2 +opengauss +<Form > + <ExtendedAttribute Attribute="ExtTypeLength" AttributeID="{B5F0DA75-D422-4AE5-97E2-7379ACA74F9D}" Name="ExtTypeLength" Caption="Length" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="ExtTypeElement" AttributeID="{BE7B22DF-64C1-4C0B-B2EA-AF35795600FA}" Name="ExtTypeElement" Caption="Array Element type" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="ExtTypeDelimiter" AttributeID="{DE189C84-B4B4-44F5-80D7-794AE7E5FAA8}" Name="ExtTypeDelimiter" Caption="Array delimiter" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="ExtTypePassedByValue" AttributeID="{DA5B44D4-E590-4A53-828F-EC8963669BB2}" Name="ExtTypePassedByValue" Caption="By Value" GrayHiddenAttributes="No" /> + <Separator Name="Separator1" /> + <ExtendedAttribute Attribute="ExtTypeInput" AttributeID="{3954A652-B44B-4642-88E7-CE8099392324}" Name="ExtTypeInput" Caption="Input function" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="ExtTypeOutput" AttributeID="{D596DD04-D85D-4A04-A7EC-349BC723BD2F}" Name="ExtTypeOutput" Caption="Output function" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="ExtTypeSend" AttributeID="{00878568-A073-4D54-A848-DD34908584D5}" Name="ExtTypeSend" Caption="Send function" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="ExtTypeReceive" AttributeID="{A557019E-E970-424C-8772-69236830584E}" Name="ExtTypeReceive" Caption="Receive function" GrayHiddenAttributes="No" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + + + +6DAAC25D-4FAB-425C-A545-5C5DD08DCFFF +CompositeType +0 + +0 + +1 + + +ExtendedAttributeTargetItem +4F0F83D1-D45D-4A84-9904-35BB9E96F981 +Extended Attributes +0 + +0 + +1 + + +F09B1253-9E0E-48DD-94C5-7EB645E424C9 +CompositeDefinition +0 + +0 + +The composite type is specified by a list of attribute names and data types. This is essentially the same as the row type of a table, but using CREATE TYPE avoids the need to create an actual table when all that is wanted is to define a type. A stand-alone composite type is useful as the argument or return type of a function. +1 +12 +0 + + + + +FormTargetItem +EF4CD3F3-26DA-4F9D-8318-D1BBA1AFE4EB +Forms +0 + +0 + +1 + + +166747AD-2DF7-46FB-A33D-1DF60DFCFADB +opengauss +<Form > + <ExtendedAttribute Attribute="CompositeDefinition" AttributeID="{F09B1253-9E0E-48DD-94C5-7EB645E424C9}" Name="Definition" Caption="Definition" GrayHiddenAttributes="No" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + + + + + + + +CCF5FE55-2BCB-4591-A512-B232444428CF +Procedure +0 + +0 + +1 + + +ExtendedAttributeTargetItem +08020C9D-AE04-4063-ADDF-3BB6FB1FF63D +Extended Attributes +0 + +0 + +1 + + +09E77E7F-0834-4FFF-A0EB-3FE1A3FA16B1 +ProcLanguage +0 + +0 + +The name of the language that the function is implemented in. May be SQL, C, internal, or the +name of a user-defined procedural language. (See also extended attribute type ProcLanguageList.) +1 +-1 +sql +0 + + + + + + + +FormTargetItem +0BA45BFD-1CF5-4D99-B6FB-AB6E8B510D0F +Forms +0 + +0 + +1 + + +632C0A96-5390-4ABF-B891-B723954AFA3D +opengauss +<Form > + <GroupBox Name="GroupBox1" Caption="Language" ControlAsLabel="No" > + <StaticText Name="StaticText1" Caption="Choose here the name of the language that the function is implemented in." /> + <StaticText Name="StaticText3" Caption="May be SQL, C, internal, or the name of a user-defined procedural language." 
/> + <ExtendedAttribute Name="ProcLanguage" Caption="Language" Attribute="ProcLanguage" AttributeID="{09E77E7F-0834-4FFF-A0EB-3FE1A3FA16B1}" GrayHiddenAttributes="Yes" /> + </GroupBox> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + + + +D6A943D2-2621-4BAD-8B30-F90E7FC2FB95 +Tablespace +0 + +0 + +1 + + +ExtendedAttributeTargetItem +8BA82F30-4EA2-4A6D-B7B9-D5EDE8FBA4C6 +Extended Attributes +0 + +0 + +1 + + +6A4623D4-04FD-49E4-BBB2-26B246E58EAF +TbspLocation +0 + +0 + +The directory that will be used for the tablespace. The directory must be specified by an absolute path name. +1 +12 +0 + + +8B7D44B4-EC82-41B9-9443-CC961DEE9E81 +TbspOwner +0 + +0 + +The name of the user who will own the tablespace. If omitted, defaults to the user executing the command. Only superusers may create tablespaces, but they can assign ownership of tablespaces to non-superusers. +1 +12 +0 + + + + +FormTargetItem +8C308551-D020-4376-A081-F598F6B78A23 +Forms +0 + +0 + +1 + + +83A3A92E-36CE-46F3-A9F6-F04DBFBAC1C4 +opengauss +<Form > + <ExtendedAttribute Attribute="TbspLocation" AttributeID="{6A4623D4-04FD-49E4-BBB2-26B246E58EAF}" Name="TbspLocation" Caption="Location" GrayHiddenAttributes="No" /> + <ExtendedAttribute Attribute="TbspOwner" AttributeID="{8B7D44B4-EC82-41B9-9443-CC961DEE9E81}" Name="TbspOwner" Caption="Owner" GrayHiddenAttributes="No" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + +CustomCheckTargetItem +A643B629-0C06-4935-97B2-BECCC51FA9D4 +Custom Checks +0 + +0 + +1 + + +4447EEA6-F62F-416E-9A45-E707E0A8A3EF +Mandatory Tablespace Location +0 + +0 + +1 +This check ensures that the Location extended attribute is set for the tablespace +The following tablespaces have no location defined +Function %Check%(obj) + ' Check that the extended attribute "TbspLocation" is filled. + ' The contents of the attribute is not controlled. + %Check% = (obj.GetExtendedAttribute("TbspLocation") <> "") +End Function +Function %Fix%(obj, outmsg) + ' Implement your automatic correction on <obj> here + ' filling <outmsg> as you wish + ' and return True if successful. + + outmsg = "Automatic correction not implemented" + + %Fix% = False +End Function + + + + + + +C5C915C9-3817-4DDB-95E2-5CCBD01C091C +IndexColumn +0 + +0 + +1 + + +CustomCheckTargetItem +6B0A0403-6F86-4876-9CB0-9AED66E3CA2E +Custom Checks +0 + +0 + +1 + + +8422D3FA-4033-45AA-95A4-F0DDA1A40A64 +Index expression enclosed in parenthesis +0 + +0 + +1 +This check ensures that index column expression are always enclosed in parenthesis +The following indexes column have an invalid expression: +Function %Check%(obj) + if (obj.Expression <> "") then + %Check% = (left(obj.Expression, 1) & right(obj.Expression, 1) = "()") + else + %Check% = True + end if +End Function +Function %Fix%(obj, outmsg) + obj.Expression = "( " & obj.Expression & " )" + outmsg = "Parenthesis have been added around column index expression" + + %Fix% = True +End Function +1 +1 + + + + + + +ACD56323-30AB-4823-B61B-03AD25AF177B +Table +0 + +0 + +1 + + +ExtendedAttributeTargetItem +7FB938DF-E6B5-4DFA-A865-A7175805E4D6 +Extended Attributes +0 + +0 + +1 + + +04B573C8-8062-4FFC-8C07-AA34F76050C2 +Temporary +0 + +0 + +If specified, the table is created as a temporary table. 
Temporary tables are automatically dropped at the end of a session, or optionally at the end of the current transaction +1 +-1 +0 + + + + + + + +FormTargetItem +033A27B6-2884-45DD-83BC-65E79E967C47 +Forms +0 + +0 + +1 + + +4E49596A-0533-4E72-8AE6-1315208D0BBF +opengauss +<Form > + <ExtendedAttribute Name="Temporary" Caption="Temporary state" Attribute="Temporary" AttributeID="{04B573C8-8062-4FFC-8C07-AA34F76050C2}" GrayHiddenAttributes="Yes" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + +MethodTargetItem +B74022BD-ECA7-4557-B8D0-8713FD87874B +Methods +0 + +0 + +1 + + +3C7A869B-607A-433B-859A-748E99F6EAEE +SetPublicSchema +0 + +1244729286 +mledier +1 +Sub %Method%(obj) + Dim pub + for each pub in obj.Model.Users + if (LCase(pub.Code) = "public") then Exit For + next + if Not(isObject(pub)) then + Set pub = obj.Model.Users.CreateNew() + pub.SetExtendedAttribute "%CurrentTargetCode%.Schema", True + pub.SetNameAndCode "public", "public", False + end if + Set obj.Owner = pub +End Sub + + + + + + +269A9BBC-165B-467F-ABC9-DD53C67FC88F +Reference +0 + +0 + +1 + + +ExtendedAttributeTargetItem +FB33CA15-0879-43BA-8B1F-545AD3EAFDFA +Extended Attributes +0 + +0 + +1 + + +6D3919F1-F3EA-49D8-8E50-7AC8380DC462 +Deferrable +0 + +0 + +This controls whether the constraint can be deferred. A constraint that is not deferrable will be checked immediately after every command. Checking of constraints that are deferrable may be postponed until the end of the transaction. +Only foreign key constraints currently accept this clause. All other constraint types are not deferrable. +1 +false +0 + + +E93DB726-E4A4-46A4-9219-548CAA66F2A0 +ForeignKeyConstraintDeferred +0 + +0 + +If a constraint is deferrable, this clause specifies the default time to check the constraint. +False means the constraint is INITIALLY IMMEDIATE, it is checked after each statement. This is the default. +True means the constraint is INITIALLY DEFERRED, it is checked only at the end of the transaction. +1 +false +0 + + + + +FormTargetItem +E4F188B9-21A4-4DC5-B9D0-1298AAC67E48 +Forms +0 + +0 + +1 + + +86AF89FA-E313-4725-AE1E-08C0AAFEE0BB +opengauss +<Form > + <ExtendedAttribute Name="Deferrable" Caption="Deferrable" Attribute="Deferrable" AttributeID="{6D3919F1-F3EA-49D8-8E50-7AC8380DC462}" GrayHiddenAttributes="Yes" /> + <ExtendedAttribute Name="ForeignKeyConstraintDeferred" Caption="Foreign key constraint deferred" Attribute="ForeignKeyConstraintDeferred" AttributeID="{E93DB726-E4A4-46A4-9219-548CAA66F2A0}" GrayHiddenAttributes="Yes" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + + + +7F47B72E-AE21-435D-83AC-1FCFD007BBEB +Database +0 + +0 + +1 + + +ExtendedAttributeTargetItem +8AB877B5-6E4E-424A-A234-6DD9DC4D6EB4 +Extended Attributes +0 + +0 + +1 + + +406ADDC6-D29B-4555-A539-15BC5B712A26 +Template +0 + +0 + +The name of the template from which to create the new database, or DEFAULT to use the default template +1 +12 +0 + + +5B507946-57A3-42B3-BEBF-00E549563D32 +Encoding +0 + +0 + +Character set encoding to use in the new database. 
Specify a string constant (e.g., 'SQL_ASCII'), or an integer encoding number, or DEFAULT to use the default encoding +1 +12 +0 + + + + +FormTargetItem +9EAAD069-88CB-4CAC-8765-F86B5891DCBA +Forms +0 + +0 + +1 + + +81D28ABD-0696-418A-9A37-43CC32CDE7DC +opengauss +<Form > + <ExtendedAttribute Name="Template" Caption="Template" Attribute="Template" AttributeID="{406ADDC6-D29B-4555-A539-15BC5B712A26}" GrayHiddenAttributes="Yes" /> + <ExtendedAttribute Name="Encoding" Caption="Encoding" Attribute="Encoding" AttributeID="{5B507946-57A3-42B3-BEBF-00E549563D32}" GrayHiddenAttributes="Yes" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + + + +16AB5309-2509-4C3E-AE19-BB18CC382404 +Group +0 + +0 + +1 + + +ExtendedAttributeTargetItem +96DD5873-228F-460D-9602-0A3CF94FABF1 +Extended Attributes +0 + +0 + +1 + + +E6FC2DB5-17D5-4FD1-B8DE-A0ED011588DE +SysId +0 + +0 + +The SYSID clause can be used to choose the opengauss group ID of the new group. This is normally not necessary, but may be useful if you need to recreate a group referenced in the permissions of some object. +1 +10 +0 + + + + +FormTargetItem +8BB37684-F2F8-4866-8F43-5ABC9B681689 +Forms +0 + +0 + +1 + + +FE43BCEE-E961-4DFE-B597-7C5B3D57F55A +opengauss +<Form > + <ExtendedAttribute Attribute="SysId" AttributeID="{E6FC2DB5-17D5-4FD1-B8DE-A0ED011588DE}" Name="SysId" Caption="Group identifier (id)" GrayHiddenAttributes="Yes" /> +</Form> + +0 + +1276524800 +obalen +1 +24912 + + + + + + +EEA405CC-F187-4546-909F-60D33599A4DF +User +0 + +0 + +1 + + +ExtendedAttributeTargetItem +CC851CCE-CF77-4A16-9215-E9408BE04646 +Extended Attributes +0 + +1246366865 +mledier +1 + + +947DD0F5-B5A2-4BF9-BA50-736D57126F7B +Schema +0 + +0 + +Indicates if selected user is a schema or not. +1 +false +Is schema +0 + + +FAFA5191-4B2E-4087-BCE3-6EF3058B19BA +CanCreate +1246366826 +mledier +1246367214 +mledier +If user can be created in database. 
+1 +1 +RO +Function %Get%(obj) + %Get% = (LCase(obj.Code) <> "public") +End Function +Can create +0 + + + + +FormTargetItem +A68579A4-53A6-4AEE-81DB-6DF3BC0C6E9C +Forms +0 + +1246366881 +mledier +1 + + +A1FB5438-65BB-48B7-A086-FF0BDB9FF81D +General +GENERAL +<Form > + <StandardNameAndCode Attribute="NameAndCode" SizeInChars="1" HorizontalResize="true" /> + <StandardAttribute Attribute="Comment" Caption="Comment:" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + <StandardAttribute Attribute="Stereotype" Caption="Stereotype:" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + <StandardAttribute Attribute="Password" Caption="Password:" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + <HorizontalLayout Name="HorizontalLayout1" > + <ExtendedAttribute Attribute="Schema" Caption="Is schema" AttributeID="{947DD0F5-B5A2-4BF9-BA50-736D57126F7B}" GrayHiddenAttributes="Yes" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" Display="Checkbox" LeftText="Yes" /> + <ExtendedAttribute Attribute="Owner" Caption="Owner" AttributeID="{3358B5FA-08C0-4DFB-869C-FEEFEB1C49BB}" GrayHiddenAttributes="No" SizeInChars="1" HorizontalResize="Yes" /> + </HorizontalLayout> <StandardAttribute Attribute="KeywordList" Caption="Keywords" SizeInChars="1" LineNumber="3" HorizontalResize="Yes" VerticalResize="true" LeftText="false" /> +</Form> + +0 + +1276524800 +obalen +1 +GENERAL +24912 + + +8E605CC9-9153-4D30-96C1-0B0D713729C0 +Hidden +<Form > + <ExtendedAttribute Attribute="CanCreate" Caption="Can create" AttributeID="{FAFA5191-4B2E-4087-BCE3-6EF3058B19BA}" GrayHiddenAttributes="Yes" Display="Checkbox" LeftText="No" /> +</Form> + +1246366865 +mledier +1257761859 +mledier +1 +Dialog + + + + +CriterionTargetItem +B186F72A-EBB8-4762-AEE6-A159D0BD2065 +Criteria +0 + +0 + +1 + + +066ABFE6-7605-44F9-9247-10E90A158571 +isSchema +(%Schema%) +0 + +0 + +1 + + +ExtendedAttributeTargetItem +F855AC43-4986-451F-AD18-2B3151BF9597 +Extended Attributes +0 + +0 + +1 + + +3358B5FA-08C0-4DFB-869C-FEEFEB1C49BB +Owner +0 + +0 + +Owner of the schema +1 +15 +4BA9F642-DAB1-11D1-9944-006097355D9B +Owner +0 + + + + + + + +ExtendedCollectionTargetItem +AC0619DB-1C44-4A62-9746-D9EE1B446AD7 +Extended Collections +0 + +0 + +1 + + +4E7637E8-52C0-48F5-9218-E17C22025290 +Owner +0 + +0 + +1 +Schemas +4BA9F642-DAB1-11D1-9944-006097355D9B + + + + + + +60BBFAB7-C4BD-449F-AA31-C2ABF4D22667 +isUser +(%Schema%==false) +0 + +0 + +1 + + +FormTargetItem +D36A8DBE-B11A-4D49-B6AB-1081192A5722 +Forms +0 + +0 + +1 + + +002FACA9-83F6-4E6C-B3A7-5E79A15E210F +opengauss +<Form > + <ExtendedAttribute Attribute="SysId" Caption="User identifier (id)" AttributeID="{8F48507A-7C0D-4822-A566-2D7767715205}" GrayHiddenAttributes="Yes" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + <GroupBox Name="GroupBox2" Caption="Special permission" ControlAsLabel="No" > + <HorizontalLayout Name="HorizontalLayout1" Indentation="0" > + <ExtendedAttribute Attribute="CreateDB" Caption="Create database" AttributeID="{8A4EBE2D-8EDB-438E-8160-E66B06BDED64}" GrayHiddenAttributes="Yes" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + <ExtendedAttribute Attribute="CreateUser" Caption="Create user" AttributeID="{664FD4CF-0D90-4BA0-B7EF-A21C2C16B995}" GrayHiddenAttributes="Yes" SizeInChars="1" LineNumber="3" HorizontalResize="true" 
VerticalResize="true" LeftText="false" /> + </HorizontalLayout> + </GroupBox> + <GroupBox Name="GroupBox1" Caption="Security" ControlAsLabel="No" > + <ExtendedAttribute Attribute="Validity" Caption="Validity" AttributeID="{E21C45EE-516B-4540-9EC1-B88EFE86A7C9}" GrayHiddenAttributes="Yes" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + <ExtendedAttribute Attribute="EncryptedPassword" Caption="Encrypted password" AttributeID="{89E80C1A-9E8C-4BD5-BCA4-8FCE7EB7D6D7}" GrayHiddenAttributes="Yes" SizeInChars="1" LineNumber="3" HorizontalResize="true" VerticalResize="true" LeftText="false" /> + </GroupBox> +</Form> + +0 + +0 + +1 + + + + +ExtendedAttributeTargetItem +5D39E586-E369-4646-BA41-950BF52CEE33 +Extended Attributes +0 + +0 + +1 + + +8A4EBE2D-8EDB-438E-8160-E66B06BDED64 +CreateDB +0 + +0 + +Defines a user's ability to create databases. +If TRUE, the user is allowed to create databases. +1 +false +0 + + +664FD4CF-0D90-4BA0-B7EF-A21C2C16B995 +CreateUser +0 + +0 + +If TRUE, the user is allowed to create new users. +This option also turns the user into a superuser who can override all access restrictions. +1 +false +0 + + +89E80C1A-9E8C-4BD5-BCA4-8FCE7EB7D6D7 +EncryptedPassword +0 + +0 + +Control whether the password is stored encrypted in the system catalogs. +1 +true +0 + + +8F48507A-7C0D-4822-A566-2D7767715205 +SysId +0 + +0 + +The SYSID clause can be used to choose the opengauss user ID of the new user. This is normally not necessary, but may be useful if you need to recreate the owner of an orphaned object. +1 +10 +0 + + +E21C45EE-516B-4540-9EC1-B88EFE86A7C9 +Validity +0 + +0 + +This clause sets an absolute time after which the user's password is no longer valid. If this clause is omitted the password will be valid for all time. +1 +2 +0 + + + + + + + + + + +62B3BA47-41C9-42A1-B298-23FF4E128490 +Model +0 + +0 + +1 + + +EventHandlerTargetItem +128243FB-D24B-4635-B002-E7705C4500CF +Event Handlers +0 + +0 + +1 + + +CEE1267A-8981-4459-9BA1-2D512C7DB04D +AfterDatabaseReverseEngineer +0 + +0 + +1 +Function %AfterDatabaseReverseEngineer%(model) + Dim tab + For each tab in model.Tables + if (tab.Owner is Nothing) then + tab.ExecuteCustomMethod("%CurrentTargetCode%.SetPublicSchema") + end if + Next + %AfterDatabaseReverseEngineer% = True +End Function + + + + + + + + +F45EAD0C-A5E6-4DA5-BF2C-D37E197C36B6 +Odbc +0 + +0 + +DBMS characteristics, command definition, and data type translations for the ODBC generation and reverse engineering + + +93397FEC-B239-43E0-808E-6C37FE1D55D9 +Objects +0 + +0 + +Contains sub-categories for each type of object in the database, for example: Table, or Reference. 
Each sub-category contains entries whose values define database commands and object-related characteristics + + +B561F06F-8EA2-4923-9026-BA6D50FCC935 +Qualifier +0 + +0 + +Manages the use of qualifier in ODBC reverse engineering + + +D3ECF29D-3212-414B-B983-755D3A28E865 +Enable +0 + +0 + +Allows using the qualifier combo box during ODBC reverse engineering + + +6DBE6644-748C-411E-8D24-C9762EF859BA +SqlListQuery +0 + +0 + +Query to retrieve qualifier during ODBC reverse engineering + + +69A5288F-87C6-4041-A68A-E9DDCC5629A4 +Label +0 + +0 + +Label for <All> in qualifier selection list +All catalogs + + + + + + + + + + + \ No newline at end of file diff --git a/content/zh/post/xiake/images/20220406-a79f0a2a-ab08-44e7-9e1c-e9724aef0293.png b/content/zh/post/xiake/images/20220406-a79f0a2a-ab08-44e7-9e1c-e9724aef0293.png new file mode 100644 index 0000000000000000000000000000000000000000..7f058877bf884ba9dfc59bd8d287ff1c663c10d5 Binary files /dev/null and b/content/zh/post/xiake/images/20220406-a79f0a2a-ab08-44e7-9e1c-e9724aef0293.png differ diff --git "a/content/zh/post/xiake/openGauss MogDB\350\260\203\347\224\250C FUNCTION.md" "b/content/zh/post/xiake/openGauss MogDB\350\260\203\347\224\250C FUNCTION.md" new file mode 100644 index 0000000000000000000000000000000000000000..20c93b84d3595d769e270e7afbb47790ce2da2ea --- /dev/null +++ "b/content/zh/post/xiake/openGauss MogDB\350\260\203\347\224\250C FUNCTION.md" @@ -0,0 +1,187 @@ ++++ + +title = "openGauss/MogDB调用C FUNCTION" + +date = "2022-04-08" + +tags = ["openGauss/MogDB调用C FUNCTION"] + +archives = "2022-04" + +author = "夏克" + +summary = "openGauss/MogDB调用C FUNCTION" + +img = "/zh/post/xiake/title/img6.png" + +times = "10:20" + ++++ + +# openGauss/MogDB调用C FUNCTION + +## 环境准备 + +- 安装openGauss/MogDB + 可参考官方文档 +- 服务器环境 + 本地虚拟机 centos 7.9 + +**注意:尽量进入omm用户下进行编译,可以避免一些不必要的环境问题** + +## 代码 + +- C代码 + 基本与postgres插件开发一样,关键是4,5,6三行。 + +``` +#include "postgres.h" +#include "fmgr.h" + +PG_MODULE_MAGIC; +extern "C" Datum add_ab(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1(add_ab); + +Datum +add_ab(PG_FUNCTION_ARGS) +{ + int32 arg_a = PG_GETARG_INT32(0); + int32 arg_b = PG_GETARG_INT32(1); + + PG_RETURN_INT32(arg_a + arg_b); +} +``` + +- CMakeLists.txt + +```sql +cmake_minimum_required (VERSION 2.8) + +project (gs_plug) +set(CMAKE_CXX_FLAGS "-Wall -std=c++11 -Wall") +set(CMAKE_CXX_FLAGS_DEBUG "-g3") +set(CMAKE_CXX_FLAGS_RELEASE "-O2") +set(CMAKE_BUILD_TYPE Debug) + +set(MOG_INCLUDE /opt/mogdb/app/include/postgresql/server) +set(MOG_LIBPATH /opt/mogdb/app/lib/postgresql/proc_srclib) +include_directories(${MOG_INCLUDE}) + +aux_source_directory(. DIR_SRCS) +add_library (${PROJECT_NAME} SHARED ${DIR_SRCS}) + +install(TARGETS ${PROJECT_NAME} DESTINATION ${MOG_LIBPATH}) +``` + +***要点1:获取包含头文件的目录*** + +``` +[omm@vmpc funcs]$ pg_config --includedir /opt/mogdb/app/include +``` + +所需头文件路径:`pg_config --includedir`/postgresql/server + +***要点1:c函数安装路径*** + +``` +[omm@vmpc funcs]$ pg_config --pkglibdir /opt/mogdb/app/lib/postgresql +``` + +安装路径:`pg_config --pkglibdir`/proc_srclib/ + +## 编译 & 安装 + +``` +[omm@vmpc funcs]$ mkdir build +[omm@vmpc funcs]$ cd build/ +[omm@vmpc build]$ cmake ../ +CMake Deprecation Warning at CMakeLists.txt:1 (cmake_minimum_required): + Compatibility with CMake < 2.8.12 will be removed from a future version of + CMake. + + Update the VERSION argument value or use a ... suffix to tell + CMake that the project does not need compatibility with older versions. 
+ + +-- The C compiler identification is GNU 4.8.5 +-- The CXX compiler identification is GNU 4.8.5 +-- Detecting C compiler ABI info +-- Detecting C compiler ABI info - done +-- Check for working C compiler: /bin/cc - skipped +-- Detecting C compile features +-- Detecting C compile features - done +-- Detecting CXX compiler ABI info +-- Detecting CXX compiler ABI info - done +-- Check for working CXX compiler: /bin/c++ - skipped +-- Detecting CXX compile features +-- Detecting CXX compile features - done +-- Configuring done +-- Generating done +-- Build files have been written to: /opt/mogdb/funcs/build +[omm@vmpc build]$ make +[ 50%] Building CXX object CMakeFiles/gs_plug.dir/testfunc.cpp.o +[100%] Linking CXX shared library libgs_plug.so +[100%] Built target gs_plug +[omm@vmpc build]$ make install +Consolidate compiler generated dependencies of target gs_plug +[100%] Built target gs_plug +Install the project... +-- Install configuration: "Debug" +-- Installing: /opt/mogdb/app/lib/proc_srclib/libgs_plug.so +``` + +**依次执行如下命令** + +``` +mkdir build +cd build +cmake ../ +make +make install +``` + +**确认安装** + +``` +[omm@vmpc build]$ ll /opt/mogdb/app/lib/proc_srclib/libgs_plug.so -rwxr-xr-x. 1 omm dbgrp 215696 Apr 2 00:17 /opt/mogdb/app/lib/proc_srclib/libgs_plug.so +``` + +## 验证 + +- 链接mogdb + +``` +[omm@vmpc ~]$ pgcli -p 26000 -d postgres +Server: PostgreSQL 9.2.4 +Version: 3.4.1 +Home: http://pgcli.com +postgres> +``` + +- 创建C FUNCTION + +```sql +postgres> CREATE FUNCTION add_ab(a int ,b int ) RETURNS integer + AS 'testfunc.so', 'add_ab' + LANGUAGE C STRICT; +CREATE FUNCTION +Time: 0.039s +``` + +- 查看函数 + +![image.png](../images/20220406-a79f0a2a-ab08-44e7-9e1c-e9724aef0293.png) + +- 调用函数 + +```sql +postgres> select add_ab(a := 4, b := 2); ++--------+ +| add_ab | +|--------| +| 6 | ++--------+ +SELECT 1 +Time: 0.033s +postgres> +``` diff --git a/content/zh/post/xiake/title/img.png b/content/zh/post/xiake/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/xiake/title/img.png differ diff --git a/content/zh/post/xiake/title/img6.png b/content/zh/post/xiake/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79 Binary files /dev/null and b/content/zh/post/xiake/title/img6.png differ diff --git a/content/zh/post/xingchen/boost_compile_failed.md b/content/zh/post/xingchen/boost_compile_failed.md new file mode 100644 index 0000000000000000000000000000000000000000..f770105a2658840df3a819a92f174ac36f224bd5 --- /dev/null +++ b/content/zh/post/xingchen/boost_compile_failed.md @@ -0,0 +1,64 @@ ++++ +title = "boost编译失败" +date = "2021-05-21" +tags = ["boost编译失败"] +archives = "2021-05-21" +author = "xingchen" +summary = "boost编译失败" +img = "/zh/post/xingchen/title/img1.png" +times = "19:30" ++++ + +## 概述 + +本文档介绍再编译openGauss-third_party三方库时候,boost编译失败解决方法。 目前在 1.1.0 2.0.0 版本都适用。 + +## 错误分析和处理 + +在编译openGauss-third_party三方库时候,我们首先在build目录下执行`sh build.sh`,三方库自行编译。 但是经常遇到在编译到boost的时候,脚本终止掉了。很明显,这里面编译出错了。 + +![](../images/boost/image1.png) + +dependency依赖的编译日志在 `dependency/build` 目录下demo.log,这里面可以很明显的看到错误信息。 +也可以进入到`dependency/boost`目录下,单独编译,看看报错。 + +``` +sh build.sh -m all +``` +![](../images/boost/image2.png) + +可以看到缺少了 pyconfig.h 头文件。 +使用 `yum provides */pyconfig.h` 查找下,发现在python-devel包里面,需要安装下`python2-devel`包。 +![](../images/boost/image3.png) + +``` +yum install python2-devel +yum install python3-devel +``` + 
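+
+安装完成后,可以用类似下面的命令确认 pyconfig.h 确实已经随包落盘,以及当前 python 命令的实际指向(示例命令,具体文件路径因系统而异,仅供排查参考):
+
+```
+# 查看 pyconfig.h 被安装到了哪里
+rpm -ql python2-devel | grep pyconfig.h
+rpm -ql python3-devel | grep pyconfig.h
+# 查看 /usr/bin/python 当前链接到的版本
+ls -l /usr/bin/python
+```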
+其实我们在编译前,已经安装过这两个依赖。但是还是出现了这个问题。。。。 + +我们在开始编译的时候,做了这个操作,将python链接到python3: `ln -s /usr/bin/python3 /usr/bin/python` +![](../images/boost/image5.png) + +这里是因为,三方库里面有很多需要使用python3编译的三方库,但是脚本中写的`/usr/bin/python`的执行环境,所以需要做这个链接操作,将python指向python3。\ +但是boost编译,依赖的头文件是python2-devel的,这里需要将python链接到python2上。如下: + +``` +unlink /usr/bin/python +ln -s /usr/bin/python2 /usr/bin/python +``` + +再次编译 `sh build.sh -m all`,编译成功。 + +![](../images/boost/image4.png) + +在 `dependency/build/build_dependency.sh` 里面,注释掉已经编译好的脚本片段,执行 `sh build_dependency.sh` +继续编译未完成的三方库。 + +***注意:在编译完boost后,还需要将python再链接到python3,以完成后面三方库的编译工作。*** + +``` +unlink /usr/bin/python +ln -s /usr/bin/python3 /usr/bin/python +``` \ No newline at end of file diff --git a/content/zh/post/xingchen/cluster_on_one_node.md b/content/zh/post/xingchen/cluster_on_one_node.md new file mode 100644 index 0000000000000000000000000000000000000000..7b796c7c7940fdca00b1df00800902e1b3052261 --- /dev/null +++ b/content/zh/post/xingchen/cluster_on_one_node.md @@ -0,0 +1,220 @@ ++++ +title = "在一台服务器上安装主备集群" +date = "2021-02-18" +tags = ["在一台服务器上安装主备集群"] +archives = "2021-02-18" +author = "xingchen" +summary = "在一台服务器上安装主备集群" +img = "/zh/post/xingchen/title/img1.png" +times = "14:30" ++++ + + +默认使用openGauss的OM工具去安装主备集群,是要求主备分别在不同的服务器上的,即一台服务器只能安装一个数据库实例。 +可以在一台服务器上安装多个单机数据库,通过修改配置建立主备关系,以达到一个服务器上安装数据库集群的效果。 + +***这种方式下,是不能通过OM工具来管理集群的。只能使用数据库内部工具操作。*** + +以下步骤以一主一备为例,一主多备类似。 + +### 安装两个单机数据库 + +可以通过使用OM工具安装两个单机数据库,保证两个数据库端口不同,数据目录不同。 + +或者直接使用编译安装的方式,启动两个不同端口和数据目录的数据库。 + +***注意: 两个数据库的端口不要挨得太近。*** + +如下面,启动两个数据库,端口分别是12000和22000 + +``` +[opengauss@ecs-761c dn_22000]$ ps -ef | grep gauss | grep -v grep +root 9789 9532 0 10:49 pts/1 00:00:00 su - opengauss +opengau+ 17649 1 2 11:06 pts/1 00:00:02 /usr/local/opengauss/1.1.0/bin/gaussdb -D /home/opengauss/datanode/dn_12000 +opengau+ 18357 1 2 11:08 pts/1 00:00:01 /usr/local/opengauss/1.1.0/bin/gaussdb -D /home/opengauss/datanode/dn_22000 +``` + +``` +[opengauss@ecs-761c dn_22000]$ netstat -nap | grep gauss +(Not all processes could be identified, non-owned process info + will not be shown, you would have to be root to see it all.) 
+tcp 0 0 127.0.0.1:12000 0.0.0.0:* LISTEN 17649/gaussdb +tcp 0 0 127.0.0.1:12001 0.0.0.0:* LISTEN 17649/gaussdb +tcp 0 0 127.0.0.1:22000 0.0.0.0:* LISTEN 18357/gaussdb +tcp 0 0 127.0.0.1:22001 0.0.0.0:* LISTEN 18357/gaussdb +tcp6 0 0 ::1:12000 :::* LISTEN 17649/gaussdb +tcp6 0 0 ::1:12001 :::* LISTEN 17649/gaussdb +tcp6 0 0 ::1:22000 :::* LISTEN 18357/gaussdb +tcp6 0 0 ::1:22001 :::* LISTEN 18357/gaussdb +udp6 0 0 ::1:49528 ::1:49528 ESTABLISHED 17649/gaussdb +udp6 0 0 ::1:45521 ::1:45521 ESTABLISHED 18357/gaussdb +unix 2 [ ACC ] STREAM LISTENING 56069 18357/gaussdb /tmp/.s.PGSQL.22000 +unix 2 [ ACC ] STREAM LISTENING 56072 18357/gaussdb /tmp/.s.PGSQL.22001 +unix 2 [ ACC ] STREAM LISTENING 54146 17649/gaussdb /tmp/.s.PGSQL.12000 +unix 2 [ ACC ] STREAM LISTENING 54149 17649/gaussdb /tmp/.s.PGSQL.12001 + +``` + +### 以主机或备机模式分别启动数据库 + +选择一个实例作为主机,以primary方式重启数据库。 + +以dn_12000为例,作为主机,重启: + +``` +gs_ctl restart -D /home/opengauss/datanode/dn_12000 -M primary +``` +启动完成后查询: +``` +gs_ctl query -D /home/opengauss/datanode/dn_12000 +``` +``` +[2021-02-18 11:11:14.687][19947][][gs_ctl]: gs_ctl query ,datadir is /home/opengauss/datanode/dn_12000 + HA state: + local_role : Primary + static_connections : 0 + db_state : Normal + detail_information : Normal + + Senders info: +No information + Receiver info: +No information +``` +local_role为Primary,即以primary方式启动成功。该机器作为主机。 + +同理,将另外一个实例dn_22000作为备机启动: +``` +gs_ctl restart -D /home/opengauss/datanode/dn_22000 -M standby +``` +查询状态: +``` +gs_ctl query -D /home/opengauss/datanode/dn_22000 + +[2021-02-18 11:15:57.003][21884][][gs_ctl]: gs_ctl query ,datadir is /home/opengauss/datanode/dn_22000 + HA state: + local_role : Standby + static_connections : 0 + db_state : Need repair + detail_information : Disconnected + + Senders info: +No information + Receiver info: +No information +``` + +### 修改主备实例的配置 + +使用如下命令,分别修改主备实例的配置: +``` +gs_guc set -D {dn} -c "replconninfo1='localhost={localhost} localport={localport} localheartbeatport={localeHeartPort} localservice={localservice} remotehost={remoteNode} remoteport={remotePort} remoteheartbeatport={remoteHeartPort} remoteservice={remoteservice}'" +gs_guc set -D {dn} -c 'remote_read_mode=off'; +gs_guc set -D {dn} -c 'replication_type=1'; +``` +{dn} 数据目录 +{localhost} 本地数据库绑定的ip地址 +{localport} 一般为port地址+1 +{localeHeartPort} 设置为port+4,不冲突即可 +{localservice} 设置为port+5,不冲突即可 +{remoteNode} 远端数据库绑定的ip地址。因为同一台机器,地址与localhost相同 +{remotePort} 远端port地址+1 + +示例: +主机实例dn_12000设置: +``` +gs_guc set -D /home/opengauss/datanode/dn_12000 -c "replconninfo1='localhost=127.0.0.1 localport=12001 localheartbeatport=12004 localservice=12005 remotehost=127.0.0.1 remoteport=22001 remoteheartbeatport=22004 remoteservice=22005'" +gs_guc set -D /home/opengauss/datanode/dn_12000 -c 'remote_read_mode=off'; +gs_guc set -D /home/opengauss/datanode/dn_12000 -c 'replication_type=1'; +``` +备机实例dn_22000设置: +``` +gs_guc set -D /home/opengauss/datanode/dn_22000 -c "replconninfo1='localhost=127.0.0.1 localport=22001 localheartbeatport=22004 localservice=22005 remotehost=127.0.0.1 remoteport=12001 remoteheartbeatport=12004 remoteservice=12005'" +gs_guc set -D /home/opengauss/datanode/dn_22000 -c 'remote_read_mode=off'; +gs_guc set -D /home/opengauss/datanode/dn_22000 -c 'replication_type=1'; +``` + +### 重建备机 + +重启备机: +``` +gs_ctl restart -D /home/opengauss/datanode/dn_22000 -M standby +``` + +重建备机: +``` +gs_ctl build -D /home/opengauss/datanode/dn_22000 -M standby +``` +重建成功后,主备安装成功。 + +### 查询主备状态 + +使用gs_ctl指定主机的数据库目录查询状态: +``` +gs_ctl query -D 
/home/opengauss/datanode/dn_12000 +``` +如下,表示主备建立成功: +``` +[2021-02-18 11:58:29.295][54782][][gs_ctl]: gs_ctl query ,datadir is /home/opengauss/datanode/dn_12000 + HA state: + local_role : Primary + static_connections : 0 + db_state : Normal + detail_information : Normal + + Senders info: + sender_pid : 44560 + local_role : Primary + peer_role : Standby + peer_state : Normal + state : Streaming + sender_sent_location : 0/4001720 + sender_write_location : 0/4001720 + sender_flush_location : 0/4001720 + sender_replay_location : 0/4001720 + receiver_received_location : 0/4001720 + receiver_write_location : 0/4001720 + receiver_flush_location : 0/4001720 + receiver_replay_location : 0/4001720 + sync_percent : 100% + sync_state : Sync + sync_priority : 1 + sync_most_available : Off + channel : 127.0.0.1:12001-->127.0.0.1:51698 + + Receiver info: +No information +``` + +### 验证 + +登录主机并建立表: +``` +[opengauss@ecs-761c dn_12000]$ gsql -d postgres -p 12000 -r +gsql ((openGauss 1.1.0 build 392c0438) compiled at 2020-12-31 20:08:06 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +postgres=# create table t1(id int); +CREATE TABLE +postgres=# \q + +``` + +登录备机查询表是否存在: +``` +[opengauss@ecs-761c dn_12000]$ gsql -d postgres -p 22000 -r +gsql ((openGauss 1.1.0 build 392c0438) compiled at 2020-12-31 20:08:06 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. + +postgres=# \d + List of relations + Schema | Name | Type | Owner | Storage +--------+------+-------+-----------+---------------------------------- + public | t1 | table | opengauss | {orientation=row,compression=no} +(1 row) + +postgres=# + +``` \ No newline at end of file diff --git a/content/zh/post/xingchen/compile_without_lse.md b/content/zh/post/xingchen/compile_without_lse.md new file mode 100644 index 0000000000000000000000000000000000000000..dcdafa37594a41ec2150ae27307a4c4a2865c877 --- /dev/null +++ b/content/zh/post/xingchen/compile_without_lse.md @@ -0,0 +1,39 @@ ++++ +title = "安装时报指令错误的处理" +date = "2021-05-21" +tags = ["安装时报指令错误的处理"] +archives = "2021-05-21" +author = "xingchen" +summary = "安装时报指令错误的处理" +img = "/zh/post/xingchen/title/img1.png" +times = "19:30" ++++ + +### 概述 + +在一些非官方指定的系统上面 (官方指定的系统:https://opengauss.org/zh/docs/latest/docs/Description/%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83.html), 使用官网提供的镜像安装数据库,有时会遇到一些 `"非法指令" "illegal instruction"` 的问题, 这些往往是由于CPU指令集不兼容导致的。 + +此处我们只说下在ARM上面安装失败的问题。 + +常见的主要如下: + +官网发布的 `openEuler_arm` 包,在编译的时候,打开了`ARM_LSE`指令集做了编译的优化。但是对于一些其他版本的arm服务器,不一定支持。 + +代码注释中这么写的: +``` +build\script\mpp_package.sh + +# it may be risk to enable 'ARM_LSE' for all ARM CPU, but we bid our CPUs are not elder than ARMv8.1 +``` + +实测在 ***鲲鹏920*** 和 ***麒麟990*** 的cpu芯片下是支持安装的。 +cpu可以通过 `lscpu` 名称查看。 + +对于其他不自持该指令的系统,需要去掉 `-D__ARM_LSE` 指令重新编译即可。 + +在编译脚本中 `build\script\mpp_package.sh`,删除掉所有的 `-D__ARM_LSE` , 重新编译数据库。 + +patch如下图: + +![](../images/compile/withoutlse.png) + diff --git a/content/zh/post/xingchen/gcc_compile.md b/content/zh/post/xingchen/gcc_compile.md new file mode 100644 index 0000000000000000000000000000000000000000..c51046cda0b0df6aa3f53737ba1a14da1eaa2b05 --- /dev/null +++ b/content/zh/post/xingchen/gcc_compile.md @@ -0,0 +1,168 @@ ++++ +title = "gcc编译指导" +date = "2021-02-23" +tags = ["gcc编译指导"] +archives = "2021-02-23" +author = "xingchen" +summary = "gcc编译指导" +img = "/zh/post/xingchen/title/img1.png" +times = "19:30" ++++ + +## 概述 + 
openGauss的编译依赖gcc,目前官方推荐使用gcc7.3版本来编译。 但是三方库中未给出gcc的编译指导。本文档介绍如何编译gcc。
+
+
+## 步骤
+
+以openEuler20.03 LTS系统,arm架构,gcc7.3版本为例 (其他版本的gcc也与此相同)。
+Gcc的编译依赖 `gmp` `isl` `mpc` `mpfr` ,需要先编译这四个库。
+
+三方库依赖以及下载地址如下:
+
+| 三方库 | 下载地址 |
+| ------ | -------- |
+| gcc7.3 | http://ftp.gnu.org/gnu/gcc/gcc-7.3.0/ |
+| gmp | http://ftp.gnu.org/gnu/gmp/gmp-6.1.1.tar.xz |
+| mpfr | http://ftp.gnu.org/gnu/mpfr/mpfr-4.0.2.tar.gz |
+| mpc | http://ftp.gnu.org/gnu/mpc/mpc-1.1.0.tar.gz |
+| isl | https://gcc.gnu.org/pub/gcc/infrastructure/isl-0.18.tar.bz2 |
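+
+如果编译服务器可以访问外网,也可以参考下面的命令直接在服务器上下载上述源码包(示例命令,版本与上表一致;其中gcc源码包的文件名为常见命名假设,实际以该目录下的文件为准):
+
+```
+# 直接在服务器上下载各三方库源码包(需要外网访问权限)
+wget http://ftp.gnu.org/gnu/gmp/gmp-6.1.1.tar.xz
+wget http://ftp.gnu.org/gnu/mpfr/mpfr-4.0.2.tar.gz
+wget http://ftp.gnu.org/gnu/mpc/mpc-1.1.0.tar.gz
+wget https://gcc.gnu.org/pub/gcc/infrastructure/isl-0.18.tar.bz2
+# gcc需要进入gcc-7.3.0目录选择对应的tar包下载
+wget http://ftp.gnu.org/gnu/gcc/gcc-7.3.0/gcc-7.3.0.tar.gz
+```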
+
+将这几个三方库上传到服务器上面,按照如下顺序进行编译:
+
+## 1. 编译gmp
+
+解压:`tar -xf gmp-6.1.1.tar.xz`
+
+编译:
+***prefix路径可以自己指定,表示编译结果存放路径。***
+
+```
+./configure --prefix=/usr2/zxb/compile/target/gmp
+make -j
+make install -j
+```
+
+## 2. 编译mpfr
+
+解压:`tar -xf mpfr-4.0.2.tar.gz`
+
+编译:
+***--prefix路径自己指定,--with-gmp路径为上一步gmp编译的prefix路径。下面与此相同。***
+
+```
+./configure --prefix=/usr2/zxb/compile/target/mpfr --with-gmp=/usr2/zxb/compile/target/gmp
+make -j
+make install -j
+```
+
+## 3. 编译mpc
+
+解压:`tar -xf mpc-1.1.0.tar.gz`
+
+编译:
+
+```
+./configure --prefix=/usr2/zxb/compile/target/mpc --with-gmp=/usr2/zxb/compile/target/gmp --with-mpfr=/usr2/zxb/compile/target/mpfr
+make -j
+make install -j
+```
+
+## 4. 编译isl
+
+解压:`tar -xf isl-0.18.tar.bz2`
+
+编译:
+
+```
+./configure --prefix=/usr2/zxb/compile/target/isl --with-gmp-prefix=/usr2/zxb/compile/target/gmp
+make -j
+make install -j
+```
+
+## 5. 编译gcc
+
+1. 先安装编译gcc需要的依赖
+
+```
+yum install gcc-c++ glibc-devel
+```
+
+2. 注释 `sys/ustat.h` 相关信息
+
+在 `glibc>=2.28` 的系统,去掉了 `ustat.h` 文件,gcc源码需要删除相关信息。(可以通过 `yum list | grep glibc` 查看当前系统的 `glibc` 版本)
+
+```
+vim ./libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
+```
+
+注释掉如下内容:
+
+```
+第157行 //#include <sys/ustat.h>
+第250行 //unsigned struct_ustat_sz = sizeof(struct ustat);
+```
+
+(参照: https://stackoverflow.com/questions/56096060/how-to-fix-the-gcc-compilation-error-sys-ustat-h-no-such-file-or-directory-i)
+
+3. 导入环境变量
+
+```
+export LD_LIBRARY_PATH=/usr2/zxb/compile/target/gmp/lib:/usr2/zxb/compile/target/mpfr/lib:/usr2/zxb/compile/target/mpc/lib:/usr2/zxb/compile/target/isl/lib:${LD_LIBRARY_PATH}
+export C_INCLUDE_PATH=/usr2/zxb/compile/target/gmp/include:/usr2/zxb/compile/target/mpfr/include:/usr2/zxb/compile/target/mpc/include:/usr2/zxb/compile/target/isl/include:${C_INCLUDE_PATH}
+```
+
+***环境变量中的路径,为上面编译的几个三方库各自的prefix路径。***
+
+4. 编译gcc
+
+***configure选项中--with-gmp等参数的路径,为上面编译的几个三方库各自的prefix路径。***
+
+```
+./configure CFLAGS='-fstack-protector-strong -Wl,-z,noexecstack -Wl,-z,relro,-z,now ' --prefix=/usr2/zxb/compile/target/gcc --with-gmp=/usr2/zxb/compile/target/gmp --with-mpfr=/usr2/zxb/compile/target/mpfr --with-mpc=/usr2/zxb/compile/target/mpc --with-isl=/usr2/zxb/compile/target/isl --disable-multilib --enable-languages=c,c++
+make -j
+make install -j
+```
+
+
+## 6. 
编译cmake + +下载cmake源码 `https://github.com/Kitware/CMake/releases` + +解压后进入到源码根目录 + +``` +./configure --prefix=/usr2/zxb/compile/cmake3.18 ##prefix为编译结果路径 +make –j +make install -j +``` + + +## 拷贝gcc到三方库中 + +编译完后,将 `/usr2/zxb/compile/target` 下的 `gmp mpfr mpc isl gcc` 拷贝到三方库二进制的 `buildtools/${platform}/gcc7.3` 下面,即可用来编译openGauss数据库。 + + + + + + diff --git a/content/zh/post/xingchen/images/boost/image1.png b/content/zh/post/xingchen/images/boost/image1.png new file mode 100644 index 0000000000000000000000000000000000000000..dafda6f55292f413a51692dae36d78a693d03000 Binary files /dev/null and b/content/zh/post/xingchen/images/boost/image1.png differ diff --git a/content/zh/post/xingchen/images/boost/image2.png b/content/zh/post/xingchen/images/boost/image2.png new file mode 100644 index 0000000000000000000000000000000000000000..1b4e9a980d8ba6c67374118d0d0e7f80a096bd07 Binary files /dev/null and b/content/zh/post/xingchen/images/boost/image2.png differ diff --git a/content/zh/post/xingchen/images/boost/image3.png b/content/zh/post/xingchen/images/boost/image3.png new file mode 100644 index 0000000000000000000000000000000000000000..d83b8c71dcc5fb842d74fe396efc5ee0a8c8ee8a Binary files /dev/null and b/content/zh/post/xingchen/images/boost/image3.png differ diff --git a/content/zh/post/xingchen/images/boost/image4.png b/content/zh/post/xingchen/images/boost/image4.png new file mode 100644 index 0000000000000000000000000000000000000000..1ccf5b9ebbc2b28d61ca898371f5085dcb090dc8 Binary files /dev/null and b/content/zh/post/xingchen/images/boost/image4.png differ diff --git a/content/zh/post/xingchen/images/boost/image5.png b/content/zh/post/xingchen/images/boost/image5.png new file mode 100644 index 0000000000000000000000000000000000000000..325458758738102cf351962dced3ba8a27bbabda Binary files /dev/null and b/content/zh/post/xingchen/images/boost/image5.png differ diff --git a/content/zh/post/xingchen/images/compile/1.3.1.png b/content/zh/post/xingchen/images/compile/1.3.1.png new file mode 100644 index 0000000000000000000000000000000000000000..2b9d45b7458e70ba6df096efaa0cd6407db0305f Binary files /dev/null and b/content/zh/post/xingchen/images/compile/1.3.1.png differ diff --git a/content/zh/post/xingchen/images/compile/1.3.2.png b/content/zh/post/xingchen/images/compile/1.3.2.png new file mode 100644 index 0000000000000000000000000000000000000000..10812f71b06c38987b9afafbcbbe80669b6c9614 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/1.3.2.png differ diff --git a/content/zh/post/xingchen/images/compile/2.1.1.png b/content/zh/post/xingchen/images/compile/2.1.1.png new file mode 100644 index 0000000000000000000000000000000000000000..719ab072bf9880c8d1a6c427f323b0e0ff598195 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/2.1.1.png differ diff --git a/content/zh/post/xingchen/images/compile/2.2.1.png b/content/zh/post/xingchen/images/compile/2.2.1.png new file mode 100644 index 0000000000000000000000000000000000000000..330e2a02bd09a9ef082e5f74e6935cca17c62f48 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/2.2.1.png differ diff --git a/content/zh/post/xingchen/images/compile/2.2.2.png b/content/zh/post/xingchen/images/compile/2.2.2.png new file mode 100644 index 0000000000000000000000000000000000000000..f9a4508d91c645594a79a0ed68412285868ea423 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/2.2.2.png differ diff --git a/content/zh/post/xingchen/images/compile/2.2.3.png b/content/zh/post/xingchen/images/compile/2.2.3.png 
new file mode 100644 index 0000000000000000000000000000000000000000..77a04ea80bf31b6dfb25bb2a8cae1382b1892bc3 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/2.2.3.png differ diff --git a/content/zh/post/xingchen/images/compile/2.3.1.png b/content/zh/post/xingchen/images/compile/2.3.1.png new file mode 100644 index 0000000000000000000000000000000000000000..464185b9086cf0fa157558390f676725adb10a24 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/2.3.1.png differ diff --git a/content/zh/post/xingchen/images/compile/2.3.2.png b/content/zh/post/xingchen/images/compile/2.3.2.png new file mode 100644 index 0000000000000000000000000000000000000000..29b94d18615282f288145727a0396a49a33ebd4e Binary files /dev/null and b/content/zh/post/xingchen/images/compile/2.3.2.png differ diff --git a/content/zh/post/xingchen/images/compile/gcc_conf.png b/content/zh/post/xingchen/images/compile/gcc_conf.png new file mode 100644 index 0000000000000000000000000000000000000000..ded898806e657a94cfa804d8592a22c08bd072e5 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/gcc_conf.png differ diff --git a/content/zh/post/xingchen/images/compile/gcc_mpp1.png b/content/zh/post/xingchen/images/compile/gcc_mpp1.png new file mode 100644 index 0000000000000000000000000000000000000000..1d9bc5d85b1ad5a050ec706c7c502114ef81118c Binary files /dev/null and b/content/zh/post/xingchen/images/compile/gcc_mpp1.png differ diff --git a/content/zh/post/xingchen/images/compile/gcc_mpp2.png b/content/zh/post/xingchen/images/compile/gcc_mpp2.png new file mode 100644 index 0000000000000000000000000000000000000000..8bd37903a5a0550e2ae02d64ba4a5ee9baf44d11 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/gcc_mpp2.png differ diff --git a/content/zh/post/xingchen/images/compile/withoutlse.png b/content/zh/post/xingchen/images/compile/withoutlse.png new file mode 100644 index 0000000000000000000000000000000000000000..f37fdfe49a8e1016423ed3e643dcd7aae138cb16 Binary files /dev/null and b/content/zh/post/xingchen/images/compile/withoutlse.png differ diff --git a/content/zh/post/xingchen/om_support_os.md b/content/zh/post/xingchen/om_support_os.md new file mode 100644 index 0000000000000000000000000000000000000000..3d5ebd25d78c97fcbbe692594e23134a468b5cd7 --- /dev/null +++ b/content/zh/post/xingchen/om_support_os.md @@ -0,0 +1,71 @@ ++++ +title = "openGauss-OM修改来适配其他操作系统的安装" +date = "2021-05-21" +tags = ["openGauss-OM修改来适配其他操作系统的安装"] +archives = "2021-05-21" +author = "xingchen" +summary = "OM修改来适配其他操作系统的安装" +img = "/zh/post/xingchen/title/img1.png" +times = "19:30" ++++ + +## 概述 + +openGauss官方发布的镜像(https://opengauss.org/zh/download.html), 企业版镜像的安装支持如下系统: +``` +centos7.6 x86_64 +openEuler20.03LTS arm | x86_64 +kylin v10 arm | x86_64 +``` +此外其他的系统,在使用OM工具进行安装的时候,提示不支持该操作系统。 + +但是也有很多系统,比如说上面这些系统的发行版,在操作系统内核方面与以上并没有多少区别,但是由于修改了os-release的信息,导致在安装openGauss数据库时候被识别为不支持的系统,安装不上。 + +## 解决方案 + +可以通过两种方式来解决: 1.修改OM代码,增加对操作系统适配 2.修改操作系统os-release信息 + +#### 1.修改OM代码,增加对操作系统适配 + +OM工具安装(也就是企业版的安装),在安装过程中,会对操作系统进行校验,我们修改相关的python文件,添加我们的系统信息。 + +获取当前操作系统的脚本: +``` +source /etc/os-release; echo $ID +``` +获取当前cpu系统架构: +``` +uname -p +``` + +在如下文件中添加系统信息,可以参照代码已有的系统修改: + +`script/gspylib/os/gsplatform.py` 文件: + +> 80 - 13行左右,增加操作系统平台信息和对应的版本。 + +> 1472行,getPackageFile 函数,增加对应的包名称。 + +`script/local/LocalCheckOS.py` 文件: + +> CheckPlatformInfo函数,增加操作系统平台与对应版本信息。 + +#### 2.修改操作系统os-release信息 + +安装的脚本代码会校验操作系统的版本,是否在支持的列表中。 我们也可以通过修改操作系统的版本标识为已有的这些系统,来骗过安装的脚本校验。 + +操作系统版本相关的信息,在 
/etc目录下的release文件中: +``` +[root@ecs-6ac8 ~]# ll /etc | grep release +-rw-r--r-- 1 root root 37 Apr 6 15:52 centos-release +-rw-r--r-- 1 root root 51 Nov 23 23:08 centos-release-upstream +drwxr-xr-x. 2 root root 4096 Dec 13 00:27 lsb-release.d +lrwxrwxrwx 1 root root 21 Apr 1 14:32 os-release -> ../usr/lib/os-release +lrwxrwxrwx 1 root root 14 Apr 1 14:32 redhat-release -> centos-release +lrwxrwxrwx 1 root root 14 Apr 1 14:32 system-release -> centos-release +-rw-r--r-- 1 root root 23 Nov 23 23:08 system-release-cpe +``` + +我们修改这些文件,将其中内容改为对应的openEuler的或者centos的信息。 + +例如在centos7.4系统下,使用官方发布的包安装不上,可以修改`/etc/os-release`,改版本为7.6,即可正常安装。 diff --git a/content/zh/post/xingchen/opengauss_compile.md b/content/zh/post/xingchen/opengauss_compile.md new file mode 100644 index 0000000000000000000000000000000000000000..75b7f2fc2cb8c97c57d7f2bce47d14fef3799df6 --- /dev/null +++ b/content/zh/post/xingchen/opengauss_compile.md @@ -0,0 +1,265 @@ ++++ +title = "openGauss数据库编译指导" +date = "2021-02-23" +tags = ["openGauss数据库编译指导"] +archives = "2021-02-23" +author = "xingchen" +summary = "openGauss数据库编译指导" +img = "/zh/post/xingchen/title/img1.png" +times = "19:30" ++++ + + + + +- [1. 编译三方库](#1.编译三方库) + - [1.1.编译步骤概述](#1.1.编译步骤概述) + - [1.2.编译gcc和cmake](#1.2.编译gcc和cmake) + - [1.3.脚本增加平台信息](#1.3.脚本增加平台信息) + - [1.4.编译三方库](#1.4.编译三方库) + - [1.5.编译完成](#1.5.编译完成) +- [2.编译数据库](#2.编译数据库) + - [2.1.编译数据库](#2.1.编译数据库) + - [2.2.编译适配其他系统](#2.2.编译适配其他系统) + - [2.3.编译适配其他版本gcc](#2.3.编译适配其他版本gcc) + + + +# openGauss数据库编译指导 + +openGauss数据库的编译依赖很多三方库,社区已经在 `openGauss-third_party` 仓库中将所有的依赖放入其中,按照操作编译完三方库后,就可以使用编译好的三方库来编译数据库了。详细步骤如下: + +## 1.编译三方库 + +社区针对 `centos_7.6_x86_64`、`openEuler20.03 LTS_arm`、`openEuler20.03 LTS_ x86_64` 三种架构及操作系统已经提供了编译好的二进制,对于这三种系统架构,可以直接使用社区提供的编译好的文件[openGauss-third_party_binarylibs.tar.gz](https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.1.0/openGauss-third_party_binarylibs.tar.gz),对于其他系统,需要用户自己编译。 + +针对其他系统编译三方库步骤 + +### 1.1.编译步骤概述 + +(1) 需要自行编译好 `gcc` 和 `cmake`,gcc推荐使用 `7.3.0` 版本,cmake推荐 `>=3.16`(如果系统中的cmake版本>=3.16则直接使用系统自带cmake即可)。 +(2) 下载三方库源码,修改脚本增加新的平台信息。 +(3) 进入到 `openGauss-third_party/build` 下,运行 `sh build_all.sh` ,即可进行全量三方库的编译。 + 在此编译过程中,首先编译了 `openssl` ,然后按照顺序编译构建工具 `buildtools` 、平台软件 `platform` 、三方依赖 `dependency`。 +(4) 编译完成后,编译结果在 `openGauss-third_party/output` 目录下。 + +### 1.2.编译gcc和cmake + +gcc和cmake编译请参照:[gcc编译指导](/zh/post/xingchen/gcc_compile/) + +编译完成后,将gcc和cmake导入到环境变量中,然后便可进行三方库编译: +``` +export CMAKEROOT=$cmake_prefix ##编译cmake指定的--prefix +export GCC_PATH=$gcc_prefix ##编译gcc指定的--prefix +export CC=$GCC_PATH/gcc/bin/gcc +export CXX=$GCC_PATH/gcc/bin/g++ +export LD_LIBRARY_PATH=$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$CMAKEROOT/lib:$LD_LIBRARY_PATH +export PATH=$GCC_PATH/gcc/bin:$CMAKEROOT/bin:$PATH +``` + +### 1.3.脚本增加平台信息 + +修改 `openGauss-third_party/build/get_PlatForm_str.sh` 增加新的平台,如下图 + +![](../images/compile/1.3.1.png) + +$kernel信息可以通过命令获取: +``` +lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z +``` + +在以下python的三方依赖中,增加平台信息。 +``` +openGauss-third_party/dependency/fio/build.py +openGauss-third_party/dependency/iperf /build.py +openGauss-third_party/dependency/jemalloc/build.py +openGauss-third_party/dependency/kerberos/build.py +openGauss-third_party/dependency/libcgroup/build.py +openGauss-third_party/dependency/libedit/build.py +openGauss-third_party/dependency/nanomsg /build.py +openGauss-third_party/dependency/numactl/build.py +openGauss-third_party/dependency/openssl/build.py 
+openGauss-third_party/dependency/protobuf/build.py +``` + +binary_parse 函数中,增加新的平台相关信息配置。这几个文件改动相同 + +![](../images/compile/1.3.2.png) + +以下错误信息均是由于未增加平台信息引起: +``` +Failed +[Error] the plat form is not supported! +[ERROR] Not supported platform type +``` + + +### 1.4.编译三方库 + +进入到`openGauss-third_party/build`目录,运行 `sh build_all.sh`,全量编译三方库 +如果在编译过程中,某一步有错误,解决错误后,可以注释掉已经编译好的三方库,只编译还未编译成功的库,分组单独进行编译。 + +***编译buildtools*** +``` +cd openGauss-third_party/build_tools +sh build_tools.sh +``` + +***编译platform*** +``` +cd openGauss-third_party/platform/build/ +sh build_platform.sh +``` + +***编译dependency*** +``` +cd openGauss-third_party/dependency/build/ +sh build_dependency.sh +``` + + +### 1.5.编译完成 +编译结果在 `openGauss-third_party/output` 之中,目录层级结构如下(`${platform}` 即为当前平台信息): +``` +openGauss-third_party + - output + - buildtools + - license_control + - server_key + - ${platform} + - gcc7.3 + - common + - commons-codec + - fastjson + …… + - dependency + - ${platform} + - boost + - cjson + …… + - install_tools_${platform} + - asnlcrypto + …… + - platform + - ${platform} + - openjdk + …… +``` +将编译好的 `gmp mpfr mpc isl gcc` 目录拷贝到`openGauss-third_party/output/buildtools/${platform}/gcc7.3`下,output目录即为完整的三方库二级制。将output目录拷贝出去,重命名为binarylibs,便可以使用它进行数据库编译。 + +## 2.编译数据库 + +以上完成三方库编译后,就可以开始数据库的编译了。以下介绍如何进行编译,以及修改代码适配不同的gcc版本、不同的操作系统。 + +### 2.1.编译数据库 + +下载openGauss-server代码,进入到源码目录下。 + +***一键编译*** + +编译命令: +``` +sh build.sh -m release -3rd /usr2/compile/binarylibs +``` + +-3rd 为第一步编译好的三方库二进制目录 +-m 参数可选择 `debug|release|memcheck` +编译完成后,目标文件在`./mppdb_tmp_install`下面。 + +***手动编译*** + +导入环境变量: +``` +export CODE_BASE=/usr2/compile/openGauss-server +export BINARYLIBS=/usr2/compile/binarylibs +export GAUSSHOME=$CODE_BASE/dest/ +export GCC_PATH=$BINARYLIBS/buildtools/openeuler_aarch64/gcc7.3/ +export CC=$GCC_PATH/gcc/bin/gcc +export CXX=$GCC_PATH/gcc/bin/g++ +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH +export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH +``` + +参数说明: +CODE_BASE 为openGauss-server源码目录; +BINARYLIBS 为第一步编译好的三方库二进制目录; +GAUSSHOME 为编译完成的目标文件路径; +GCC_PATH 二进制中,GCC编译结果存放的路径,需要修改里面的openeuler_aarch64为实际的平台信息。 + + + +编译命令如下: +``` +./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-readline --without-zlib +make –sj +make install –sj +``` + +### 2.2.编译适配其他系统 + +对于 `centos_7.6` 以及 `openEuler20.03 LTS` 操作系统,可以直接编译。其他系统需要修改代码适配。 + +***修改脚本新增平台:*** + +打开 `openGauss-server/src/get_PlatForm_str.sh`,在里面(134行)增加新的平台信息,例如: + +![](../images/compile/2.2.1.png) + +在 `openGauss-server/build/script/mpp_package.sh:46` 行 +和 `openGauss-server/build/script/package_opengauss.sh:55` 行 +增加平台的信息。 + +![](../images/compile/2.2.2.png) + +***修改Makefile文件*** +打开`openGauss-server/src/gausskernel/Makefile`文件,在702行,复制一份其他系统的并更改为当前平台。 + +![](../images/compile/2.2.3.png) + +***一键编译*** +``` +sh build.sh -m release -3rd /usr2/compile/binarylibs +``` + +***手动编译*** + +参照***2.1编译数据库***的***手动编译***命令,只需要修改 `GCC_PATH` 中平台参数,其他操作保持一致。 +``` +export GCC_PATH=$BINARYLIBS/buildtools/ubuntu_x86_64/gcc7.3/ +``` +![](../images/compile/2.1.1.png) + +### 2.3.编译适配其他版本gcc + +当前openGauss固定编译使用的gcc版本为 `gcc7.3`,如果使用其他gcc版本,例如 `gcc8.2`(支持c11标准),也可以进行编译。需要修改代码适配。 + +***修改配置中的gcc版本为指定版本*** +``` +openGauss-server/configure:936行,gcc_version改为指定版本。 +openGauss-server/build/script/mpp_package.sh 62行和311行(关键字gcc_version),7.3.0改为指定版本。 +``` +configure文件: 
+![](../images/compile/gcc_conf.png) +mpp_package文件: +![](../images/compile/gcc_mpp1.png) +![](../images/compile/gcc_mpp2.png) + + +***一键编译*** +``` +sh build.sh -m release -3rd /usr2/compile/binarylibs +``` + +***手动编译*** +参照2.1手动编译命令,修改环境变量的gcc版本为指定版本: +``` +export GCC_PATH=$BINARYLIBS/buildtools/openeuler_aarch64/gcc8.2/ +``` +![](../images/compile/2.3.1.png) + +修改configure版本为指定版本: +``` +./configure --gcc-version=8.2.0 ........ +``` +![](../images/compile/2.3.2.png) +其他命令与2.1一致。 \ No newline at end of file diff --git a/content/zh/post/xiteming/HowtorunFastcheck.md b/content/zh/post/xiteming/HowtorunFastcheck.md new file mode 100644 index 0000000000000000000000000000000000000000..ba4b28ae0831bc45a9abd3879e3d4388e2f38353 --- /dev/null +++ b/content/zh/post/xiteming/HowtorunFastcheck.md @@ -0,0 +1,75 @@ ++++ +title = "如何跑各种check" +date = "2021-11-09" +tags = ["openGauss如何跑各种check"] +archives = "2021-11-09" +author = "xiteming, pengjiong" +summary = "如何跑各种check" +img = "/zh/post/xingchen/title/img1.png" + ++++ + +## 如何进行Fastcheck? +首先,导入环境变量: +``` +export CODE_BASE=/data/openGauss-server +export BINARYLIBS=/data/openGauss-third_party_binarylibs +export GAUSSHOME=$CODE_BASE/dest/ +export GCC_PATH=$BINARYLIBS/buildtools/openeuler_aarch64/gcc7.3/ +export CC=$GCC_PATH/gcc/bin/gcc +export CXX=$GCC_PATH/gcc/bin/g++ +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH +export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH +``` +需要准备好的文件有:testname.sql和testname.out; +第一步:将testname.sql放入/src/test/regress/sql路径下,同时将testname.out放入/src/test/regress/expected路径下。 +Tip1:执行完本步后,需要注意两个问题: +(1) 文件权限问题,相关命令关键字:chmod,chown; +(2) 文件格式问题,相关命令关键字:dos2unix。 +第二步:在/src/test/regress/parallel_schedule0中添加你的测试用例: +test:testname +第三步:进入源码根目录进行configure: +``` +./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib +``` +第四步:在源码根目录下编译及安装 +make -sj +make install –sj +第五步:在/src/test/regress目录下执行语句: +make fastcheck_single +经验技巧: +1.如何确定期望输出:对于期望输出,如果你的测试用例的输出是确定的,那么一个最简单的方法就是先创建一个parallel_scheduleYYY的临时文件,里面只包含你要添加的测试用例,然后运行一次make fastcheck_single,这样得到的diffs中就包含是你的期望输出。 + +## 如何进行memcheck? +memcheck并不是一个新的check,只是编译openGauss时,编译一个memcheck版的,然后通过跑fastcheck_single来发现代码中的内存问题。 +编译方式和编译普通的openGauss基本一致,只是在configure时,添加一个 ```--enable-memory-check``` 参数,编译出来的就是memcheck版本的openGauss。 +``` +./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib --enable-memory-check +``` +跑memcheck之前,需要设置环境变量: +```shell +ulimit -v unlimited +``` +设置完环境变量后,正常跑fastcheck_single即可,跑完后,会在 ```~/memchk/asan/```路径下生成文件名为runlog.xxx的memcheck报告。根据memcheck报告分析是否有内存问题。如何分析memcheck报告可自行网上搜索memcheck报告分析、asan报告分析等关键字。 + +## 如何进行hacheck? 
+hacheck是对openGauss主备功能进行测试的check。openGauss的编译方式同fastcheck,编译完成后,进入 ```src/test/ha``` 目录,修改standby_env.sh文件,在文件最前面新增一行
+```shell
+export prefix=$GAUSSHOME
+```
+脚本将尝试通过ifconfig命令获取本机IP,如果本机网卡的名称不是eth0、eth1、ens4f0、enp2s0f0、enp2s0f1、enp125s0f0之一,获取IP将失败,此时可以在
+```
+enp125s0f0=`/sbin/ifconfig enp125s0f0|sed -n 2p |awk '{ print $2 }'`
+```
+的下面手动添加本机IP地址:
+```
+enp125s0f0=`/sbin/ifconfig enp125s0f0|sed -n 2p |awk '{ print $2 }'`
+eth0ip=1.1.1.1
+```
+配置好脚本后,执行hacheck脚本:
+```shell
+sh run_ha_multi_single.sh
+sh run_ha_single.sh
+```
+运行是否成功会在屏幕打印 ok/failed,运行日志在 ```src/test/ha/results``` 目录下。
+
diff --git a/content/zh/post/xiteming/image/docker_images.PNG b/content/zh/post/xiteming/image/docker_images.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..0f147e3c83ae42950e589a2ef3430d3c7f891a7e
Binary files /dev/null and b/content/zh/post/xiteming/image/docker_images.PNG differ
diff --git a/content/zh/post/xiteming/image/name_fix.PNG b/content/zh/post/xiteming/image/name_fix.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..fa92ea5a7d8298109580856f1634d11d082ee526
Binary files /dev/null and b/content/zh/post/xiteming/image/name_fix.PNG differ
diff --git a/content/zh/post/xiteming/image/openGauss_images.PNG b/content/zh/post/xiteming/image/openGauss_images.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..d1bacd8f5b399cc18c7e53bfd06b89fbed5c801d
Binary files /dev/null and b/content/zh/post/xiteming/image/openGauss_images.PNG differ
diff --git a/content/zh/post/xiteming/image/run.PNG b/content/zh/post/xiteming/image/run.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..3fc2effc4209545a0b9a06732e3b66563f7653c8
Binary files /dev/null and b/content/zh/post/xiteming/image/run.PNG differ
diff --git a/content/zh/post/xiteming/title/img1.png b/content/zh/post/xiteming/title/img1.png
new file mode 100644
index 0000000000000000000000000000000000000000..65e2d4c4751f069c64357704715e2ba99beb511a
Binary files /dev/null and b/content/zh/post/xiteming/title/img1.png differ
diff --git "a/content/zh/post/xiteming/\351\200\232\350\277\207Docker\345\256\211\350\243\205openGauss.md" "b/content/zh/post/xiteming/\351\200\232\350\277\207Docker\345\256\211\350\243\205openGauss.md"
new file mode 100644
index 0000000000000000000000000000000000000000..8f4110406e164a2e5e42278bb0adaf5390982621
--- /dev/null
+++ "b/content/zh/post/xiteming/\351\200\232\350\277\207Docker\345\256\211\350\243\205openGauss.md"
@@ -0,0 +1,126 @@
++++
+title = "通过Docker安装openGauss"
+date = "2022-02-19"
+tags = ["通过Docker安装openGauss"]
+archives = "2022-02-19"
+author = "xiteming"
+summary = "通过Docker安装openGauss"
+img = "/zh/post/xiteming/title/img1.png"
+times = "19:30"
++++
+
+### 通过Docker安装openGauss
+
+#### Docker概述
+
+Docker 是一个开源的应用容器引擎,让开发者可以打包他们的应用以及依赖包到一个可移植的容器中,然后发布到任何流行的Linux或Windows操作系统的机器上,也可以实现虚拟化。容器完全使用沙箱机制,相互之间不会有任何接口。
+
+#### 概述
+
+本章节介绍通过Docker安装单机版openGauss,以openGauss-server 2.1.0版本、openEuler-20.03系统镜像和openGauss 2.1.0的openEuler安装包为例。
+
+#### 前提准备
+
+1. openGauss-server代码库。(下载地址:https://gitee.com/opengauss/openGauss-server)
+
+2. openeuler操作系统在docker环境下的镜像文件。(下载地址:https://repo.openeuler.org/openEuler-20.03-LTS/docker_img/aarch64/)
+
+3. openGauss在openEuler平台的软件安装包。(下载地址:https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.1.0/arm/openGauss-2.1.0-openEuler-64bit-all.tar.gz)
+
+4. openEuler_aarch64.repo文件。(下载地址:https://mirrors.huaweicloud.com/repository/conf/openeuler_aarch64.repo)
+
+#### 上传软件包
+
+1. 在Linux系统下,创建目录来存放软件包
+```
+mkdir -p /opt/xxx
+```
+2. 通过ftp等工具,将openGauss-2.1.0-openEuler-64bit-all.tar.gz包放到/opt/xxx目录下
+
+#### 安装docker
+```
+yum list |grep docker-engine.aarch64 //查看版本信息
+yum install -y docker
+```
+
+#### 验证docker安装是否成功
+```
+docker version
+```
+
+#### 加载openeuler docker镜像文件
+```
+docker load -i openEuler-docker.aarch64.tar.xz
+```
+
+#### 查看openeuler docker镜像是否加载成功
+```
+docker images
+```
+![](../image/docker_images.PNG)
+
+#### 修改dockerfile_arm文件
+
+进入到/opt/xxx/openGauss-server/docker/dockerfiles路径下,`cd /opt/xxx/openGauss-server/docker/dockerfiles`
+
+将1.1.0文件夹名称修改为2.1.0,`mv 1.1.0 2.1.0`
+
+进入该文件夹,`cd 2.1.0`
+
+打开dockerfile_arm文件,`vim dockerfile_arm`
+
+将openGauss版本名统一修改成2.1.0,如下图所示:
+
+![](../image/name_fix.PNG)
+
+#### 创建openGauss docker镜像
+
+1. 进入存放软件安装包的路径下,将下载好的openGauss-2.1.0-openEuler-64bit-all.tar.gz安装包解压
+```
+tar -zxvf openGauss-2.1.0-openEuler-64bit-all.tar.gz
+```
+
+2. 将解压出来的openGauss-2.1.0-openEuler-64bit.tar.bz2复制至/opt/xxx/openGauss-server/docker/dockerfiles/2.1.0
+```
+cp openGauss-2.1.0-openEuler-64bit.tar.bz2 /opt/xxx/openGauss-server/docker/dockerfiles/2.1.0
+```
+
+3.将openEuler_aarch64.repo文件放到/opt/xxx/openGauss-server/docker/dockerfiles/2.1.0
+```
+mv openEuler_aarch64.repo /opt/xxx/openGauss-server/docker/dockerfiles/2.1.0
+```
+
+4.创建openGauss docker镜像,-v 后面的值为版本号,-i 表示跳过MD5检查
+```
+sh buildDockerImage.sh -v 2.1.0 -i
+```
+#### 查看openGauss docker镜像是否创建成功,成功后如下图所示
+
+```
+docker images
+```
+
+![](../image/openGauss_images.PNG)
+
+#### 开启openGauss实例
+
+```
+docker run --name OG1 --privileged=true -d -e GS_PASSWORD=openGauss@123 -e GS_NODENAME=test -e GS_USERNAME=test -p 8888:5432 opengauss:2.1.0
+```
+
+#### 进入docker
+
+```
+docker exec -ti OG1 /bin/bash
+```
+
+#### 登录子用户并连接数据库,成功后如下图所示
+
+```
+su - omm
+gsql -d postgres -p 5432 -r
+```
+
+![](../image/run.PNG)
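+
+容器拉起后,也可以先在宿主机上确认容器与数据库的启动状态再进入容器(以下为假设性的检查示例,容器名以实际为准):
+```shell
+docker ps --filter name=OG1     # 确认容器处于 Up 状态
+docker logs OG1 | tail -n 20    # 查看启动日志,确认数据库初始化已完成
+```
diff --git a/content/zh/post/xixingxing/title/title.jpg b/content/zh/post/xixingxing/title/title.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5f8656d78745d35942f059d7d512d8c565ea9d70
Binary files /dev/null and b/content/zh/post/xixingxing/title/title.jpg differ
diff --git "a/content/zh/post/xixingxing/\347\274\226\350\257\221\345\256\211\350\243\205openGuass 3.0.0.md" "b/content/zh/post/xixingxing/\347\274\226\350\257\221\345\256\211\350\243\205openGuass 3.0.0.md"
new file mode 100644
index 0000000000000000000000000000000000000000..268656ec35fdc5de2b76ba9fc12c2fbce7718977
--- /dev/null
+++ "b/content/zh/post/xixingxing/\347\274\226\350\257\221\345\256\211\350\243\205openGuass 3.0.0.md"
@@ -0,0 +1,252 @@
++++
+title = "编译安装openGauss 3.0.0"
+date = "2022-05-16"
+tags = ["编译安装openGauss 3.0.0"]
+archives = "2022-05"
+author = "xixingxing"
+summary = "编译安装openGauss 3.0.0"
+img = "/zh/post/xixingxing/title/title.jpg"
+times = "17:30"
++++
+
+## 编译安装openGauss 3.0.0
+
+### 1. 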
环境检查 +#### 1.1 检查OS版本 + +```c +openGauss支持的操作系统: + +CentOS 7.6(x86 架构) +openEuler-20.03-LTS(aarch64 架构) +openEuler-20.03-LTS(x86 架构) +Kylin-V10(aarch64 架构) +``` + +[root@og3 ~]# cat /etc/redhat-release +CentOS Linux release 7.6.1810 (Core) + +#### 1.2 修改主机名及/etc/hosts +hostnamectl set-hostname og +cat>>/etc/hosts<>/etc/yum.repos.d/rhel-source.repo <>/etc/profile<>/etc/profile<> /opt/software/openGauss/cluster_config.xml <<-EOF + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +EOF + + + + +#### 5.4 初始化安装环境 +cd /opt/software/openGauss/script +./gs_preinstall -U omm -G dbgrp -L -X /opt/software/openGauss/cluster_config.xml + + +### 6. 执行安装 +su - omm +gs_install -X /opt/software/openGauss/cluster_config.xml + + +### 7. 初始化数据库 +[root@og openGauss]# su - omm +Last login: Mon May 9 19:28:07 CST 2022 on pts/1 + +#### 7.1 检查数据库状态 + +[omm@og ~]$ gs_om -t status + +```shell +执行如下命令检查数据库状态是否正常,“cluster_state ”显示“Normal”表示数据库可正常使用。 +``` + +[omm@og ~]$ gsql -d postgres -p 15400 +openGauss=# CREATE DATABASE mydb WITH ENCODING 'GBK' template = template0; +CREATE DATABASE +openGauss=# + + diff --git a/content/zh/post/xuemengen/gs_expansion.md b/content/zh/post/xuemengen/gs_expansion.md new file mode 100644 index 0000000000000000000000000000000000000000..12faab480db326efd522cb53bd788cc95d122229 --- /dev/null +++ b/content/zh/post/xuemengen/gs_expansion.md @@ -0,0 +1,58 @@ ++++ +title = "openGauss数据库扩容指导" +date = "2021-12-20" +tags = ["openGauss数据库扩容指导"] +archives = "2021-12-20" +author = "xuemengen" +summary = "openGauss数据库扩容指导" +img = "/zh/post/xuemengen/title/img1.png" +times = "20:00" ++++ + +## 前置条件 +当前集群状态正常 +## 操作步骤 +1、新节点创建用户和用户组,注意需要与当前集群的用户与用户组一致,密码也要保持一致。假设当前集群用户为omm。 +2、检查新节点环境变量,清理和openGauss相关的环境变量配置。主要检查/etc/profile和/home/omm/.bashrc两个文件。如果清理不干净,会导致扩容不成功。或者提示待扩容备机节点已经安装。 +3、在主节点准备与当前主节点版本相同的安装包并解压,进入script目录。 +4、创建主节点与其他节点互信,包括root用户和omm用户,可以使用opengauss提供的工具`gs_sshexkey`创建互信。 +``` +./gs_sshexkey -f /home/omm/hostfile +``` +集群内所有的ip,每个ip及主机名一行: +``` +192.168.1.1 +192.168.1.2 +192.168.1.3 +Host1 +Host2 +Host3 +``` + +执行结果提示如下代表互信建立成功 +``` +Successfully distributed SSH trust file to all node. +Verifying SSH trust on all hosts. +Successfully verified SSH trust on all hosts. +Successfully created SSH trust. +``` +`注意:` +如果是同一台机器恢复后再加入集群,需要清理root用户和omm用户的~/.ssh/know_host和~/.ssh/authorized_keys里的相关信息,否则创建互信会失败。 +需要分别在root用户和omm用户下执行,各节点密码需要一致,后期可以再修改。 +全新的机器需要安装python3。 +5、创建新的xml文件,将新节点信息加入其中。 + +6、执行扩容操作 +切换到root用户,务必要首先导入当前集群环境变量,`source /home/omm/env` +``` +./gs_expansion -U omm -G dbgrp -h 192.168.1.2 -X ./clusterconfig.xml +``` +最后显示如下 +``` +Expansion results: +192.168.1.2: Success +Expansion Finish. 
+```
+即扩容成功。
+## 常见问题
+如果数据量较大,建立主备联系的过程可能较长。如果中途由于网络中断等原因导致建联失败,此时新节点的数据库其实已经安装成功,再次执行扩容时无需重复安装,在扩容命令末尾加上-L即可跳过安装过程。
\ No newline at end of file
diff --git a/content/zh/post/xuemengen/image/patroni_principle.png b/content/zh/post/xuemengen/image/patroni_principle.png
new file mode 100644
index 0000000000000000000000000000000000000000..cbd881baddd4d12e50a3fe10fa5f7989f5ae37ab
Binary files /dev/null and b/content/zh/post/xuemengen/image/patroni_principle.png differ
diff --git a/content/zh/post/xuemengen/patroniForOpenGauss.md b/content/zh/post/xuemengen/patroniForOpenGauss.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad618e30811d5d4ce2b6923e3d4897b4203b3340
--- /dev/null
+++ b/content/zh/post/xuemengen/patroniForOpenGauss.md
@@ -0,0 +1,26 @@
++++
+title = "patroniForOpenGauss高可用方案基本原理"
+date = "2021-09-01"
+tags = ["openGauss分布式解决方案"]
+archives = "2021-09-01"
+author = "xuemengen"
+summary = "patroniForOpenGauss高可用方案基本原理"
+img = "/zh/post/xuemengen/title/img.png"
+times = "9:30"
++++
+
+## 1 patroni简介
+
+  Patroni是一个由Zalando研发、完全用Python开发的开源产品,它通过分布式配置存储(Distributed Configuration Store,DCS)记录数据库集群各个节点的状态和配置,并能够对数据库集群进行自动管理和故障切换。
+## 2 patroni原理介绍
+
+  一个高可用集群由patroni、DCS和数据库组成,本方案中DCS选用etcd,数据库为openGauss。
+  etcd是一个分布式键值对存储,设计用来可靠而快速地保存关键数据并提供访问,通过分布式锁、leader选举和写屏障(write barriers)来实现可靠的分布式协作。etcd集群是为高可用、持久化的数据存储和检索而准备的。
+  patroni通过api接口连接到etcd,向其写入键值对,记录patroni参数、数据库参数、主备信息以及连接信息;平常通过etcd对其它节点做心跳检测,并根据etcd中存储的主备信息判断各节点的状态,对集群进行自动管理。其基本原理如下图所示。
+![patroni基本原理图](../image/patroni_principle.png)
+  如图所示,同一时刻最多只能有一个patroni节点成为leader,即最多只能有一个patroni节点能够持有leader锁,因此能够避免脑裂的发生。
+当前patroni-for-openGauss支持修复的故障场景如下:
+1. 主数据库意外停止,但可以通过重启恢复:立即自动启动主数据库;
+2. 主数据库意外故障,且无法启动:当前主机先释放leader锁降备,然后自动选择一个最健康的备机(即同步情况与主机最接近的备机)提升为主机;
+3. 备库意外挂机,重启后可立即恢复正常并与主机连接:立即进行重启恢复;
+4. 备库意外故障,可正常启动但启动后落后于主机状态:对其进行重建操作以恢复其状态。
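+
+在运维中可以直接查看Patroni写入DCS的键值来确认各节点状态。下面是一个假设性的查询示例(假设DCS命名空间为Patroni默认的/service、集群scope名为ogcluster,均以实际配置为准):
+```shell
+export ETCDCTL_API=3
+# 列出该集群在 etcd 中登记的全部键值:members、config、leader 等
+etcdctl get --prefix /service/ogcluster/
+# 其中 /service/ogcluster/leader 键的值即当前持有 leader 锁的节点名
+```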
diff --git a/content/zh/post/xuemengen/title/img.png b/content/zh/post/xuemengen/title/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d
Binary files /dev/null and b/content/zh/post/xuemengen/title/img.png differ
diff --git a/content/zh/post/xuemengen/title/img1.png b/content/zh/post/xuemengen/title/img1.png
new file mode 100644
index 0000000000000000000000000000000000000000..65e2d4c4751f069c64357704715e2ba99beb511a
Binary files /dev/null and b/content/zh/post/xuemengen/title/img1.png differ
diff --git a/content/zh/post/yanghaiyan/.keep b/content/zh/post/yanghaiyan/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/zh/post/yanghaiyan/images/.keep b/content/zh/post/yanghaiyan/images/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git "a/content/zh/post/yanghaiyan/openGauss\344\275\277\350\203\275paxos\347\211\271\346\200\247\345\256\236\350\267\265.md" "b/content/zh/post/yanghaiyan/openGauss\344\275\277\350\203\275paxos\347\211\271\346\200\247\345\256\236\350\267\265.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f00fb7a4be1ada0b025f4cc450b66b25b3918295
--- /dev/null
+++ "b/content/zh/post/yanghaiyan/openGauss\344\275\277\350\203\275paxos\347\211\271\346\200\247\345\256\236\350\267\265.md"
@@ -0,0 +1,83 @@
++++
+title = "openGauss使能paxos特性实践"
+
+date = "2021-10-21"
+
+tags = ["openGauss使能paxos特性"]
+
+archives = "2021-10"
+
+author = "yanghaiyan"
+
+summary = "openGauss使能paxos"
+
+img = "/zh/post/yanghaiyan/title/img1.png"
+
+times = "17:00"
++++
+
+# openGauss使能paxos特性实践
+
+#### 前言
+GaussDB(for openGauss)推出了基于Paxos协议的DCF高可用组件,该组件使得GaussDB(for openGauss)在保证数据一致性的同时,在高可用方面进一步得到增强,包括:
+(1)通过自仲裁、多数派选主能力摆脱第三方仲裁组件,极大缩短RTO时间,且可预防任何故障下的脑裂双主;
+(2)支持节点同步、同异步混合部署的多集群部署模式;
+(3)提升主备节点间日志复制效率,提升系统的最大吞吐能力。
+借助GaussDB(for openGauss)的DCF高可用组件,用户不仅可以免去系统脑裂的风险,还可以提升可用性。
+#### 社区版本说明
+GaussDB(for openGauss)的930开源计划当前已实施,正式推出了基于Paxos协议的DCF高可用组件内核新特性。本文详细说明openGauss使能paxos特性的实践操作流程,让用户通过自适配使能paxos特性,体验集群高可用增强能力。
+
+#### 使能paxos特性实践
+##### 1、版本下载与安装
+1) 从官方社区下载最新2.1.0版本
    +下载链接:[https://opengauss.org/zh/download.html](http://) +2) 创建配置文件和解压缩安装包
    +具体可参考openGauss安装流程: [https://gitee.com/opengauss/openGauss-server#%E5%AE%89%E8%A3%85)](http://) +3) 初始化安装环境:修改initdb配置参数(增加-c参数,使能dn创建paxosIndex信息文件)
    +具体的修改文件: script/gspylib/component/Kernel/DN_OLAP/DN_OLAP.py;修改函数:initInstance(搜索关键字“gs_initdb”有两处match位置)
    +修改示例: +```c{.line-num} +cmd = "%s/gs_initdb --locale=C -D %s -X %s --nodename=%s %s -c -C %s" +``` +4) 执行预安装和正式安装 +##### 2、使能DCF配置 +1) 进入到DN目录,通过修改postgresql.conf文件配置使能paxos dcf特性
    +配置示例(在此以集群3节点配置为例, **注意:每个节点均需要修改** ): +```c{.line-num} +#1. 使能dcf特性开关 +enable_dcf = on +#2. 当前节点id, 如果集群为3节点则每个节点可分别配置为1、2、3 +dcf_node_id = 1 +#3. 指定dcf数据目录 +dcf_data_path = '/xxx/cluster/data1/dn1/dcf_data' +#4. 指定dcf集群配置信息,每个节点上dcf_config内容一致,其中配置的ip/端口专用于dcf节点间通信链路,注意与所有其他已使用的ip/端口不要配置冲突 +dcf_config = '[{"stream_id":1,"node_id":1,"ip":"x.x.x.21","port":xx,"role":"LEADER"},{"stream_id":1,"node_id":2,"ip":"x.x.x.22","port":xx,"role":"FOLLOWER"},{"stream_id":1,"node_id":3,"ip":"x.x.x.23","port":xx,"role":"FOLLOWER"}]' +``` +##### 3、集群DCF模式运行 +- 切换到用户角色,依次启动节点1/2/3: +```c{.line-num} +gaussdb -D /xxx/cluster/data1/dn1 -M standby & +``` +待集群多数派节点启动成功后,即可以paxos模式运行;
+通过:gs_om -t status --detail指令可查询节点状态信息;
+
+##### 4、(可选)集群故障模式恢复参考
+集群故障模式下,可通过以下少数派和重建流程来恢复集群paxos模式正常运行:
+- 手动设置存活节点为少数派模式运行
+```c{.line-num}
+gs_ctl setrunmode -D PATH -v 1 -x minority
+```
+- 集群其他节点主动重建拉起
+```c{.line-num}
+gs_ctl build -b full -Z single_node -D PATH
+```
+- 存活节点重回多数派
+```c{.line-num}
+gs_ctl setrunmode -D PATH -x normal
+```
+至此,集群已经可以paxos多数派模式正常运行,对外提供服务了。
+- 状态信息验证查询
+```c{.line-num}
+gs_ctl query -D PATH
+```
+通过该指令可以查询到本节点HA状态和Paxos复制状态的相关信息。
\ No newline at end of file
diff --git "a/content/zh/post/yanzongshuai/openguass-NUMA\351\200\202\351\205\215\344\271\213\347\272\277\347\250\213\347\273\221\346\240\270.md" "b/content/zh/post/yanzongshuai/openguass-NUMA\351\200\202\351\205\215\344\271\213\347\272\277\347\250\213\347\273\221\346\240\270.md"
new file mode 100644
index 0000000000000000000000000000000000000000..9772541aa481cff556933f5373ff19f679410d28
--- /dev/null
+++ "b/content/zh/post/yanzongshuai/openguass-NUMA\351\200\202\351\205\215\344\271\213\347\272\277\347\250\213\347\273\221\346\240\270.md"
@@ -0,0 +1,135 @@
++++
+
+title = "openGauss NUMA适配之线程绑核"
+
+date = "2021-06-29"
+
+tags = ["openGauss NUMA适配之线程绑核"]
+
+archives = "2021-06"
+
+author = "闫宗帅"
+
+summary = "openGauss NUMA适配之线程绑核"
+
+times = "19:30"
+
++++
+
+# openGauss NUMA适配之线程绑核
+
+## 1、多核NUMA结构
+
+NUMA(Non-uniform memory access,非统一内存访问架构)出现前,CPU通过内存控制器访问内存;随着CPU核数的增加,内存控制器成为瓶颈。NUMA架构一般将内存平均划分到各个node节点上,CPU访问本地内存速度快,跨片访问慢。NUMA距离定义为:NUMA node的处理器和内存块的物理距离。通过numactl工具可以查看到CPU访问的距离信息。
+
+## 2、NUMA绑核优化思路
+
+目标是避免线程在运行中在不同核之间漂移,从而引起对NUMA远端内存的访问。openGauss通过配置参数thread_pool_attr控制CPU绑核分配,该参数仅在enable_thread_pool打开后生效。参数分为3部分:'thread_num,group_num,cpubind_info'。
+
+其中thread_num:线程池中线程总数,取值0-4096。0表示根据CPU核数量自动配置线程池中线程数;如果大于0,线程池中线程数等于该值。
+
+group_num:线程池中线程分组个数,取值0-64。0表示根据NUMA组个数自动配置线程池中分组个数,否则分组个数为group_num。
+
+cpubind_info:线程池是否绑核的配置参数。可以配置:
+
+1(nobind),线程不绑核;
+
+2(allbind),利用当前系统所有能查询到的CPU核做线程绑核;
+
+3 (nodebind:1,2),利用NUMA组1,2中CPU核进行绑核;
+
+4 (cpubind:0-30),利用0-30号CPU核进行绑核。
+
+默认值为'16,2,(nobind)'。
+
+为充分利用CPU,线程数可略大于核数:因为可能有线程处于等待状态,此时可切换到其他线程执行。
+
+## 3、源码解析
+
+**操作流程**
+
+![](figures/20210614-1f4513c9-5d18-43e2-9789-a94d260cd602.png)
+
+- 在PostmasterMain中开始设置线程绑定动作
+- 如果设置enable_thread_pool,才会调用SetThreadPoolInfo函数
+  1. 首先InitCpuInfo将CPU信息结构m_cpuInfo初始化
+  2. 通过GetInstanceBind判定是否已有CPU进行了绑定
+  3. GetCpuAndNumaNum计算CPU个数及NUMA节点个数
+  4. ParseAttr函数解析thread_pool_attr字符串
+  5. GetSysCpuInfo函数获取CPU信息
+  6. SetGroupAndThreadNum设定组个数及每个组中线程数
+
+- 在ServerLoop函数中接收用户端连接,并进行CPU绑定
+  - 由函数g_threadPoolControler->Init完成
+  - 完成线程创建及CPU绑定的是ThreadPoolGroup::Init函数
+
+
+**GetCpuAndNumaNum**
+
+通过lscpu命令来计算CPU核、NUMA个数。
+
+```
+void ThreadPoolControler::GetCpuAndNumaNum()
+{
+    char buf[BUFSIZE];
+    FILE* fp = NULL;
+    if ((fp = popen("lscpu", "r")) != NULL) {
+        while (fgets(buf, sizeof(buf), fp) != NULL) {
+            if (strncmp("CPU(s)", buf, strlen("CPU(s)")) == 0 &&
+                strncmp("On-line CPU(s) list", buf, strlen("On-line CPU(s) list")) != 0 &&
+                strncmp("NUMA node", buf, strlen("NUMA node")) != 0) {
+                char* loc = strchr(buf, ':');
+                m_cpuInfo.totalCpuNum = pg_strtoint32(loc + 1);
+            } else if (strncmp("NUMA node(s)", buf, strlen("NUMA node(s)")) == 0) {
+                char* loc = strchr(buf, ':');
+                m_cpuInfo.totalNumaNum = pg_strtoint32(loc + 1);
+            }
+        }
+        pclose(fp);
+    }
+}
+```
+
+**GetSysCpuInfo**
+
+- 通过fp = popen("lscpu -b -e=cpu,node", "r");执行lscpu命令获取cpuid和numaid
+- 通过CPU_ISSET判断CPU是否绑定,最后计算出活跃未绑定的CPU个数m_cpuInfo.activeNumaNum
+
+**SetGroupAndThreadNum**
+
+- 进行线程绑定,默认情况下线程组个数为2,每组里面线程个数为16
+- ConstrainThreadNum限定线程池大小m_maxPoolSize为min(4096, max_connection),线程个数m_threadNum = Min(m_threadNum, m_maxPoolSize);
+
+**ThreadPoolGroup::Init**
+
+```
+m_listener->StartUp(); // 开启一个新线程
+InitWorkerSentry();
+|-- AddWorker
+    |-- AttachThreadToNodeLevel:: pthread_setaffinity_np
+CPU_SET(m_groupCpuArr[i], &m_nodeCpuSet); // 循环将CPU加入CPU集合
+```
+
+**NUMA优化相关函数**
+
+openGauss中所有numa相关函数都可以通过宏定义ifdef __USE_NUMA找到其定义及调用的地方。
+
+```
+int numa_available(void):NUMA的API是否可以在平台上正常使用
+int numa_max_node(void):当前系统上最大NUMA节点号
+void * numa_alloc_onnode(size_t size,int node):在一个指定NUMA节点分配内存
+void numa_free(void *start,size_t size):释放起始地址指定的内存
+int numa_run_on_node(int node):运行当前任务在指定NUMA节点上
+void numa_set_localalloc(void):设置当前的任务内存分配策略为本地化分配
+void numa_set_preferred(int node):为当前任务设置偏好NUMA节点
+void numa_set_interleave_mask(struct bitmask*nodemask):在一系列numa节点上分配交叉内存
+int pthread_getaffinity_np(pthread_t thread,size_t cpusetsize,cpu_set_t *cpuset):获取线程的CPU亲和性(与之对应的pthread_setaffinity_np用于设置)。
+```
+
+1. sched_getaffinity和pthread_getaffinity_np都是与绑核相关的函数。
+2. numa_set_preferred设置当前线程优先分配内存的结点。内存分配器先尝试从这个结点上分配内存,如果这个结点没有足够的空间,它会尝试其他结点。
+3. numa_set_interleave_mask函数可以让当前线程以交错(interleaving)方式分配内存。未来所有的内存,将会从掩码给定的结点上轮询(round robin)分配。numa_all_nodes将内存分配交错(interleaving)在所有的node上。numa_no_nodes将会关闭交错分配内存。numa_get_interleave_mask函数返回当前的交错掩码。这可以将当前的内存分配策略保存到文件中,在策略修改后,再次恢复。
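+
+结合上文的绑核配置,可以用系统工具直观验证NUMA拓扑与线程亲和性。下面是一个假设性的检查示例(其中<tid>为占位符,需替换为实际的gaussdb线程ID):
+```shell
+# 查看本机 NUMA 节点划分及节点间距离矩阵(需安装 numactl)
+numactl --hardware
+# 查看指定线程当前被允许运行的 CPU 列表,验证绑核是否生效
+taskset -pc <tid>
+```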
numa\_set\_interleave\_mask函数可以让当前线程以交错(interleaving)方式分配内存。未来所有的内存,将会从掩码给定的结点上轮询(round robing)分配。numa\_all\_nodes将内存分配交错(interleaving)在所有的node上。numa\_no\_nodes将会关闭交错分配内存。numa\_get\_interleave\_mask函数返回当前的交错掩码。这可以将当前的内存分配策略保存到文件中,在策略修改后,再次恢复。 + +**参考** + +\}https://www.bilibili.com/video/BV1gD4y1o7qB?from=search&seid=11985947230954507904 + diff --git "a/content/zh/post/ysl/MogDB openGauss\346\225\260\346\215\256\345\272\223xlog\347\233\256\345\275\225\346\273\241\351\227\256\351\242\230\345\244\204\347\220\206.md" "b/content/zh/post/ysl/MogDB openGauss\346\225\260\346\215\256\345\272\223xlog\347\233\256\345\275\225\346\273\241\351\227\256\351\242\230\345\244\204\347\220\206.md" new file mode 100644 index 0000000000000000000000000000000000000000..4a1611d882b9d17f62aabb803f9c286fc3cd96fd --- /dev/null +++ "b/content/zh/post/ysl/MogDB openGauss\346\225\260\346\215\256\345\272\223xlog\347\233\256\345\275\225\346\273\241\351\227\256\351\242\230\345\244\204\347\220\206.md" @@ -0,0 +1,135 @@ ++++ + +title = "MogDB/openGauss数据库xlog目录满问题处理" + +date = "2022-04-07" + +tags = ["MogDB/openGauss数据库xlog目录满问题处理"] + +archives = "2022-04" + +author = "阎书利" + +summary = "MogDB/openGauss数据库xlog目录满问题处理" + +img = "/zh/post/ysl/title/img39.png" + +times = "10:20" + ++++ + +# MogDB/openGauss数据库xlog目录满问题处理 + +MODGDB/openGauss数据库xlog满通常为以下几个原因: +1.主备状态不正常,存在网络问题,集群内有宕机的节点 +2.xlog保留数量过多 +3.逻辑复制槽失效,且未及时清理 +4.开启归档,但归档失败导致xlog不清理 + +首先,确认数据库状态 + +``` +gs_om -t query +``` + +确认主备状态,是否存在宕机的节点。 +查看是否存在down,Standby Need repair(WAL)或者unkown的状态。 + +如果数据库状态不正常,xlog目录100% +需要手动移走一部分xlog后,检查数据库状态后将库拉起,并排查相关问题。 + +如果数据库状态正常,仅xlog目录大,则继续排查其他问题。 + +清理: +1.找一个空间大的目录 +例如: + +``` +su - omm +cd /mogdb_bak +mkdir xlog_mv_0919 +``` + +2.移走部分xlog到xlog路径下 + +``` +cd /ogdata/data/dn1/pg_xlog +``` + +查看xlog数量,看是否xlog保留过多 + +``` +ls | wc -l +``` + +**!!!为了恢复环境,移动一小部分xlog,其余等处理之后,自己清理** + +生成移动xlog语句,并检查(前1000条) + +``` +ls -ltr | head -n 1000 | awk '{print "mv "$9 " /mogdb_bak/xlog_mv_0919/"}' +``` + +3.#实际执行移动操作 + +``` +ls -ltr | head -n 1000 | awk '{print "mv "$9 " /mogdb_bak/xlog_mv_0919/"}' | sh +``` + +4.移动之后df -Th看空间是否下来 + +5.gs_om -t query 查看数据库状态 + +如果不正常,需要先尝试拉起主数据库 + +``` +gs_ctl start -D /ogdata/data/dn1 +``` + +然后依次拉起备机数据库 + +``` +gs_ctl start -D /ogdata/data/dn1 -M standby +``` + +备库拉不起来则先不处理,等找到xlog目录满源头后(例如主库删除失效逻辑复制后),考虑做build(先尝试增量不行再用增量) + +```sql +gs_ctl build -D /ogdata/data/dn1 -b incremental gs_ctl build -D /ogdata/data/dn1 -b full +``` + +6.登录主数据库查看逻辑复制槽状态,查看有无失效逻辑复制槽 + +``` +select * from pg_replication_slots; +``` + +7.在主库删除失效逻辑复制槽 + +```sql +select * from pg_drop_replication_slot('aohdoasdaoiodiandoan'); +``` + +为逻辑复制槽名字 + +删除失效的逻辑复制槽,主库和备库的xlog目录应该都会释放一部分空间 + +8.删除后 df -Th看空间是否下来 + +9.参数调整 + +```sql +(1)查看wal_keep_segments参数,该参数为Xlog日志文件段数量,“pg_xlog”目录下保留事务日志文件的最小数目。 +(2)查看max_size_for_xlog_prune参数,在enable_xlog_prune打开时生效,如果有备机断连且xlog日志大小大于此阈值,则回收日志。 +根据实际状况,可进行修改。 +(3)如果是PG13版本,可考虑开启max_slot_wal_keep_size参数,他是允许replication slot 保留的wal文件的最大 +大小,用于防止wal无限增大导致主库的文件系统空间被撑爆,设置该参数之后如果超过该参数值,PostgreSQL将开始删除最 +早的WAL文件。默认值是-1,-1表示表示禁用本功能。单位是MB。 +``` + +10.检查归档模式是否开启 + +``` +show archive_mode; +到归档目录下,看开启归档参数时,是否有归档。并检查归档空间,排除归档相关问题。 +``` diff --git "a/content/zh/post/ysl/MogDB openGauss\347\232\204WDR\346\212\245\345\221\212.md" "b/content/zh/post/ysl/MogDB openGauss\347\232\204WDR\346\212\245\345\221\212.md" new file mode 100644 index 0000000000000000000000000000000000000000..71bf5b751fc52871626f53047ab8edf001d124ce --- /dev/null +++ "b/content/zh/post/ysl/MogDB openGauss\347\232\204WDR\346\212\245\345\221\212.md" @@ -0,0 
+1,160 @@ ++++ + +title = "MogDB/openGauss的WDR报告" + +date = "2022-04-14" + +tags = ["MogDB/openGauss的WDR报告"] + +archives = "2022-04" + +author = "阎书利" + +summary = "MogDB/openGauss的WDR报告" + +img = "/zh/post/ysl/title/img39.png" + +times = "10:20" ++++ + +# MogDB/openGauss的WDR报告 + +本文出处:https://www.modb.pro/db/95930 + +
    + +Oracle的awr报告在日常解决问题中起到了很大的便利,在遇到问题时,我们通常会查看有无对应时间段的快照,生成awr报告并进一步分析。通过分析数据库的状态,资源消耗以及等待事件等初步定位问题,并在此基础上进行验证。在MogDB/opengauss数据库中,也有着这样的“awr”,它叫做——wdr。 + +**前提:** +1.打开参数enable_wdr_snapshot + +```sql +postgres=# show enable_wdr_snapshot; +enable_wdr_snapshot +--------------------- +on +(1 row) + +``` + +WDR Snasphot在启动后,会在用户表空间"pg_default",数据库"postgres"下新建schema “snapshot”,用于持久化WDR快照数据。 + +2.WDR Snasphot性能快照数量大于等于2。 + +**操作步骤:** +1.执行以下命令查询已经生成的快照 + +```sql +postgres=# select * from snapshot.snapshot; +snapshot_id | start_ts | end_ts +-------------+-------------------------------+------------------------------- + 1 | 2021-07-08 15:02:15.990876+08 | 2021-07-08 15:02:18.555272+08 + 2 | 2021-07-08 15:08:12.470218+08 | 2021-07-08 15:08:14.514862+08 + 3 | 2021-07-08 16:02:16.709364+08 | 2021-07-08 16:02:17.643546+08 + 4 | 2021-07-08 17:02:17.617386+08 | 2021-07-08 17:02:20.626552+08 + ............ + 43 | 2021-07-10 07:02:36.418031+08 | 2021-07-10 07:02:37.380217+08 + 44 | 2021-08-05 00:21:09.062745+08 | 2021-08-05 00:21:10.33016+08 +(44 rows) +``` + +2.可以选择手从创建快照,该命令需要用户具有sysadmin权限。或者直接选取数据库中已有的快照。 + +```sql +postgres=# select create_wdr_snapshot(); + create_wdr_snapshot +----------------------------------------- +WDR snapshot request has been submitted +(1 row) + +postgres=# select * from snapshot.snapshot; +snapshot_id | start_ts | end_ts +-------------+-------------------------------+------------------------------- + 1 | 2021-07-08 15:02:15.990876+08 | 2021-07-08 15:02:18.555272+08 + 2 | 2021-07-08 15:08:12.470218+08 | 2021-07-08 15:08:14.514862+08 + 3 | 2021-07-08 16:02:16.709364+08 | 2021-07-08 16:02:17.643546+08 + 4 | 2021-07-08 17:02:17.617386+08 | 2021-07-08 17:02:20.626552+08 + ............ + 43 | 2021-07-10 07:02:36.418031+08 | 2021-07-10 07:02:37.380217+08 + 44 | 2021-08-05 00:21:09.062745+08 | 2021-08-05 00:21:10.33016+08 + 45 | 2021-08-05 00:39:43.777341+08 | 2021-08-05 00:39:44.760498+08 //这一快照为刚才手动执行生成的 +(45 rows) +``` + +3.执行如下步骤,生成node级别wdr报告 +1)查询 pgxc_node_name参数值 + +```sql +[omm@node1 ~]$ gsql -p 26000 postgres -c "show pgxc_node_name" +pgxc_node_name +---------------- +dn_6001_6002 +(1 row) + +``` + +2) \a \t \o 服务器文件路径生成格式化性能报告 +例如 + +```sql +postgres=# \a \t \o /home/omm/wdr_sarah.html +Output format is unaligned. +Showing only tuples. 
+ +``` + +> 上述命令涉及参数说明如下: +> \a:切换非对齐模式。 +> \t:切换输出的字段名的信息和行计数脚注。 +> \o:把所有的查询结果发送至服务器文件里。 +> 服务器文件路径:生成性能报告文件存放路径。用户需要拥有此路径的读写权限。 + +3)向性能报告wdr_sarah.html中写入数据。 + +gsql -p 26000 -d postgres +select generate_wdr_report(快照id1,快照id2,‘all’,‘node’,‘pgxc_node_name参数值’); + +例如 + +```sql +postgres=# select generate_wdr_report(44,45,'all','node','dn_6001_6002'); +``` + +目录下生成对应的wdr报告 + +``` +[omm@node1 ~]$ ll +total 1080 +-rw------- 1 omm dbgrp 1317 Apr 9 15:43 single.xml +-rw------- 1 omm dbgrp 1101242 Aug 5 00:47 wdr_sarah.html +``` + +拿到浏览器上查看: + +MogDB/opengauss的awr报告类似于oracle的wdr,拥有资源消耗、等待事件、TOPSQL,以及参数设置等。 + +**快照相关参数:** +**enable_wdr_snapshot** +参数说明:是否开启数据库监控快照功能。 +该参数属于SIGHUP类型参数 +取值范围:布尔型 +on:打开数据库监控快照功能。 +off:关闭数据库监控快照功能。 + +**wdr_snapshot_retention_days** +参数说明:系统中数据库监控快照数据的保留天数。当数据库运行过程期间所生成的快照量数超过保留天数内允许生成的快照数量的最大值时,系统将每隔wdr_snapshot_interval时间间隔,清理snapshot_id最小的快照数据。 +该参数属于SIGHUP类型参数 +取值范围:整型,1~8。 +默认值:8 + +**wdr_snapshot_interval** +参数说明:后台线程Snapshot自动对数据库监控数据执行快照操作的时间间隔。 +该参数属于SIGHUP类型参数 +取值范围:整型,10~60(分钟)。 +默认值:1h + +**wdr_snapshot_query_timeout** +参数说明:系统执行数据库监控快照操作时,设置快照操作相关的sql语句的执行超时时间。如果语句超过设置的时间没有执行完并返回结果,则本次快照操作失败。 +该参数属于SIGHUP类型参数 +取值范围:整型,100~INT_MAX(秒)。 +默认值:100s diff --git "a/content/zh/post/ysl/MogDB openGauss\347\232\204txid_snapshot \346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\347\233\270\345\205\263\345\207\275\346\225\260.md" "b/content/zh/post/ysl/MogDB openGauss\347\232\204txid_snapshot \346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\347\233\270\345\205\263\345\207\275\346\225\260.md" new file mode 100644 index 0000000000000000000000000000000000000000..2a8dd4bc988edf411dc3f431b0dcae12aca65fff --- /dev/null +++ "b/content/zh/post/ysl/MogDB openGauss\347\232\204txid_snapshot \346\225\260\346\215\256\347\261\273\345\236\213\345\222\214\347\233\270\345\205\263\345\207\275\346\225\260.md" @@ -0,0 +1,235 @@ ++++ + +title = "MogDB/openGauss的txid_snapshot 数据类型和相关函数" + +date = "2022-04-14" + +tags = ["MogDB/openGauss的txid_snapshot 数据类型和相关函数"] + +archives = "2022-04" + +author = "阎书利" + +summary = "MogDB/openGauss的txid_snapshot 数据类型和相关函数" + +img = "/zh/post/ysl/title/img39.png" + +times = "10:20" ++++ + +# MogDB/openGauss的txid_snapshot 数据类型和相关函数 + +本文出处:https://www.modb.pro/db/216415 + +
+
+txid_snapshot的文本表示为:xmin:xmax:xip_list。
+
+| 名称 | 描述 |
+| ---- | ---- |
+| xmin | 仍然活动的最早事务ID(txid)。所有更早的事务要么已经提交因而可见,要么已经回滚。 |
+| xmax | 第一个尚未分配的txid。所有大于或等于它的txid在快照时刻尚未开始,因此不可见。 |
+| xip_list | 快照中活动的txids。这个列表只包含xmin和xmax之间活动的txids(可能有活动的txids高于xmax)。大于等于xmin、小于xmax且不在此列表中的txid,在快照时刻已经结束,可根据其提交状态判断它是可见还是回滚。该列表不包含子事务的txids。 |
+
+示例:10:20:10,13,15意思为:xmin=10, xmax=20, xip_list=10, 13, 15。
+
+## 测试如下:
+
+### 1.设置对临时对象强制使用COMMIT而不是2PC
+
+```
+SET enforce_two_phase_commit TO off;
+```
+
+### 2.正常案例演示
+
+```sql
+postgres=# select '12:13:'::txid_snapshot;
+ txid_snapshot
+---------------
+ 12:13:
+(1 row)
+
+postgres=# select '12:18:14,16'::txid_snapshot;
+ txid_snapshot
+---------------
+ 12:18:14,16
+(1 row)
+```
+
+### 3.错误案例演示
+
+```sql
+postgres=# select '31:12:'::txid_snapshot;
+ERROR: invalid input for txid_snapshot: "31:12:"
+LINE 1: select '31:12:'::txid_snapshot;
+               ^
+CONTEXT: referenced column: txid_snapshot
+```
+
+通过测试看出:xmax应该大于xmin,且不可为0;txids应该按增序排列、不为0,并且不能有重复的txids。使用时应当尽量避免这些非法输入。
+
+### 4.创建测试表及测试数据导入
+
+```sql
+postgres=# create temp table snapshot_test(nr integer,snap txid_snapshot);
+CREATE TABLE
+postgres=# insert into snapshot_test values (1, '12:13:');
+INSERT 0 1
+postgres=# insert into snapshot_test values (2, '12:20:13,15,18');
+INSERT 0 1
+postgres=# insert into snapshot_test values (3, '100001:100009:100005,100007,100008');
+INSERT 0 1
+postgres=# insert into snapshot_test values (4, '100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131');
+INSERT 0 1
+```
+
+查询数据情况:
+
+```sql
+postgres=# select snap from snapshot_test order by nr;
+ snap
+---------------------------------------------------------------------------------------------------------------------------------------
+ 12:13:
+ 12:20:13,15,18
+ 100001:100009:100005,100007,100008
+ 100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131
+(4 rows)
+```
+
+### 5.函数测试
+
+txid_snapshot_xmin()会返回快照的xmin,
+
+txid_snapshot_xmax()会返回快照的xmax,
+
+txid_snapshot_xip()获取正在进行的事务id,即txids。
+
+```sql
+postgres=# select txid_snapshot_xmin(snap),
+postgres-#        txid_snapshot_xmax(snap),
+postgres-#        txid_snapshot_xip(snap)
+postgres-# from snapshot_test order by nr, 1, 2, 3;
+ txid_snapshot_xmin | txid_snapshot_xmax | txid_snapshot_xip
+--------------------+--------------------+-------------------
+                 12 |                 20 |                13
+                 12 |                 20 |                15
+                 12 |                 20 |                18
+             100001 |             100009 |            100005
+             100001 |             100009 |            100007
+             100001 |             100009 |            100008
+                100 |                150 |               101
+                100 |                150 |               102
+                100 |                150 |               103
+                100 |                150 |               104
+                100 |                150 |               105
+```
+
+txid_visible_in_snapshot()会查看在快照中事务ID是否可见(不使用子事务ID)
+
+```sql
+postgres=# select id, txid_visible_in_snapshot(id, snap)
+postgres-# from snapshot_test, generate_series(11, 21) id
+postgres-# where nr = 2;
+ id | txid_visible_in_snapshot
+----+--------------------------
+ 11 | t
+ 12 | t
+ 13 | f
+ 14 | t
+ 15 | f
+ 16 | t
+ 17 | t
+ 18 | f
+ 19 | t
+ 20 | f
+ 21 | f
+(11 rows)
+```
+
+### 6.其他测试
+
+测试二分查找
+
+```sql
+postgres=# select id, txid_visible_in_snapshot(id, snap)
+postgres-# from snapshot_test, generate_series(90, 160) id
+postgres-# where nr = 4;
+ id  | txid_visible_in_snapshot
+-----+--------------------------
+ 90  | t
+ 91  | t
+ 92  | t
+ 93  | t
+ 94  | t
+ 95  | t
+ 96  | t
+ 97  | t
+ 98  | t
+ 99  | t
+ 100 | t
+ 101 | f
+```
+
+测试当前值
+
+```sql
+postgres=# select txid_current() >= txid_snapshot_xmin(txid_current_snapshot());
+ ?column?
+----------
+ t
+(1 row)
+```
+
+我们不能假设当前值总是小于xmax
+
+```sql
+postgres=# select txid_visible_in_snapshot(txid_current(), txid_current_snapshot());
+ txid_visible_in_snapshot
+--------------------------
+ f
+(1 row)
+```
+
+测试64bitness(MogDB/openGauss将transactionid由int32改为了int64,64位的xid永远不可能耗尽。虽然xid改为了64位,过期的xid依旧需要freeze清理,只是永远不用担心发生xid回卷宕机的风险。)
+
+```sql
+postgres=# select txid_snapshot '1000100010001000:1000100010001100:1000100010001012,1000100010001013';
+ txid_snapshot
+----------------------------------------------------------------------
+ 1000100010001000:1000100010001100:1000100010001012,1000100010001013
+(1 row)
+
+postgres=# select txid_visible_in_snapshot('1000100010001012', '1000100010001000:1000100010001100:1000100010001012,1000100010001013');
+ txid_visible_in_snapshot
+--------------------------
+ f
+(1 row)
+
+postgres=# select txid_visible_in_snapshot('1000100010001015', '1000100010001000:1000100010001100:1000100010001012,1000100010001013');
+ txid_visible_in_snapshot
+--------------------------
+ t
+(1 row)
+```
+
+测试溢出64bit。9223372036854775807是2^63-1,即63位二进制能表示的最大数字。
+
+```sql
+postgres=# SELECT txid_snapshot '1:9223372036854775807:3';
+ txid_snapshot
+--------------------------
+ 1:9223372036854775807:3
+(1 row)
+
+postgres=# SELECT txid_snapshot '1:9223372036854775808:3';
+ERROR: invalid input for txid_snapshot: "1:9223372036854775808:3"
+LINE 1: SELECT txid_snapshot '1:9223372036854775808:3';
+                             ^
+CONTEXT: referenced column: txid_snapshot
+```
diff --git "a/content/zh/post/ysl/openGauss Copy\346\224\257\346\214\201\345\256\271\351\224\231\346\234\272\345\210\266.md" "b/content/zh/post/ysl/openGauss Copy\346\224\257\346\214\201\345\256\271\351\224\231\346\234\272\345\210\266.md"
new file mode 100644
index 0000000000000000000000000000000000000000..8638d6bb7a9d453d48a059074b1d9375bcd2554c
--- /dev/null
+++ "b/content/zh/post/ysl/openGauss Copy\346\224\257\346\214\201\345\256\271\351\224\231\346\234\272\345\210\266.md"
@@ -0,0 +1,84 @@
++++
+
+title = "openGauss Copy支持容错机制"
+
+date = "2022-04-06"
+
+tags = ["openGauss Copy支持容错机制"]
+
+archives = "2022-04"
+
+author = "阎书利"
+
+summary = "openGauss Copy支持容错机制"
+
+img = "/zh/post/ysl/title/img39.png"
+
+times = "10:20"
+
++++
+
+# openGauss Copy支持容错机制
+
+## 一、COPY容错机制相关选项
+
+openGauss允许用户在使用Copy From指令时指定容错选项,使得Copy From语句在执行过程中部分解析、数据格式、字符集等相关的报错不会中断事务,而是被记录至错误表中。这样即使Copy From的目标文件有少量数据错误,也可以完成入库操作;用户随后可以在错误表中对相关错误进行定位以及进一步排查。
+
+主要包括三个选项:
+
+- **LOG ERRORS**
+  若指定,则开启对于COPY FROM语句中数据类型错误的容错机制。
+
+- **LOG ERRORS DATA**
+
+  LOG ERRORS DATA和LOG ERRORS的区别:
+
+  1. LOG ERRORS DATA会填充容错表的rawrecord字段。
+  2. 只有super权限的用户才能使用LOG ERRORS DATA参数选项。
+
+- **REJECT LIMIT 'limit'**
+  与LOG ERRORS选项共同使用,对COPY FROM的容错机制设置数值上限。一旦此COPY FROM语句的错误数据超过选项指定条数,则会按照原有机制报错。
+  取值范围:正整数(1-INTMAX),'unlimited'(无最大值限制)
+
+## 二、Copy错误表创建函数
+
+openGauss里已经给用户提供了封装好的Copy错误表创建函数 pg_catalog.copy_error_log_create(),执行它就可以创建相应的错误表public.pgxc_copy_error_log。这个函数也可以手动移除后重建,如下是重建的函数。
+
+```
+CREATE OR REPLACE FUNCTION pg_catalog.copy_error_log_create()
+RETURNS bool
+AS $$
+DECLARE
+    query_str_create_table text;
+    query_str_create_index text;
+    query_str_do_revoke text;
+    BEGIN
+        query_str_create_table := 'CREATE TABLE public.pgxc_copy_error_log
+        (relname varchar, begintime timestamptz, filename varchar, lineno int8, rawrecord text, detail text)';
+        EXECUTE query_str_create_table;
+
+        query_str_create_index := 'CREATE INDEX copy_error_log_relname_idx ON public.pgxc_copy_error_log(relname)';
+        EXECUTE query_str_create_index;
+
+        query_str_do_revoke := 'REVOKE ALL on public.pgxc_copy_error_log FROM public';
+        EXECUTE query_str_do_revoke;
+
+        return true;
+    END; $$
+LANGUAGE 'plpgsql' NOT FENCED;
+
+REVOKE ALL on FUNCTION pg_catalog.copy_error_log_create() FROM public;
+```
+如果不创建copy错误表,仅仅带上容错选项的话,会有相关报错提示,此时执行 pg_catalog.copy_error_log_create() 创建copy错误表即可。
+
+## 三、COPY FROM存在错误的数据
+
+再执行copy命令,报错的copy数据会被记录到public.pgxc_copy_error_log里,而不会影响其他正确数据的导入。
+注意要指定REJECT LIMIT 'limit',且limit的值要足够大,否则当COPY FROM语句的错误数据超过选项指定条数时,仍会按照原有机制报错。表里记录的内容,是通过Log_copy_error_spi函数读取缓存文件中的每一行,组装spi要执行的查询字符串,将错误记录插入带有spi的copy_error_log表中。
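+
+下面给出一个假设性的端到端演示(表结构、数据文件路径均为演示假设,容错选项在COPY语句中的书写位置以所用版本的语法说明为准):
+```shell
+gsql -d postgres -p 26000 <<'EOF'
+-- 创建错误表(若已存在可跳过)
+SELECT pg_catalog.copy_error_log_create();
+CREATE TABLE t_copy_demo(id int, name text);
+-- 带容错选项导入:最多容忍10条坏行,坏行记入错误表而不中断导入
+COPY t_copy_demo FROM '/tmp/demo.csv' WITH (FORMAT 'csv') LOG ERRORS REJECT LIMIT '10';
+SELECT relname, lineno, detail FROM public.pgxc_copy_error_log;
+EOF
+```
diff --git "a/content/zh/post/ysl/openGauss1-1-0\346\265\213\350\257\225-\345\205\250\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215-\345\242\236\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215.md" "b/content/zh/post/ysl/openGauss1-1-0\346\265\213\350\257\225-\345\205\250\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215-\345\242\236\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215.md"
index 89452bdebf1e4414cebc0114a0ac0b9d84bdc2ff..da3954e71fbaed9a749ea1a73df2283271f31cdb 100644
--- "a/content/zh/post/ysl/openGauss1-1-0\346\265\213\350\257\225-\345\205\250\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215-\345\242\236\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215.md"
+++ "b/content/zh/post/ysl/openGauss1-1-0\346\265\213\350\257\225-\345\205\250\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215-\345\242\236\351\207\217\345\244\207\344\273\275\345\222\214\346\201\242\345\244\215.md"
@@ -8,7 +8,7 @@
 tags = ["openGauss备份与恢复"]
 
 archives = "2021-01"
 
-author = "ysl"
+author = "阎书利"
 
 summary = "openGauss1.1.0测试:全量备份和恢复&增量备份和恢复"
 
diff --git "a/content/zh/post/ysl/openGauss\346\225\260\346\215\256\345\272\223\346\211\247\350\241\214\350\256\241\345\210\222\347\274\223\345\255\230\345\244\261\346\225\210\346\234\272\345\210\266\347\232\204\346\265\213\350\257\225.md" "b/content/zh/post/ysl/openGauss\346\225\260\346\215\256\345\272\223\346\211\247\350\241\214\350\256\241\345\210\222\347\274\223\345\255\230\345\244\261\346\225\210\346\234\272\345\210\266\347\232\204\346\265\213\350\257\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..380efb94660038a859762ec07a86e89db8ded46e
--- /dev/null
+++ "b/content/zh/post/ysl/openGauss\346\225\260\346\215\256\345\272\223\346\211\247\350\241\214\350\256\241\345\210\222\347\274\223\345\255\230\345\244\261\346\225\210\346\234\272\345\210\266\347\232\204\346\265\213\350\257\225.md"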
@@ -0,0 +1,403 @@ ++++ + +title = "openGauss数据库执行计划缓存/失效机制的测试" + +date = "2022-04-06" + +tags = ["openGauss数据库执行计划缓存/失效机制的测试"] + +archives = "2022-04" + +author = "阎书利" + +summary = "openGauss数据库执行计划缓存/失效机制的测试" + +img = "/zh/post/ysl/title/img39.png" + +times = "11:37" + ++++ + +# openGauss数据库执行计划缓存/失效机制的测试 + +## 1.强制对临时对象使用COMMIT而不是2PC + +```sql +postgres=# SET enforce_two_phase_commit TO off; +SET +``` + +## 2.创建测试表并插入测试数据 + +```sql +postgres=# CREATE TEMP TABLE tab_test_plancache(q1 int8, q2 int8); +CREATE TABLE +postgres=# INSERT INTO tab_test_plancache VALUES(' 123 ',' 456'); +INSERT 0 1 +postgres=# INSERT INTO tab_test_plancache VALUES('123 ','4567890123456789'); +INSERT 0 1 +postgres=# INSERT INTO tab_test_plancache VALUES('4567890123456789','123'); +INSERT 0 1 +postgres=# INSERT INTO tab_test_plancache VALUES(+4567890123456789,'4567890123456789'); +INSERT 0 1 +postgres=# INSERT INTO tab_test_plancache VALUES('+4567890123456789','-4567890123456789'); +INSERT 0 1 +``` + +## 3.创建并使用缓存的计划 + +```sql +postgres=# PREPARE prepstmt AS SELECT * FROM tab_test_plancache ORDER BY q1, q2; +PREPARE +postgres=# EXECUTE prepstmt; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 +(5 rows) +``` + +并且包含一个带有绑定变量的 + +```sql +postgres=# PREPARE prepstmt2(bigint) AS SELECT * FROM tab_test_plancache WHERE q1 = $1 ORDER BY q1, q2; +PREPARE +postgres=# EXECUTE prepstmt2(123); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) +``` + +## 4.删除临时表,查看现象 + +```sql +postgres=# DROP TABLE tab_test_plancache; +DROP TABLE +postgres=# EXECUTE prepstmt; +ERROR: relation "tab_test_plancache" does not exist on dn_6001_6002 +postgres=# EXECUTE prepstmt2(123); +ERROR: relation "tab_test_plancache" does not exist on dn_6001_6002 +``` + +重建临时表 + +```sql +postgres=# select * from tab_test_plancache; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 +(5 rows) + +postgres=# EXECUTE prepstmt; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 +(5 rows) + +postgres=# EXECUTE prepstmt2(123); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) +``` + +这表明原始计划是纯文本的,不依赖于OID + +## 5.prepared statements应该防止在输出的tupdesc中更改, 因为clients可能不希望这种情况瞬间改变 + +```sql +postgres=# ALTER TABLE tab_test_plancache ADD COLUMN q3 bigint; +ALTER TABLE +postgres=# EXECUTE prepstmt; +ERROR: cached plan must not change result type +postgres=# EXECUTE prepstmt2(123); +ERROR: cached plan must not change result type +``` + +例子里增加了一列,但是报出了缓存的计划不能更改结果类型,可以通过还原原来表的结构解决 + +```sql +postgres=# ALTER TABLE tab_test_plancache ADD COLUMN q3 bigint; +ALTER TABLE +postgres=# select * from tab_test_plancache; + q1 | q2 | q3 +------------------+-------------------+---- + 123 | 456 | + 123 | 4567890123456789 | + 4567890123456789 | -4567890123456789 | + 4567890123456789 | 123 | + 4567890123456789 | 4567890123456789 | +(5 rows) + +postgres=# ALTER TABLE tab_test_plancache DROP COLUMN q3; +ALTER TABLE +postgres=# EXECUTE prepstmt; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 
4567890123456789 +(5 rows) + +postgres=# EXECUTE prepstmt2(123); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) +``` + +## 6.检查使用视图的有效性 + +如果尝试使用一个视图的话,这个视图不会直接用于生成的计划中,但也是有效的 + +```sql +postgres=# CREATE TEMP VIEW pcacheview AS +postgres-# SELECT * FROM tab_test_plancache; +CREATE VIEW +postgres=# PREPARE vprep AS SELECT * FROM pcacheview ORDER BY q1, q2; +PREPARE +postgres=# EXECUTE vprep; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 +(5 rows) + +postgres=# CREATE OR REPLACE TEMP VIEW pcacheview AS + SELECT q1, q2+1 AS q2 FROM tab_test_plancache ORDER BY q1, q2; +CREATE VIEW +postgres=# EXECUTE vprep; + q1 | q2 +------------------+------------------- + 123 | 457 + 123 | 4567890123456790 + 4567890123456789 | -4567890123456788 + 4567890123456789 | 124 + 4567890123456789 | 4567890123456790 +(5 rows) +``` + +## 7.检查基本 SPI plan 是否有效 + +```sql +postgres=# create function cache_test(int) returns int as $$ +postgres$# declare total int; +postgres$# begin +postgres$# create table t1_plancache(f1 int); +postgres$# insert into t1_plancache values($1); +postgres$# insert into t1_plancache values(11); +postgres$# insert into t1_plancache values(12); +postgres$# insert into t1_plancache values(13); +postgres$# select sum(f1) into total from t1_plancache; +postgres$# drop table t1_plancache; +postgres$# return total; +postgres$# end +postgres$# $$ language plpgsql; +CREATE FUNCTION + +postgres=# select cache_test(1); + cache_test +------------ + 37 +(1 row) + +postgres=# select cache_test(2); + cache_test +------------ + 38 +(1 row) + +postgres=# select cache_test(3); + cache_test +------------ + 39 +(1 row) + +``` + +## 8.检查plpgsql“简单表达式”的有效性 + +```sql +postgres=# create temp view v1 as +postgres-# select 2+2 as f1; +CREATE VIEW + +postgres=# create function cache_test_2() returns int as $$ +postgres$# begin +postgres$# return f1 from v1; +postgres$# end$$ language plpgsql; +CREATE FUNCTION + +postgres=# select cache_test_2(); + cache_test_2 +-------------- + 4 +(1 row) + +postgres=# create or replace temp view v1 as +postgres-# select 2+2+4 as f1; +CREATE VIEW +postgres=# select cache_test_2(); + cache_test_2 +-------------- + 8 +(1 row) +``` + +## 9.检查缓存执行计划使用与search_path影响 + +可以看到,两个schema下都有同一张表,修改了search_path后,缓存执行计划执行的是search_path下的表,所以缓存执行计划会受search_path影响。 + +```sql +postgres=# create schema s1 +postgres-# create table abc (f1 int); +CREATE SCHEMA +postgres=# create schema s2 +postgres-# create table abc (f1 int); +CREATE SCHEMA +postgres=# insert into s1.abc values(123); +INSERT 0 1 +postgres=# insert into s2.abc values(456); +INSERT 0 1 +postgres=# set search_path = s1; +SET +postgres=# prepare p1 as select f1 from abc; +PREPARE +postgres=# execute p1; + f1 +----- + 123 +(1 row) + +postgres=# set search_path = s2; +SET +postgres=# select f1 from abc; + f1 +----- + 456 +(1 row) + +postgres=# execute p1; + f1 +----- + 456 +(1 row) + +postgres=# alter table s1.abc add column f2 float8; +ALTER TABLE +postgres=# execute p1; + f1 +----- + 456 +(1 row) + +postgres=# drop schema s1 cascade; +NOTICE: drop cascades to table s1.abc +DROP SCHEMA +postgres=# drop schema s2 cascade; +NOTICE: drop cascades to table abc +DROP SCHEMA +postgres=# reset search_path; +RESET +``` + +## 10.检查regclass常量是否有效 + +~~~sql +postgres=# create sequence seq; +CREATE SEQUENCE +postgres=# prepare p2 as select nextval('seq'); +PREPARE +postgres=# 
execute p2;
+ nextval
+---------
+       1
+(1 row)
+
+postgres=# drop sequence seq;
+DROP SEQUENCE
+postgres=# create sequence seq;
+CREATE SEQUENCE
+postgres=# execute p2;
+ nextval
+---------
+       1
+(1 row)
+~~~
+
+## 11.检查DDL,然后立即重新使用SPI plan
+
+```sql
+postgres=# create function cachebug() returns void as $$
+postgres$# declare r int;
+postgres$# begin
+postgres$# drop table if exists temptable cascade;
+postgres$# create temp table temptable as select * from generate_series(1,3) as f1;
+postgres$# create temp view vv as select * from temptable;
+postgres$# for r in select * from vv order by 1 loop
+postgres$# raise notice '%', r;
+postgres$# end loop;
+postgres$# end$$ language plpgsql;
+CREATE FUNCTION
+
+postgres=# select cachebug();
+NOTICE: table "temptable" does not exist, skipping
+CONTEXT: SQL statement "drop table if exists temptable cascade"
+PL/pgSQL function cachebug() line 4 at SQL statement
+referenced column: cachebug
+NOTICE: 1
+CONTEXT: referenced column: cachebug
+NOTICE: 2
+CONTEXT: referenced column: cachebug
+NOTICE: 3
+CONTEXT: referenced column: cachebug
+ cachebug
+----------
+
+(1 row)
+
+postgres=# select cachebug();
+NOTICE: drop cascades to view vv
+CONTEXT: SQL statement "drop table if exists temptable cascade"
+PL/pgSQL function cachebug() line 4 at SQL statement
+referenced column: cachebug
+NOTICE: 1
+CONTEXT: referenced column: cachebug
+NOTICE: 2
+CONTEXT: referenced column: cachebug
+NOTICE: 3
+CONTEXT: referenced column: cachebug
+ cachebug
+----------
+
+(1 row)
+```
diff --git "a/content/zh/post/ysl/openGauss\347\264\242\345\274\225\346\216\250\350\215\220\345\217\212\350\231\232\346\213\237\347\264\242\345\274\225.md" "b/content/zh/post/ysl/openGauss\347\264\242\345\274\225\346\216\250\350\215\220\345\217\212\350\231\232\346\213\237\347\264\242\345\274\225.md"
new file mode 100644
index 0000000000000000000000000000000000000000..ceadc8ae76456a631d86fb43a56aa5779e57a55f
--- /dev/null
+++ "b/content/zh/post/ysl/openGauss\347\264\242\345\274\225\346\216\250\350\215\220\345\217\212\350\231\232\346\213\237\347\264\242\345\274\225.md"
@@ -0,0 +1,247 @@
++++
+
+title = "openGauss索引推荐及虚拟索引"
+
+date = "2022-04-02"
+
+tags = ["openGauss索引推荐及虚拟索引"]
+
+archives = "2022-04"
+
+author = "阎书利"
+
+summary = "openGauss索引推荐及虚拟索引"
+
+img = "/zh/post/ysl/title/img39.png"
+
+times = "11:37"
+
++++
+
+# openGauss索引推荐及虚拟索引
+
+
+
+## 索引推荐
+
+在ORACLE的优化中,大家可能接触过SQL Tuning Advisor(SQL调优顾问,STA)。类似地,openGauss的索引推荐(Index-advisor)功能也可以对查询进行分析,并给出合理的创建索引的建议。
+
+如下是对openGauss索引推荐(Index-advisor)功能的使用测试,包括单条SQL查询索引推荐、Workload级别索引推荐(针对一批SQL语句的索引推荐)等。
+
+### 一、测试数据导入
+
+```
+postgres=# create database ysla;
+CREATE DATABASE
+postgres=# \c ysla
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+You are now connected to database "ysla" as user "omm".
+ysla=# CREATE TABLE tab_ysl_1 (col1 int, col2 int, col3 text); +CREATE TABLE +ysla=# INSERT INTO tab_ysl_1 VALUES(generate_series(1, 3000),generate_series(1, 3000),repeat( chr(int4(random()*26)+65),4)); +INSERT 0 3000 +ysla=# ANALYZE tab_ysl_1; +ANALYZE +ysla=# CREATE TABLE tab_ysl_2 (col1 int, col2 int); +CREATE TABLE +ysla=# INSERT INTO tab_ysl_2 VALUES(generate_series(1, 1000),generate_series(1, 1000)); +INSERT 0 1000 +ysla=# ANALYZE tab_ysl_2; +ANALYZE +``` + +### 二、单条SQL查询索引推荐 + +如下面所示,用gs_index_advise函数即可使用索引推荐,结果中包含表和可以创建索引的列。 + +#### 1.测试where + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 WHERE col1 = 10'); + table | column +-----------+-------- + tab_ysl_1 | (col1) +(1 row) +``` + +#### 2.测试join + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col1 = tab_ysl_2.col1'); + table | column +-----------+-------- + tab_ysl_1 | (col1) + tab_ysl_2 | +(2 rows) +``` + +#### 3.测试多表 + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT count(*), tab_ysl_2.col1 FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col2 = tab_ysl_2.col2 WHERE tab_ysl_2.col2 > 2 GROUP BY tab_ysl_2.col1 ORDER BY tab_ysl_2.col1'); + table | column +-----------+-------- + tab_ysl_1 | (col2) + tab_ysl_2 | (col1) +(2 rows) +``` + +#### 4.测试order by + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT *, col2 FROM tab_ysl_1 ORDER BY 1, 3'); + table | column +-----------+-------- + tab_ysl_1 | +(1 row) + + +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 WHERE col1 > 10 ORDER BY 1,col2'); + table | column +-----------+-------- + tab_ysl_1 | +(1 row) +``` + +#### 5.测试过长字符串 + +``` +ysla=# SELECT * FROM gs_index_advise('SELECT * FROM tab_ysl_1 where col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'')'); +ERROR: index_advisor.cpp : 983 : The parameter destMax is too small or parameter count is larger than macro parameter SECUREC_STRING_MAX_LEN. The second case only occures in functions strncat_s/strncpy_s. 
+``` + +### 三、Workload级别索引推荐 + +这种方式可以针对多条SQL,可以将待优化的SQL写到文件里,通过脚本获得推荐索引。 + +脚本目录在安装目录的bin/dbmind/index_advisor下边,我的目录为 + +/opt/gaussdb/app/bin/dbmind/index_advisor/index_advisor_workload.py + +将待优化的SQL放到文件里 + +``` +[omm@node1 index_advisor]$ cat 1.sql +SELECT * FROM tab_ysl_1 WHERE col1 = 10; +SELECT count(*), tab_ysl_2.col1 FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col2 = tab_ysl_2.col2 WHERE tab_ysl_2.col2 > 2 GROUP BY tab_ysl_2.col1 ORDER BY tab_ysl_2.col1; +SELECT * FROM tab_ysl_1 join tab_ysl_2 on tab_ysl_1.col1 = tab_ysl_2.col1; +``` + +使用如下方式调用脚本,可以批量获取推荐索引,26000为我的数据库端口,ysla为我的数据库名,1.sql为我待优化的SQL存放的文件 + +``` +[omm@node1 index_advisor]$ pwd +/opt/gaussdb/app/bin/dbmind/index_advisor +[omm@node1 index_advisor]$ python3 ./index_advisor_workload.py 26000 ysla 1.sql + +###### ############################################################## Generate candidate indexes + +table: tab_ysl_1 columns: col1 +table: tab_ysl_1 columns: col2 +table: tab_ysl_2 columns: col1 + +###### ############################################################### Determine optimal indexes + +create index ind0 on tab_ysl_1(col1); +``` + +### 四、索引效率查看 + +这里验证下索引推荐给我们推荐的索引究竟是否起到优化作用。 + +``` +[omm@node1 index_advisor]$ cat 1.sql +SELECT * FROM tab_ysl_1 WHERE col1 = 10; + +[omm@node1 index_advisor]$ time gsql -d ysla -p 26000 -f 1.sql + col1 | col2 | col3 +------+------+------ + 10 | 10 | SSSS +(1 row) + +total time: 35 ms + +real 0m0.050s +user 0m0.007s +sys 0m0.002s +``` + +可以看到上边未优化的SQL执行时间为0m0.050s + +``` +[omm@node1 index_advisor]$ python3 ./index_advisor_workload.py 26000 ysla 1.sql + +###### ############################################################## Generate candidate indexes + +table: tab_ysl_1 columns: col1 + +###### ############################################################### Determine optimal indexes + +create index ind0 on tab_ysl_1(col1); +``` + +通过Index-advisor获取推荐索引。并创建索引 + +``` +ysla=# create index ind0 on tab_ysl_1(col1); +CREATE INDEX +``` + +可以看到查询的时间明显减少。 + +``` +[omm@node1 index_advisor]$ time gsql -d ysla -p 26000 -f 1.sql + col1 | col2 | col3 +------+------+------ + 10 | 10 | SSSS +(1 row) + +total time: 0 ms + +real 0m0.016s +user 0m0.009s +sys 0m0.000s +``` + +## 虚拟索引 + +一般在加索引时,会堵塞DML(不过PG支持并发加索引,不堵塞DML) 。只有索引真正能起到优化作用,我们建立索引才是有意义的。虚拟索引是一个很有用的东西,没有副作用,只是虚拟的索引,建立虚拟索引后,可以通过EXPLAIN来查看加索引后的成本估算,判断是否加索引COST会降低。 + +可以用虚拟索引检验索引的效果,根据效果可选择是否创建真实的索引优化查询。 + +``` +#测试建立虚拟索引(hypopg_create_index) +ysla=# SELECT * FROM hypopg_create_index('CREATE INDEX ON tab_ysl_1(col1)'); + indexrelid | indexname +------------+----------------------------- + 41453 | <41453>btree_tab_ysl_1_col1 +(1 row) + +#显示所有创建的虚拟索引信息(enable_hypo_index) +ysla=# select * from hypopg_display_index(); + indexname | indexrelid | table | column +-----------------------------+------------+-----------+-------- + <41454>btree_tab_ysl_1_col1 | 41454 | tab_ysl_1 | (col1) +(1 row) + +ysla=# set enable_hypo_index = on;explain SELECT * FROM tab_ysl_1 WHERE col1 = 100; +SET + QUERY PLAN +---------------------------------------------------------------------------------------------- + Index Scan using <41453>btree_tab_ysl_1_col1 on tab_ysl_1 (cost=0.00..8.27 rows=1 width=13) + Index Cond: (col1 = 100) +(2 rows) + +#测试删除指定虚拟索引(hypopg_display_index) +使用函数hypopg_drop_index删除指定oid的虚拟索引 +ysla=# select * from hypopg_drop_index(41454); + hypopg_drop_index +------------------- + t +(1 row) + +#使用函数hypopg_reset_index一次性清除所有创建的虚拟索引 +ysla=# SELECT * FROM hypopg_reset_index(); + hypopg_reset_index +-------------------- +``` diff --git 
"a/content/zh/post/yushanXD/2021-12-11-OpenGauss\345\206\205\345\255\230\345\274\225\346\223\216\344\270\255\347\232\204\347\264\242\345\274\225.md" "b/content/zh/post/yushanXD/2021-12-11-OpenGauss\345\206\205\345\255\230\345\274\225\346\223\216\344\270\255\347\232\204\347\264\242\345\274\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..2e82e2f74e88fd60f6b6642f65421fe54f977f6a --- /dev/null +++ "b/content/zh/post/yushanXD/2021-12-11-OpenGauss\345\206\205\345\255\230\345\274\225\346\223\216\344\270\255\347\232\204\347\264\242\345\274\225.md" @@ -0,0 +1,206 @@ ++++ + +title = "openGauss内存引擎中的索引" + +date = "2021-12-11" + +tags = ["openGauss内存引擎中的索引"] + +archives = "2021-12" + +author = "yushanXD" + +summary = "openGauss内存引擎中的索引" + +times = "17:30" + ++++ + + +## 一、索引 + +索引是一种用于快速查询和检索数据的数据结构。常见的索引结构有: B 树, B+树和 Hash。 + +索引的作用就相当于目录的作用。打个比方: 我们在查字典的时候,如果没有目录,那我们就只能一页一页的去找我们需要查的那个字,速度很慢。如果有目录了,我们只需要先去目录里查找字的位置,然后直接翻到那一页就行了。 + + + +## 二、内存引擎 + +在OpenGauss中内存引擎全称为内存优化表(MOT)存储引擎。 + +内存引擎作为在openGauss中与传统基于磁盘的行存储、列存储并存的一种高性能存储引擎,基于全内存态数据存储,为openGauss提高了高吞吐的实时数据处理分析能力及极低的事务处理时延,在不同业务负载场景下可以达到其他引擎事务处理能力的3~10倍。内存引擎之所以有较强的事务处理能力,更多因为其全面利用内存中可以实现的无锁化的数据及其索引结构、高效的数据管控,基于NUMA架构的内存管控,优化的数据处理算法及事务管理机制。 + +MOT与基于磁盘的普通表并排创建。MOT的有效设计实现了几乎完全的SQL覆盖,并且支持完整的数据库功能集,如存储过程和自定义函数。通过完全存储在内存中的数据和索引、非统一内存访问感知(NUMA-aware)设计、消除锁和锁存争用的算法以及查询原生编译,MOT可提供更快的数据访问和更高效的事务执行。MOT有效的几乎无锁的设计和高度调优的实现,使其在多核服务器上实现了卓越的近线性吞吐量扩展。 + +![img](./img/OpenGauss内存引擎架构图.png) + +图1 OpenGauss内存引擎架构图 + +## 三、Masstree + +### 1.概要 + +Trie 树和 B+ 树结合而成的并发算法——MassTree。 + +#### 1.1 因此首先介绍一下字典树(Trie树)。 + +Trie树,又叫字典树、前缀树(Prefix Tree)、单词查找树 或 键树,是一种多叉树结构。 + +![img](./img/trietree.png) + +图2 一棵Trie树,表示了关键字集合{“a”, “to”, “tea”, “ted”, “ten”, “i”, “in”, “inn”}。 + +***\*Trie树的基本性质:\**** + +1.根节点不包含字符,除根节点外的每一个子节点都包含一个字符。 + +2.从根节点到某一个节点,路径上经过的字符连接起来,为该节点对应的字符串。 + +3.每个节点的所有子节点包含的字符互不相同。 + +一般会在节点结构中设置一个标志,用来标记该结点处是否构成一个单词(关键字)。 + +#### 1.2然后再介绍一下B+树。 + +![img](./img/b+tree.png) + +图3 B+树样例 + +B+树是B树的一种变种,有着比B-树更高的查询性能 + +***\*一个m阶的B+树具有如下几个特征:\**** + +1.有k个子树的中间节点包含有k个元素(B树中是k-1个元素),每个元素不保存数据,只用来索引,所有数据都保存在叶子节点。 + +2.所有的叶子结点中包含了全部元素的信息,及指向含这些元素记录的指针,且叶子结点本身依关键字的大小自小而大顺序链接。 + +3.所有的中间节点元素都同时存在于子节点,在子节点元素中是最大(或最小)元素。 + +从结构上来说,Mass Tree 是由一层或多层 B+ 树组成的 Trie 树。 + +![img](./img/p4.png) + +图4 + +图4中,圆形代表内部节点(interior node,也就是 B+ 树的 branch node),矩形代表边缘节点(border node,也就是 B+ 树的 leaf node),五角星代表 value。border node 的 value 域可能存放的是数据,也可能存放的是下一层子树的根节点。 + +Masstree以键(key)的前缀作为索引,每k 个字节形成一层 B+ 树结构,在每层中处理键中这k 个 字 节 对 应 所 需 的INSERT/LOOKUP/ UPDATE/DELETE流程。图4为k=8的情况。 + +### 2.Masstree静态数据结构 + +![img](./img/p5.png) + +图5 + +## 四、OpenGauss中基于Masstree的索引 + +下面所有提到的大部分文件位于 + +openGauss-server-master\openGauss-server-master\src\gausskernel\storage\mot\core\src\storage\index中 + +### 1.数据结构 + +对应文件位置: + +openGauss-server-master\openGauss-server-master\src\gausskernel\storage\mot\core\src\storage\index\Masstree + +OpenGauss中对应Masstree的索引类名为MasstreePrimaryIndex, + +继承Index超类。 + +类MasstreePrimaryIndex拥有图6中的成员属性。 + +![img](./img/p6.png) + +图6 + +其超类Index拥有图7中的成员属性。 + +![img](./img/p7.png) + +图7 + +### 2.并发控制 + +为了防止读线程读到中间状态,叶节点被设计成最多存放 15 个 key,引入了一个8字节64位的 permutation(uint64_t),这个 permutation 被划分成16份,每份4位,其中1份代表当前节点的 key 数量,另外15份用于存放每个 key 在节点中实际位置的索引,key 的插入是顺序插入,之后只需要修改 permutation 来更新节点内 key 的索引信息,然后施加一个 release 语义,当读线程对这个节点的 permutation 施加 acquire 语义时,可以获取到完整的节点信息。 + +并发情况一般有两种竞争,OpenGauss采用一个32bit的version参数应对并发控制。 + +![img](./img/p8.png) + +图8 + +write-write 竞争:同一时刻只有一个线程可以对当前节点进行写操作 + +read-write 竞争:开始前和读结束后都需要获取当前节点的最新 
version,来判断在读过程中当前节点是否发生了写操作(插入或分裂),同时对节点的写操作都需要先修改 version,在插入 key 之前需要设置 inserting 标记,插入完成之后将 insert 的 vinsert + 1;在分裂之前需要设置 splitting 标记,分裂完成之后将 split 的 vsplit + 1。 + +### 3.查找操作 + +![img](./img/p9.png) + +图9 + +首先,在开始读取节点之前,必须获得节点的 stable version,即 version 中的 inserting 和 splitting 位都为0。 + +其次,在下降之前,需要获取最新的 root,因为在开始下降前,根节点可能分裂了,导致其发生了改变。 + +最后,如果当前节点已经是叶节点,那么可以返回,否则需要进行下降,读取内部结点根据 key[x, x+8)(8字节) 获得下降节点之后,分为3种情况处理: + +1.节点在我们读取期间没有发生任何变化,我们可以安全地进行下降; + +2.节点发生了变化,而且是分裂,那么我们需要从根节点重新进行下降; + +3.节点发生了变化,但只是插入,只需要重新对当前节点进行下降。 + +### 4. 插入操作 + +![img](./img/p10.png) + +图10 + +查找key,如果找到key则tree不做改变;如果没找到, + +1.先锁住应该持有待插入key的节点 + +2.将相应节点version修改为inserting + +3.将相应节点state修改为insert + +4.更新树结构,以满足约束 + +5.更新在叶节点中的key slice ,keylen, key suffix ,key vaule + +6.Add the key's location in permutation's back.在解锁节点并将密钥输入到排列中之后将在finish_insert(从lp.finish调用)中完成。 + +![img](./img/p11.png) + +图11 OpenGauss内存引擎索引上插入操作代码部分 + +![img](./img/p12.png) + +图12 + +在lp.finish传入第一个参数为1时,调用finish_insert,finish_insert函数中实现通过在permutation插入新增节点的index使得节点对外可见。 + +### 5.删除操作 + +这里只讨论逻辑删除。 + +逻辑删除和B+树的类似,但是并不对 key 少的节点进行合并,当节点 key 减少到0时,需要标记这个节点为 deleted,然后将其从父节点删除,同时如果是叶节点的话,还需要维护叶节点的双向连接。如果某棵子树为空的话也可以删除整棵子树。当其他线程发现节点处于 deleted 状态时,需要进行重试,因为这个节点逻辑上是不存在的。 + +![img](./img/p13.png) + +图13 + +删除操作可能会遇到图13的情况,左边的线程根据 k1 定位到了位置 i,在读取 v1 之前这个节点发生了删除位于位置 i 的 k1,同时在位置 j 处插入 k2,如果 i 等于 j,可能导致左边的线程读取到 v2,为了解决这个问题,需要在索引 i 被删除后重新利用时增加节点的 vinsert 域。 + +![img](./img/p14.png) + +图14 OpenGauss内存引擎索引上删除操作代码部分 + +![img](./img/p15.png) + +图 15 lp.finish传入参数为-1时,调用finish_remove函数,通过在permutation中删除节点索引已达到逻辑删除作用 + + \ No newline at end of file diff --git "a/content/zh/post/yushanXD/2021-12-11-Opengauss\345\215\225\346\234\272\351\203\250\347\275\262.md" "b/content/zh/post/yushanXD/2021-12-11-Opengauss\345\215\225\346\234\272\351\203\250\347\275\262.md" new file mode 100644 index 0000000000000000000000000000000000000000..d2a4afa1f282246517b1fa9211c4f45a3d0f7ca5 --- /dev/null +++ "b/content/zh/post/yushanXD/2021-12-11-Opengauss\345\215\225\346\234\272\351\203\250\347\275\262.md" @@ -0,0 +1,424 @@ ++++ + +title = "openGauss单机部署" + +date = "2021-12-11" + +tags = ["openGauss单机部署"] + +archives = "2021-12" + +author = "yushanXD" + +summary = "openGauss单机部署" + +times = "17:30" + ++++ + + +## 一、安装环境 + +### 1.操作系统:虚拟机VMware、CentOS7.9 + +### 2.环境设置: + +#### (1)虚拟机内存3G、磁盘100G + +#### (2)系统版本修改 + +一开始使用了centos8,无法安装,因此降低版本,选用7.9后依然存在一些问题,因此修改/etc/redhat-release文件中系统版本为CentOS Linux release 7.6(Core) + +#### (3)配置YUM源 + +##### ①删除系统自带yum源 + +```shell +rm -rf /etc/yum.repos.d/* +``` + +##### ②下载阿里云yum源 + +```shell +wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo +``` + +##### ③生成仓库缓存 + +```shell +yum makecache +``` + +#### (4)安装依赖包 + +```shell +1 yum install ‐y libaio‐devel flex bison ncurses‐devel glibc.devel patch lsb_release +2 yum install ‐y openssl* python3 +``` + +#### (5)关闭SELINUX和Firewall + +```shell +1 setenforce 0 +2 systemctl disable firewalld.service +3 systemctl stop firewalld.service +``` + +#### (6)关闭交换内存 + +```shell +swapoff -a +``` + +#### (7)关闭透明大页 + +```shell +1 vim /etc/rc.d/rc.local +2 if test ‐f /sys/kernel/mm/transparent_hugepage/enabled; +3 then +4 echo never > /sys/kernel/mm/transparent_hugepage/enabled +5 fi +6 if test ‐f /sys/kernel/mm/transparent_hugepage/defrag; +7 then +8 echo never > /sys/kernel/mm/transparent_hugepage/defrag +9 fi +``` + + (8)修改主机名 + +```shell +1 echo "node1" > /etc/hostname +2 echo “ 192.168.17.129 node1” >>/etc/hosts +``` + + + +## 二、安装详细步骤 + +### 1.Opengauss安装 + +#### 
(1)下载opengauss安装包及创建用户组和目录 + +```shell +1 groupadd dbgrp +2 useradd -g dbgrp -d /home/omm -m -s /bin/bash omm +3 echo "omm" | passwd -‐stdin omm +4 mkdir -p /opt/software/openGauss +5 chmod 755 -R /opt/software +6 chown -R omm:dbgrp /opt/software/openGauss + cd /opt/software/openGauss/ +7 wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/x86/openGauss-2.0.0-CentOS-64bit-all.tar.gz +8 tar -zxvf openGauss-2.0.0-CentOS-64bit-all.tar.gz +9 tar -zxvf openGauss-2.0.0-CentOS-64bit-om.tar.gz +``` + +#### (2)单机xml配置文件 + +首先从如下地址复制文件至当前位置 + +```shell +cp script/gspylib/etc/conf/cluster_config_template.xml . +``` + +修改配置文件具体如下,配置文件中要注意配置一下几个参数:nodeNAMES、backips + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + + + +#### (3)设置lib库 + +```shell +vim .bashrc +添加 +export GPHOME=/opt/huawei/install/om +export PATH=$GPHOME/script/gspylib/pssh/bin:$GPHOME/script:$PATH +export LD_LIBRARY_PATH=$GPHOME/lib:$LD_LIBRARY_PATH +export PYTHONPATH=$GPHOME/lib +export GAUSSHOME=/opt/huawei/install/app +export PATH=$GAUSSHOME/bin:$PATH +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH +export S3_CLIENT_CRT_FILE=$GAUSSHOME/lib/client.crt +export GAUSS_VERSION=2.0.0 +export PGHOST=/opt/huawei/tmp +export GAUSSLOG=/opt/huawei/log/omm +umask 077 +export GAUSS_ENV=2 +export GS_CLUSTER_NAME=singlenode +``` + +#### (4)执行交互式初始化 + +##### ①预安装 + +操作如下: + +```shell +1 cd /opt/software/openGauss/script +2 root@node1 script]#python3 gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/cluster_config_template.xml +Parsing the configuration file. +Successfully parsed the configuration file. +Installing the tools on the local node. +Successfully installed the tools on the local node. +Setting pssh path +Successfully set core path. +Are you sure you want to create the user[omm] and create trust for it (yes)? yes +Preparing SSH service. +Successfully prepared SSH service. +Checking OS software. +Successfully check os software. +Checking OS version. +Successfully checked OS version. +Creating cluster's path. +Successfully created cluster's path. +Setting SCTP service. +Successfully set SCTP service. +Set and check OS parameter. +Setting OS parameters. +Successfully set OS parameters. +Warning: Installation environment contains some warning messages. +Please get more details by "/opt/software/openGauss/script/gs_checkos -i A -h node1 --detail". +Set and check OS parameter completed. +Preparing CRON service. +Successfully prepared CRON service. +Setting user environmental variables. +Successfully set user environmental variables. +Setting the dynamic link library. +Successfully set the dynamic link library. +Setting Core file +Successfully set core path. +Setting pssh path +Successfully set pssh path. +Set ARM Optimization. +No need to set ARM Optimization. +Fixing server package owner. +Setting finish flag. +Successfully set finish flag. +Preinstallation succeeded. +``` + +当出现“Preinstallation succeeded.”时,预安装成功。 + + + +##### ②安装 + +进入script目录后进行正式安装,命令如下,其中“/opt/software/openGauss/cluster_config_template.xml”为前几步中编辑的配置文件。 + +此过程需要输入密码,且设置的密码要符合复杂度要求如下: + +最少包含8个字符; + +不能和用户名和当前密码(ALTER)相同,或和当前密码反序; + +至少包含大写字母(A-Z),小写字母(a-z),数字,非字母数字字符(限定为~!@#$%^&*()-_=+|[{}];:,<.>/?)四类字符中的三类字符: + +```shell +[omm@node1 openGauss]$ cd script/ +[omm@node1 script]$ gs_install -X /opt/software/openGauss/cluster_config_template.xml +Parsing the configuration file. +Check preinstall on every node. +Successfully checked preinstall on every node. +Creating the backup directory. +Successfully created the backup directory. 
+begin deploy.. +Installing the cluster. +begin prepare Install Cluster.. +Checking the installation environment on all nodes. +begin install Cluster.. +Installing applications on all nodes. +Successfully installed APP. +begin init Instance.. +encrypt cipher and rand files for database. +Please enter password for database: +Please repeat for database: +begin to create CA cert files +The sslcert will be generated in /opt/huawei/install/app/sslcert/om +Cluster installation is completed. +Configuring. +Deleting instances from all nodes. +Successfully deleted instances from all nodes. +Checking node configuration on all nodes. +Initializing instances on all nodes. +Updating instance configuration on all nodes. +Check consistence of memCheck and coresCheck on database nodes. +Configuring pg_hba on all nodes. +Configuration is completed. +Successfully started cluster. +Successfully installed application. +end deploy.. +``` + +测试安装是否成功,首先需要使数据库处于开启状态,然后输入”gsql -d postgres -p 26000”命令使数据库在本地运行,其中-p 为数据库端口dataPortBase,具体数值在前述过程中xml配置文件中确定,这里为26000。 + +![img](./img/command1.png) ![img](./img/command2.png) + + + +### 2.Opengauss连接设置 + +#### (1)安装java,确认jdk版本为1.8 + + + +#### (2)从官网下载jdbc压缩包后,将其解压至路径/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.302.b08-0.el7_9.x86_64/jre/lib/ext下 + + + +#### (3)配置数据库服务器中的白名单与监听名单 + +##### ①以操作系统用户omm登录数据库主节点 + + + +##### ②执行如下命令增加对外提供服务的网卡IP或者主机名(英文逗号分隔),其中NodeName为当前节点名称,如: + +```shell + gs_guc reload -N NodeName -I all -c "listen_addresses='localhost,192.168.17.129'" +``` + + + +##### ③执行如下命令在数据库主节点配置文件中增加一条认证规则。(这里假设客户端IP地址为192.168.17.129,即远程连接的机器的IP地址) + +```shell +gs_guc reload -N all -I all -h "host all yushan 192.168.17.129/32 sha256" + +- -N all表示openGauss中的所有主机。 +- -I all表示主机中的所有实例。 +- -h表示指定需要在“pg_hba.conf”增加的语句。 +- all表示允许客户端连接到任意的数据库。 +- yushan表示连接数据库的用户。 +- 192.168.17.129/32表示只允许IP地址为192.168.17.129的主机连接。在使用过程中,请根据用户的网络进行配置修改。32表示子网掩码为1的位数,即255.255.255.255 +- sha256表示连接时jack用户的密码使用sha256算法加密。 +``` + +与之效果相同的代替操作: + +在/opt/huawei/install/data/db1路径(创建的节点名叫db1)下编辑pg_hba.conf文件 + +![img](./img/pg_hba-conf.png) + + + +#### (4)通过编写java程序即可连接,example如下 + +```java +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.CallableStatement; +public class test{ +public static Connection getConnect(String username, String passwd) + { + //驱动类。 + String driver = "org.postgresql.Driver"; + //数据库连接描述符。 + String sourceURL = "jdbc:postgresql://127.0.0.1:26000/postgres"; + Connection conn = null; + + try + { + //加载驱动。 + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + //创建连接。 + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; + public static void main(String[] args) { + // TODO Auto-generated method stub + Connection conn = getConnect("yushan", "1qaz@wsx"); + //BatchInsertData(conn); + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} +``` + +编译执行程序后,如图,连接成功。 + +![img](./img/connect-success.png) + +## 三、安装过程中碰到的问题与解决办法 + +### 1.问题1 + +在安装结束后,准备运行后,发现gsom 无法启动。 + +解决过程: + +猜测可能是内存不足,虚拟机一开始设置的内存为1G,查阅相关博客发现,1G内存对于企业版不足,后将虚拟机内存设置为3G。 + + + +### 2.问题2 + +运行gsom后报错如下 + +![img](./img/question2-1.png) + +解决过程: + +检查发现pg_hba.conf文件配置出错,修改如下 + +![img](./img/question2-2.png) + +### 3.问题3 + +一开始安装的虚拟机为centos8,进行预安装后发现不支持该版本操作系统。 + 
+解决过程: + +切换为centos7.9,但依然报错,因此修改/etc/redhat-release文件中系统版本为CentOS Linux release 7.6(Core) + + \ No newline at end of file diff --git "a/content/zh/post/yushanXD/img/OpenGauss\345\206\205\345\255\230\345\274\225\346\223\216\346\236\266\346\236\204\345\233\276.png" "b/content/zh/post/yushanXD/img/OpenGauss\345\206\205\345\255\230\345\274\225\346\223\216\346\236\266\346\236\204\345\233\276.png" new file mode 100644 index 0000000000000000000000000000000000000000..2502e81da0bdadcf02446c47c4085bee05609955 Binary files /dev/null and "b/content/zh/post/yushanXD/img/OpenGauss\345\206\205\345\255\230\345\274\225\346\223\216\346\236\266\346\236\204\345\233\276.png" differ diff --git a/content/zh/post/yushanXD/img/b+tree.png b/content/zh/post/yushanXD/img/b+tree.png new file mode 100644 index 0000000000000000000000000000000000000000..c28099ce232e92721e1f7fcc6e9e41106d4191b2 Binary files /dev/null and b/content/zh/post/yushanXD/img/b+tree.png differ diff --git a/content/zh/post/yushanXD/img/command1.png b/content/zh/post/yushanXD/img/command1.png new file mode 100644 index 0000000000000000000000000000000000000000..bcbacff4517158209f0c0e94d14b968194d7acf4 Binary files /dev/null and b/content/zh/post/yushanXD/img/command1.png differ diff --git a/content/zh/post/yushanXD/img/command2.png b/content/zh/post/yushanXD/img/command2.png new file mode 100644 index 0000000000000000000000000000000000000000..18cf6daa85b2fd6a06e46d3648bcd3abf4396a44 Binary files /dev/null and b/content/zh/post/yushanXD/img/command2.png differ diff --git a/content/zh/post/yushanXD/img/connect-success.png b/content/zh/post/yushanXD/img/connect-success.png new file mode 100644 index 0000000000000000000000000000000000000000..a175c0287ccf70f3c55edf63d99e354ba1a5a456 Binary files /dev/null and b/content/zh/post/yushanXD/img/connect-success.png differ diff --git a/content/zh/post/yushanXD/img/p10.png b/content/zh/post/yushanXD/img/p10.png new file mode 100644 index 0000000000000000000000000000000000000000..fc96baf82cf55e2fe543ee61735d2a6178e52c41 Binary files /dev/null and b/content/zh/post/yushanXD/img/p10.png differ diff --git a/content/zh/post/yushanXD/img/p11.png b/content/zh/post/yushanXD/img/p11.png new file mode 100644 index 0000000000000000000000000000000000000000..f2858e890e8f086740a7ff0e73702ac692ed4f4f Binary files /dev/null and b/content/zh/post/yushanXD/img/p11.png differ diff --git a/content/zh/post/yushanXD/img/p12.png b/content/zh/post/yushanXD/img/p12.png new file mode 100644 index 0000000000000000000000000000000000000000..4bf851d53c597c339ecacdaf82836e11a78750c3 Binary files /dev/null and b/content/zh/post/yushanXD/img/p12.png differ diff --git a/content/zh/post/yushanXD/img/p13.png b/content/zh/post/yushanXD/img/p13.png new file mode 100644 index 0000000000000000000000000000000000000000..ac23f68935c47a9c28393e6b2ae2173c33c71f68 Binary files /dev/null and b/content/zh/post/yushanXD/img/p13.png differ diff --git a/content/zh/post/yushanXD/img/p14.png b/content/zh/post/yushanXD/img/p14.png new file mode 100644 index 0000000000000000000000000000000000000000..68a24f48c8745690a1181659f85e3dc51bc5d9a2 Binary files /dev/null and b/content/zh/post/yushanXD/img/p14.png differ diff --git a/content/zh/post/yushanXD/img/p15.png b/content/zh/post/yushanXD/img/p15.png new file mode 100644 index 0000000000000000000000000000000000000000..2c3a0b661f311f73e1ab1e38255d179ccaa58e96 Binary files /dev/null and b/content/zh/post/yushanXD/img/p15.png differ diff --git a/content/zh/post/yushanXD/img/p4.png b/content/zh/post/yushanXD/img/p4.png 
new file mode 100644 index 0000000000000000000000000000000000000000..a5449d4f50a5cf901cd87a13f62623a82cb0086a Binary files /dev/null and b/content/zh/post/yushanXD/img/p4.png differ diff --git a/content/zh/post/yushanXD/img/p5.png b/content/zh/post/yushanXD/img/p5.png new file mode 100644 index 0000000000000000000000000000000000000000..d1a2207df9416527ff8e590c8d5a15fe30966fbe Binary files /dev/null and b/content/zh/post/yushanXD/img/p5.png differ diff --git a/content/zh/post/yushanXD/img/p6.png b/content/zh/post/yushanXD/img/p6.png new file mode 100644 index 0000000000000000000000000000000000000000..f5b85986af512076f27a1fe379fa0501d50de1a1 Binary files /dev/null and b/content/zh/post/yushanXD/img/p6.png differ diff --git a/content/zh/post/yushanXD/img/p7.png b/content/zh/post/yushanXD/img/p7.png new file mode 100644 index 0000000000000000000000000000000000000000..341facbf4d41d113e8cd5e4693b3515268dbcfcd Binary files /dev/null and b/content/zh/post/yushanXD/img/p7.png differ diff --git a/content/zh/post/yushanXD/img/p8.png b/content/zh/post/yushanXD/img/p8.png new file mode 100644 index 0000000000000000000000000000000000000000..02139a1103125834597299de5ecba9d248cd136f Binary files /dev/null and b/content/zh/post/yushanXD/img/p8.png differ diff --git a/content/zh/post/yushanXD/img/p9.png b/content/zh/post/yushanXD/img/p9.png new file mode 100644 index 0000000000000000000000000000000000000000..9ca067b381ddba998201af70f0de432027bb21b1 Binary files /dev/null and b/content/zh/post/yushanXD/img/p9.png differ diff --git a/content/zh/post/yushanXD/img/pg_hba-conf.png b/content/zh/post/yushanXD/img/pg_hba-conf.png new file mode 100644 index 0000000000000000000000000000000000000000..77b7fb411e79de94a1a219622108ce7fca8b5840 Binary files /dev/null and b/content/zh/post/yushanXD/img/pg_hba-conf.png differ diff --git a/content/zh/post/yushanXD/img/question2-1.png b/content/zh/post/yushanXD/img/question2-1.png new file mode 100644 index 0000000000000000000000000000000000000000..bf0af9fbe22086f18b7f881a71668c04b348e7c8 Binary files /dev/null and b/content/zh/post/yushanXD/img/question2-1.png differ diff --git a/content/zh/post/yushanXD/img/question2-2.png b/content/zh/post/yushanXD/img/question2-2.png new file mode 100644 index 0000000000000000000000000000000000000000..5cb43620f8f7278111c24044ee5662640b95226c Binary files /dev/null and b/content/zh/post/yushanXD/img/question2-2.png differ diff --git a/content/zh/post/yushanXD/img/trietree.png b/content/zh/post/yushanXD/img/trietree.png new file mode 100644 index 0000000000000000000000000000000000000000..4f55a62f8c19f1dc1207baf286f6686291d5cc7a Binary files /dev/null and b/content/zh/post/yushanXD/img/trietree.png differ diff --git a/content/zh/post/zhangcuiping/title/img.png b/content/zh/post/zhangcuiping/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/zhangcuiping/title/img.png differ diff --git a/content/zh/post/zhangcuiping/title/img6.png b/content/zh/post/zhangcuiping/title/img6.png new file mode 100644 index 0000000000000000000000000000000000000000..2ddddfa2858d77999b4cfec8e97e4f29ac0cab79 Binary files /dev/null and b/content/zh/post/zhangcuiping/title/img6.png differ diff --git 
"a/content/zh/post/zhangcuiping/\345\275\223\344\275\277\347\224\250gs_probackup\345\244\207\344\273\275\346\225\260\346\215\256\346\227\266\357\274\214\346\217\220\347\244\272\346\227\240\346\263\225\350\277\236\346\216\245\345\210\260\346\225\260\346\215\256\345\272\223\346\227\266\346\200\216\344\271\210\345\212\236.md" "b/content/zh/post/zhangcuiping/\345\275\223\344\275\277\347\224\250gs_probackup\345\244\207\344\273\275\346\225\260\346\215\256\346\227\266\357\274\214\346\217\220\347\244\272\346\227\240\346\263\225\350\277\236\346\216\245\345\210\260\346\225\260\346\215\256\345\272\223\346\227\266\346\200\216\344\271\210\345\212\236.md" new file mode 100644 index 0000000000000000000000000000000000000000..cf3677d00264730a3f843c059c34ef665eb3514d --- /dev/null +++ "b/content/zh/post/zhangcuiping/\345\275\223\344\275\277\347\224\250gs_probackup\345\244\207\344\273\275\346\225\260\346\215\256\346\227\266\357\274\214\346\217\220\347\244\272\346\227\240\346\263\225\350\277\236\346\216\245\345\210\260\346\225\260\346\215\256\345\272\223\346\227\266\346\200\216\344\271\210\345\212\236.md" @@ -0,0 +1,78 @@ ++++ + +title = "当使用gs_probackup备份数据时,提示无法连接到数据库时怎么办?" + +date = "2022-04-18" + +tags = ["当使用gs_probackup备份数据时,提示无法连接到数据库时怎么办?"] + +archives = "2022-04" + +author = "张翠娉" + +summary = "当使用gs_probackup备份数据时,提示无法连接到数据库时怎么办?" + +img = "/zh/post/zhangcuiping/title/img.png" + +times = "10:20" ++++ + +# 当使用gs_probackup备份数据时,提示无法连接到数据库时怎么办? + +本文出处:https://www.modb.pro/db/124909 + +gs_probackup是一个用于管理MogDB数据库备份和恢复的工具。它对MogDB实例进行定期备份,以便在数据库出现故障时能够恢复服务器。 + +- 可用于备份单机数据库或者集群主节点数据库,为物理备份。 +- 可备份外部目录的内容,如脚本文件、配置文件、日志文件、dump文件等。 +- 支持增量备份、定期备份和远程备份。 +- 可设置备份的留存策略。 + +## 备份步骤 + +1、初始化备份目录。执行如下命令在指定的目录下创建backups/和wal/子目录,分别用于存放备份文件和WAL文件,例如指定目录为/opt/software/mogdb/backup_dir。 + +[root@mogdb-kernel-0005 backup_dir]#gs_probackup init -B /opt/software/mogdb/backup_dir +INFO: Backup catalog '/opt/software/mogdb/backup_dir' successfully inited + +2、添加一个新的备份实例。gs_probackup可以在同一个备份目录下存放多个数据库实例的备份。例如数据目录为/cd opt/mogdb/data/db1。 + +``` +[root@mogdb-kernel-0005 backup_dir]#gs_probackup add-instance -B /opt/software/mogdb/backup_dir -D /opt/mogdb/data/db1 --instance instance1 +INFO: Instance 'instance1' successfully inited +``` + +3、创建指定实例的备份。在进行增量备份之前,必须至少创建一次全量备份。 + +``` +[root@mogdb-kernel-0005 instance1]# gs_probackup backup -B /opt/software/mogdb/backup_dir --instance instance1 -b FULL +INFO: Backup start, gs_probackup version: 2.4.2, instance: instance1, backup ID: R08KCK, backup mode: FULL, wal mode: STREAM, remote: false, compress-algorithm: none, compress-level: 1 +LOG: Backup destination is initialized +ERROR: could not connect to database root: connect to server failed: No such file or directory +``` + +**注意:当创建指定实例的备份时,系统上报如上错误,提示不能连接到数据库。这是因为没有在pg_probackup.conf配置文件中添加数据库连接信息。此时执行如下命令建立数据库连接后,必须切换到omm用户后再次执行备份,即可成功。如果直接在root用户下执行备份,仍然会报错提示无法连接到数据库。** + +``` +[root@mogdb-kernel-0005 instance1]#gs_probackup set-config -B /opt/software/mogdb/backup_dir --instance=instance1 -d postgres -p 26000 +[root@mogdb-kernel-0005 instance1]#su - omm + +[omm@mogdb-kernel-0005 instance1]#gs_probackup backup -B /opt/software/mogdb/backup_dir --instance instance1 -b FULL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 5s +INFO: Validating backup R08LWJ +INFO: Backup R08LWJ data files are valid +INFO: Backup R08LWJ resident size: 686MB +INFO: Backup R08LWJ completed +``` + +4、从指定实例的备份中恢复数据。 + +``` +gs_probackup restore -B /opt/software/mogdb/backup_dir --instance instance1 -D 
/opt/mogdb/data/db1 -i R08LWJ +INFO: Backup files are restored. Transfered bytes: 686MB, time elapsed: 1s +INFO: Restore incremental ratio (less is better): 102% (686MB/670MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 5s +INFO: Restore of backup R08LWJ completed. +``` diff --git "a/content/zh/post/zhangfan/JDBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB openGauss.md" "b/content/zh/post/zhangfan/JDBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..0ad5dfd6d05166d1f8738a669523ef2ef4d634f3 --- /dev/null +++ "b/content/zh/post/zhangfan/JDBC\351\251\261\345\212\250\350\277\236\346\216\245MogDB openGauss.md" @@ -0,0 +1,211 @@ ++++ + +title = "JDBC驱动连接MogDB/openGauss" + +date = "2022-04-07" + +tags = ["JDBC驱动连接MogDB/openGauss"] + +archives = "2022-04" + +author = "云和恩墨-张凡" + +summary = "JDBC驱动连接MogDB/openGauss" + +img = "/zh/post/zhangfan/title/img20.png" + +times = "10:20" + ++++ + +# JDBC驱动连接MogDB/openGauss + +## 一、环境说明 + +```sql +[root@node1 ~]# cat /etc/redhat-release +CentOS Linux release 7.6.1810 (Core) +[root@node1 ext]# java -version +java version "1.8.0_301" +Java(TM) SE Runtime Environment (build 1.8.0_301-b09) +Java HotSpot(TM) 64-Bit Server VM (build 25.301-b09, mixed mode) +``` + +## 二、数据库配置 + +### 1.配置数据库参数,允许用户登录 + +```sql +数据库配置文件postgresql.conf和pg_hba.conf中加上如下内容 +[omm@node1 data]$ tail -4 postgresql.conf +listen_addresses = '0.0.0.0' +password_encryption_type = 0 +log_directory = 'pg_log' +remote_read_mode=non_authentication +[omm@node1 data]$ tail -1 pg_hba.conf +host all all 0.0.0.0/0 md5 +重启数据库 +gs_om -t stop +gs_om -t start +``` + +### 2.创建连接用户及数据库 + +```sql +postgres=# create database jdbc_db; +CREATE DATABASE +postgres=# create user jdbc_usr password 'jdbc@123'; +NOTICE: The encrypted password contains MD5 ciphertext, which is not secure. 
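+-- The NOTICE above appears because password_encryption_type = 0, set in
+-- postgresql.conf in step 1, stores an MD5 ciphertext for client
+-- compatibility; keeping the default SHA256 encryption is the safer
+-- choice outside of test environments.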
+CREATE ROLE +postgres=# alter user jdbc_usr sysadmin; +ALTER ROLE +postgres=# +``` + +## 三、Java程序编写 + +```java +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +public class ConnTest { + //创建数据库连接。 + public static Connection GetConnection(String username, String passwd) { + String driver = "org.postgresql.Driver"; + String sourceURL = "jdbc:postgresql://8.131.53.xxx:26000/jdbc_db"; + Connection conn = null; + try { + //加载数据库驱动。 + Class.forName(driver).newInstance(); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + + try { + //创建数据库连接。 + conn = DriverManager.getConnection(sourceURL,"jdbc_usr", "jdbc@123"); + System.out.println("连接成功!"); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + return conn; + } + ; + /** + * 把查询到的结果放入ResultSet + * 通过迭代的方法去读取结果集中的查询结果 + * 输出查询结果 + */ + public static void Select(Connection conn) { + PreparedStatement ps = null; + ResultSet rs = null; + String sql = "SELECT version()"; + try { + ps = conn.prepareStatement(sql); + rs = ps.executeQuery(); //将查询的结果放入ResultSet结果集中 + /** + * 从结果集ResultSet中迭代取出查询结果并输出 + */ + while(rs.next()) { +// String values = rs.getString("id"); + String values = rs.getString("version"); + + System.out.println( "数据库版本:"+values); + } + } catch (SQLException e) { + System.out.println("操作失败o(╥﹏╥"); + e.printStackTrace(); + } + } + /** + * 主程序,逐步调用各静态方法。 + * @param args + */ + public static void main(String[] args) { + //创建数据库连接。 + Connection conn = GetConnection("jdbc_usr", "jdbc@123"); + Select(conn); + //关闭数据库连接。 + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} +``` + +## 四、程序测试 + +### 1.放置jDBC驱动 + +```java +将jdbc驱动放到jdk中的如下目录,让程序能找到驱动包 +[root@node1 ext]# pwd +/usr/java/jdk1.8.0_301-amd64/jre/lib/ext +[root@node1 ext]# wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.1/x86/openGauss-2.0.0-JDBC.tar.gz +2021-12-01 17:30:52 (13.2 MB/s) - 已保存 “openGauss-2.0.0-JDBC.tar.gz” [4937896/4937896]) +[root@node1 ext]# tar -zxvf openGauss-2.0.0-JDBC.tar.gz +postgresql.jar +``` + +### 2.运行程序 + +这里采用了俩种方式运行程序,一种是单个程序直接运行,另一个则是将Java程序打成jar在运行,这里简单介绍一下 + +#### (1)单个程序运行 + +```java +[root@node1 hello]# ls +conn.jar ConnTest.java MANIFEST.MF postgresql.jar +[root@node1 hello]# pwd +/root/java_program/hello +[root@node1 hello]# javac ConnTest.java +[root@node1 hello]# java ConnTest +连接成功! +数据库版本:PostgreSQL 9.2.4 (MogDB 2.0.1 build f892ccb7) compiled at 2021-07-09 16:12:59 commit 0 last mr on x86_64-unknown-linux-gnu, compiled by g++ (GCC) 7.3.0, 64-bit +``` + +#### (2)jar包运行 + +##### 编译ConnTest.java + +```java +[root@node1 hello]# javac ConnTest.java +``` + +##### 编写MANIFEST.MF文件 + +```java +MANIFEST.MF文件介绍 +META-INF文件夹相当于一个信息包,目录中的文件和目录获得Java 2平台的认可与解释,用来配置应用程序、扩展程序、类加载器和服务。这个文件夹和其中的 MANIFEST.MF文件,在用jar打包时自动生成。执行jar文件的时候,这个jar里是需要具备 META-INF/MANIFEST.MF的,否则java -jar就找不到main class。 +[root@node1 hello]# cat MANIFEST.MF +Manifest-Version: 1.0 +Main-Class: ConnTest +``` + +##### 程序打包 + +```java +[root@node1 hello]# jar -cvfm conn.jar MANIFEST.MF ConnTest.class +已添加清单 +正在添加: ConnTest.class(输入 = 2126) (输出 = 1212)(压缩了 42%) +``` + +##### 运行程序 + +```java +[root@node1 hello]# java -jar conn.jar +连接成功! 
+数据库版本:PostgreSQL 9.2.4 (MogDB 2.0.1 build f892ccb7) compiledat 2021-07-09 16:12:59 commit 0 last mr onx86_64-unknown-linux-gnu, compiled by g++ (GCC) 7.3.0, 64-bit +``` + +## 五、总结 + +上述文章简单介绍了JDBC连接MogDB数据库,数据如何配置,以及JDBC驱动如何加载,如何配置,并运行在Linux上。更多细节参考官方文档https://docs.mogdb.io/zh/mogdb/v2.0.1/1-development-based-on-jdbc-overview + diff --git a/content/zh/post/zhangfan/title/img20.png b/content/zh/post/zhangfan/title/img20.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/zh/post/zhangfan/title/img20.png differ diff --git "a/content/zh/post/zhangfan/\350\277\201\347\247\273\345\267\245\345\205\267MTK\345\222\214ora2pg\350\277\201\347\247\273BLOB\345\255\227\346\256\265\346\225\260\346\215\256\345\210\260MogDB\346\200\247\350\203\275\345\257\271\346\257\224.md" "b/content/zh/post/zhangfan/\350\277\201\347\247\273\345\267\245\345\205\267MTK\345\222\214ora2pg\350\277\201\347\247\273BLOB\345\255\227\346\256\265\346\225\260\346\215\256\345\210\260MogDB\346\200\247\350\203\275\345\257\271\346\257\224.md" new file mode 100644 index 0000000000000000000000000000000000000000..8aa6b215c0b9f6af1ff8b16f5f0c0575bf104647 --- /dev/null +++ "b/content/zh/post/zhangfan/\350\277\201\347\247\273\345\267\245\345\205\267MTK\345\222\214ora2pg\350\277\201\347\247\273BLOB\345\255\227\346\256\265\346\225\260\346\215\256\345\210\260MogDB\346\200\247\350\203\275\345\257\271\346\257\224.md" @@ -0,0 +1,231 @@ ++++ + +title = "迁移工具MTK和ora2pg迁移BLOB字段数据到MogDB性能对比" + +date = "2022-04-25" + +tags = ["迁移工具MTK和ora2pg迁移BLOB字段数据到MogDB性能对比"] + +archives = "2022-04" + +author = "张凡" + +summary ="迁移工具MTK和ora2pg迁移BLOB字段数据到MogDB性能对比" + +img = "/zh/post/zhangfan/title/img20.png" + +times = "10:20" ++++ + +# 迁移工具MTK和ora2pg迁移BLOB字段数据到MogDB性能对比 + +本文出处:[https://www.modb.pro/db/240146](https://www.modb.pro/db/240146) + +背景介绍: +mtk全称为 The Database Migration Toolkit,是一个云和恩墨自主研发的可以将Oracle/DB2/MySQL/openGauss数据库的数据结构,全量数据高速导入到MogDB的工具。ora2pg是一款免费迁移工具,能将oracle迁移到pg。以下是迁移数据说明,在Oracle中创建25张带有BLOB字段的表,每张表数据50000条,用迁移工具MTK和ora2pg,分别对比迁移1张表、5张表、10张表、20张表的迁移时间,从而对比其迁移性能。数据库磁盘使用的是nvme磁盘,写入速度高达1400M/s,不用考虑i/o对其性能的影响。 + +## 一、容器版oracle安装部署 + +### 1、Oracle容器部署 + +``` +docker pull registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle_11g_ee_lhr_11.2.0.4:1.0 +docker run -itd --name oracle -h oracle --privileged=true -p 1521:1521 -p 222:22 -p 1158:1158 lhrbest/oracle_11g_ee_lhr_11.2.0.4:1.0 init +``` + +### 2、安装Oracle客户端 + +``` +wget https://download.oracle.com/otn_software/linux/instantclient/214000/oracle-instantclient-basic-21.4.0.0.0-1.el8.x86_64.rpm +wget https://download.oracle.com/otn_software/linux/instantclient/214000/oracle-instantclient-sqlplus-21.4.0.0.0-1.el8.x86_64.rpm +wget https://download.oracle.com/otn_software/linux/instantclient/214000/oracle-instantclient-devel-21.4.0.0.0-1.x86_64.rpm +wget https://download.oracle.com/otn_software/linux/instantclient/214000/oracle-instantclient-jdbc-21.4.0.0.0-1.x86_64.rpm +[root@ecs-1b06 oracle]# rpm -ivh oracle-instantclient-basic-21.4.0.0.0-1.x86_64.rpm +[root@ecs-1b06 oracle]# rpm -ivh oracle-instantclient-sqlplus-21.4.0.0.0-1.x86_64.rpm +[root@ecs-1b06 oracle]# rpm -iv oracle-instantclient-jdbc-21.4.0.0.0-1.x86_64.rpm +[root@ecs-1b06 oracle]# rpm -iv oracle-instantclient-devel-21.4.0.0.0-1.x86_64.rpm +[root@ecs-1b06 oracle]# export LD_LIBRARY_PATH=/usr/lib/oracle/21/client64/lib +[root@ecs-1b06 oracle]# export ORACLE_HOME=/usr/lib/oracle/21/client64 +``` + +## 二、安装ora2pg + +### 1、安装依赖 + +``` 
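+# perl plus the ExtUtils build helpers are needed to compile the Perl
+# modules (DBI, DBD::Oracle, DBD::Pg) that ora2pg loads at runtime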
+[root@ecs-1b06 ora2pg]# yum install -y perl perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker +``` + +### 2、安装DBI模块 + +``` +[root@ecs-1b06 local]# wget https://cpan.metacpan.org/authors/id/T/TI/TIMB/DBI-1.643.tar.gz +[root@ecs-1b06 local]# pwd +[root@ecs-1b06 local]# tar -xf DBI-1.643.tar.gz +[root@ecs-1b06 local]# cd DBI-1.643/ +[root@ecs-1b06 DBI-1.643]# perl Makefile.PL +[root@ecs-1b06 DBI-1.643]#make +[root@ecs-1b06 DBI-1.643]#make insatll +``` + +### 3、安装DBD:oracle模块 + +``` +[root@ecs-1b06 DBD-Oracle-1.80]# wget https://cpan.metacpan.org/authors/id/M/MJ/MJEVANS/DBD-Oracle-1.80.tar.gz +[root@ecs-1b06 dbd]# tar -xf DBD-Oracle-1.80.tar.gz +[root@ecs-1b06 oracle]# export LD_LIBRARY_PATH=/usr/lib/oracle/21/client64/lib/ +[root@ecs-1b06 oracle]# export ORACLE_HOME=/usr/lib/oracle/21/client64 +[root@ecs-1b06 dbd]#cd DBD-Oracle-1.80 +[root@ecs-1b06 dbd]perl Makefile.PL +[root@ecs-1b06 dbd]make && make +``` + +### 4、安装DBD:pg模块 + +``` +[root@ecs-1b06 DBI-1.643]# yum install -y postgresql* +[root@ecs-1b06 local]# wget https://cpan.metacpan.org/authors/id/T/TU/TURNSTEP/DBD-Pg-3.15.0.tar.gz +[root@ecs-1b06 DBD-Pg-3.15.0]# tar -xf DBD-Pg-3.15.0.tar.gz +[root@ecs-1b06 DBD-Pg-3.15.0]# cd DBD-Pg-3.15.0 +[root@ecs-1b06 DBD-Pg-3.15.0]# perl Makefile.PL +[root@ecs-1b06 DBD-Pg-3.15.0]# make && make install +``` + +### 5、安装ORA2PG + +``` +[root@ecs-1b06 local]# wget https://sourceforge.net/projects/ora2pg/files/23.0/ora2pg-23.0.tar.bz2 --no-check-certificate +[root@ecs-1b06 ora2pg-23.0]# perl Makefile.PL +[root@ecs-1b06 ora2pg-23.0]# make && make install +``` + +### 6、检查是否安装成功 + +``` +[root@mogdb-kernel-0004 ~]# cat check.pl +#!/usr/bin/perl +use strict; +use ExtUtils::Installed; +my $inst=ExtUtils::Installed->new(); +my @modules = $inst->modules(); +foreach(@modules){ + my $ver = $inst->version($_) || "???"; + printf("%-12s -- %s\n",$_,$ver); + } +exit; +[root@ecs-1b06 dbd]# perl check.pl +DBD::Oracle -- 1.80 +DBD::Pg -- 3.15.0 +DBI -- 1.643 +Ora2Pg -- 23.0 +Perl -- 5.16.3 +``` + +## 三、MTK安装 + +### 1.下载软件 + +``` +根据系统架构选择对应的版本 +wget https://cdn-mogdb.enmotech.com//mtk/v2.2.1/mtk_2.2.1_linux_arm64.tar.gz +tar -xf mtk_2.2.1_linux_arm64.tar.gz +``` + +### 2.申请license + +``` +生成license.json,即可使用 +[root@node151 mtk_2.2.1_linux_arm64]# ./mtk license gen +License File Not Found (default license.json) +许可证无效,开始申请 +✗ Email: █ +[root@node151 mtk_2.2.1_linux_arm64]# vi license.json +[root@node151 mtk_2.2.1_linux_arm64]# ll +总用量 33M +-rw-r--r-- 1 root root 29K 1月 21 10:09 CHANGELOG.md +drwxr-xr-x 2 root root 4.0K 1月 24 13:52 example +-rw-r--r-- 1 root root 531 1月 24 13:57 license.json +-rwxr-xr-x 1 root root 33M 1月 21 10:08 mtk +-rw-r--r-- 1 root root 2.1K 1月 11 16:51 README.md +``` + +## 四、Oracle准备数据 + +### 1、docker进入oracle + +``` +[root@ecs-1b06 ~]# docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +52dcc856bf99 lhrbest/oracle_11g_ee_lhr_11.2.0.4:1.0 "init" 36 minutes ago Up 36 minutes 0.0.0.0:1158->1158/tcp, 0.0.0.0:1521->1521/tcp, 0.0.0.0:222->22/tcp oracle +[root@ecs-1b06 ~]# docker exec -it 52dcc856bf99 bash +[root@oracle /]# su - oracle +[oracle@oracle ~]$ sqlplus /nolog +@> conn / as sysdba +Connected to an idle instance. +SYS@LHR11G> startup +ORACLE instance started. +SYS@LHR11G> create user test identified by test123; +User created. +SYS@LHR11G> grant dba to test; +Grant succeeded. +``` + +### 2、生成数据 + +``` +[oracle@oracle image]$ ll|wc -l +50000 +[oracle@oracle image]$ pwd +/home/oracle/image +[oracle@oracle image]$ du -sh . +7.3G . 
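+# About 50,000 image files (~7.3 GB) are now staged under /home/oracle/image;
+# the PL/SQL block below loads them into the BLOB column via DBMS_LOB.LOADFROMFILE.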
+[oracle@oracle ~]$ sqlplus / as sysdba +SYS@LHR11G> conn test/test123 +Connected. +SYS@LHR11G> create table testimg1(id int,photo blob); +SYS@LHR11G> create or replace directory imgpath as '/home/oracle/image'; +Directory created. +declare +l_blob blob; +l_bfile bfile; +begin +for i in 1..5000 loop +insert into testimg1(id,photo) +values(1,empty_blob()) +returning photo into l_blob; +l_bfile :=bfilename('IMGPATH',i||'.jpg'); +dbms_lob.fileopen(l_bfile); +dbms_lob.loadfromfile(l_blob,l_bfile,dbms_lob.getlength(l_bfile)); +dbms_lob.fileclose(l_bfile); +end loop; +commit; +end; + 16 / +PL/SQL procedure successfully completed. +.....省略生成数据的部分内容 +SQL> SELECT TABLE_NAME,NUM_ROWS FROM USER_TABLES; + +TABLE_NAME NUM_ROWS +------------------------------ ---------- +TESTIMG1 50000 +TESTIMG2 50000 +TESTIMG3 50000 +TESTIMG4 50000 +TESTIMG5 50000 +TESTIMG6 50000 +TESTIMG7 50000 +TESTIMG8 50000 +TESTIMG9 50000 +TESTIMG10 50000 +共循环生成25张表。每张表数据50000条 +``` + +## 四、总结 + +![394261643179237_.pic.jpg](../images/20220126-98542ae6-b0c2-48a2-b5fb-98f09414798b.jpg) + +![图片1.png](../images/20220126-ac5c9236-ce4f-4f3a-842f-0d8865c920e0.png) + +## 结论 + +从表格数据对比,ora2pg迁移带有BLOB字段的表性能略优于MTK。从安装部署来看,ora2pg的安装部署过于复杂,MTK的安装则非常简便。在对性能要求不那么严格的情况下,可以选择性能和ora2pg相差不多,部署方式简单的MTK进行数据迁移。 diff --git a/content/zh/post/zhangxb/gdb_opengauss.md b/content/zh/post/zhangxb/gdb_opengauss.md new file mode 100644 index 0000000000000000000000000000000000000000..80a26fd1c142cfb68ffc22ac500aedd42dfd4892 --- /dev/null +++ b/content/zh/post/zhangxb/gdb_opengauss.md @@ -0,0 +1,126 @@ ++++ +title = "openGauss使用gdb进行开发调试" +date = "2021-12-27" +tags = ["openGauss使用gdb进行开发调试"] +archives = "2021-12-27" +author = "zhangxb" +summary = "openGauss使用gdb进行开发调试" +img = "/zh/post/zhangxb/title/img.png" +times = "19:30" ++++ + +### 使用gdb工具对openGauss进行开发调试 + +#### 概述 + +本文简单介绍了下如何使用gdb工具,在开发过程中对opengauss源码进行编译和调试。 + +数据库的编译可以在root和子用户下,但是数据库运行只在在子用户下,因此我们如下的操作都在子用户下进行。 + +如下命令: +``` +groupadd omm +useradd -g omm -m omm +su - omm +mkdir -p /home/omm/ogcompile +cd /home/omm/ogcompile +``` + +#### 前提准备 + + +1. 下载openGauss源码到Linux服务器上面,此处以2.1.0版本为例。(推荐使用Centos7-x86_64,或者openEuler20.03LTS-arm,或openEuler20.03LTS-x86_64)。 + ``` + git clone https://gitee.com/opengauss/openGauss-server.git -b 2.1.0 + ``` + +2. 下载最新的三方库二进制包并解压。 + ``` + wget https://gitee.com/link?target=https%3A%2F%2Fopengauss.obs.cn-south-1.myhuaweicloud.com%2F2.1.0%2FopenGauss-third_party_binarylibs.tar.gz + tar -zxf openGauss-third_party_binarylibs.tar.gz + ``` + +3. 安装三方依赖 + ``` + sudo yum install libaio-devel flex bison ncurses-devel glibc-devel patch lsb_release readline-devel libaio-devel -y + ``` + + +#### 编译数据库 + +1. 导入环境变量 +``` +export CODE_BASE=/home/omm/ogcompile/openGauss-server +export BINARYLIBS=/home/omm/ogcompile/openGauss-third_party_binarylibs +export GAUSSHOME=$CODE_BASE/dest/ +export GCC_PATH=$BINARYLIBS/buildtools/centos7.6_x86_64/gcc7.3/ +export CC=$GCC_PATH/gcc/bin/gcc +export CXX=$GCC_PATH/gcc/bin/g++ +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH +export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH +``` + + 如上环境变量中,`centos7.6_x86_64`为当前系统平台信息。以实际为准。可以通过在 `openGauss-server` 源码目录下执行命令 `sh src/get_PlatForm_str.sh` 查询。 + +2. 编译数据库 + + 导入上面一步中的环境变量之后,进入到openGauss-server源码目录下,执行下面几步进行编译数据库. 
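+   Before running the configure and make steps below, a quick sanity check of the toolchain can save a failed build. This is a minimal sketch and assumes the environment variables exported in step 1:
+
+   ```
+   # the gcc on PATH should be the bundled 7.3 toolchain under $GCC_PATH
+   which gcc
+   gcc --version | head -n 1
+   # install prefix and third-party binary root used by ./configure
+   echo $GAUSSHOME $BINARYLIBS
+   ```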
+ + 配置: + ``` + ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib + ``` + + 编译: + ``` + make -sj10 + make install -sj + ``` + + 等待编译成功后,编译的结果在该环境变量所示路径中`export GAUSSHOME=$CODE_BASE/dest/`,即在`/home/omm/ogcompile/openGauss-server/dest`目录下。 + +#### 初始化和启动数据库 + +1. 初始化数据库目录 + + ``` + gs_initdb -D /home/omm/ogcompile/datanode/dn1 --nodename=single -w openGauss@123 + ``` + +2. 拉起数据库 + ``` + gs_ctl start -D /home/omm/ogcompile/datanode/dn1 + ``` + +#### 调试数据库 + +##### 调试数据库内核 + +1. 查看数据库实例进程 + ``` + ps -ef | grep gaussdb + ``` + 获取并记录进程号 + +2. 进入调试终端 + + ``` + gdb attach + ``` + 为第一步记录的进程号。 + +3. 常用gdb命令 + + (1) `b` 设置断点,例如 `b xlog.cpp:1021`,为xlog.cpp文件第1021行设置断点。 + + (2) `n` 单步运行,不进入函数内部 + + (3) `s` 单步运行,可以进入函数内部 + + (4) `p` 打印变量值。 如 `p var1` + + (5) `bt` 打印堆栈 + + (6) `info threads` 查看所有线程。openGauss属于一进程+多线程的模型,通过该命令查询存在线程。 + + (7) `q` 退出调试终端 \ No newline at end of file diff --git a/content/zh/post/zhangxb/images/dis-pkg.png b/content/zh/post/zhangxb/images/dis-pkg.png new file mode 100644 index 0000000000000000000000000000000000000000..a39ecb5b23e73911c6a3835252ea3ca17b843724 Binary files /dev/null and b/content/zh/post/zhangxb/images/dis-pkg.png differ diff --git "a/content/zh/post/zhangxb/openGauss\345\270\270\347\224\250gsql\345\221\275\344\273\244\351\233\206\345\220\210.md" "b/content/zh/post/zhangxb/openGauss\345\270\270\347\224\250gsql\345\221\275\344\273\244\351\233\206\345\220\210.md" new file mode 100644 index 0000000000000000000000000000000000000000..3daded4e33923532d5e4d43a6bec428e62008c23 --- /dev/null +++ "b/content/zh/post/zhangxb/openGauss\345\270\270\347\224\250gsql\345\221\275\344\273\244\351\233\206\345\220\210.md" @@ -0,0 +1,147 @@ ++++ +title = "openGauss常用gsql命令集合" +date = "2021-12-27" +tags = ["openGauss常用gsql命令集合"] +archives = "2021-12-27" +author = "zhangxb" +summary = "openGauss常用gsql命令集合" +img = "/zh/post/zhangxb/title/img.png" +times = "19:30" ++++ + +### 概述 + +openGauss与其他数据库一样,都遵标准的SQL规范。即其增删改查以及存储过程等语法与其他数据库基本相似。 + +有区别的点在与openGauss的gsql工具的使用,例如数据库查询、增加,表的查看等方面。 因此本章节重点介绍下gsql工具的常用命令。 + +### 命令介绍 + +1. 登录 + + 命令: `gsql -d postgres -p 5432 -r` \ + 说明: 该命令登录到数据库客户端, `-d postgres` 执行要链接的数据库。 `-p 5432` 链接数据库的端口。 `-r` 开启快捷键。 + +2. 退出 + + 命令: `\q` \ + 说明: 退出gsql客户端。 + +3. 查看数据库 + + 命令: `\l` `\l+` \ + 说明: 查看所有的数据库, `\l+`可以看每个库的空间大小。 + +4. 切换数据库 + + 命令: `\c dbname` \ + 说明: dbname为要切换的数据库名称。切换到指定的数据库里面。 + +5. 数据库增删 + + 创建数据库: `create database db_test;` \ + 删除数据库: `drop database db_test;` + +6. 查看所有表 + + 命令: `\d` `\d+` \ + 说明: 查看当前库下所有表。 `\d+`可以看到每个表占用空间大小。 + + 命令: `\d table_name` \ + 说明: 显示指定表的表结构 + +7. 查看表空间 + + 命令: `\db` \ + 说明: 查看所有表空间 + +8. 查看索引 + + 命令: `\di` \ + 说明: 查看所有索引 + +9. 查看所有用户 + + 命令: `\du` \ + 说明: 查看所有用户 + +10. 设置GUC参数 + + 命令: `\set KEY VALUE` \ + 说明: 设置参数,在当前会话生效。 例如 `\set schema public`设置当亲模式为public。 + + 命令: `ALTER SYSTEM SET parameter TO value;` \ + 说明: 此命令会将参数写入配置文件,永久生效。 例如 `ALTER SYSTEM SET PORT TO 8000;`设置端口为8000,写入到配置文件中。 + +11. 批量执行脚本 + + 命令: `gsql -d postgres -p 5432 -c "insert into t1 values(1)"` \ + 说明: 使用gsql执行一条sql语句. + + 命令: `gsql -d postgres -p 5432 -f /home/omm/test.sql` \ + 说明: 使用gsql命令,一次执行一个sql文件。执行到指定的postgres数据库中。 + +12. 系统表 + + 命令: `\dS` \ + 说明: 查看所有的系统表,其中每一个表都可以使用select语句查看详细信息。 + +#### 系统函数 + +1. 
字符串 + + (1)字符串长度: `select char_length('hello word');` + + (2)字符串出现位置: `select position(substring in string)` 如 `select position('word' in 'helloword');` + + (3)逆序字符串: `select reverse('hello');` + + (4)拼接字符串: `select 'aaa' || 'aaa';` + + (5)转换为大写: `select upper('tom');` + + (6)转换为小写: `select lower('TOM');` + +2. 数字 + + (1)加、减、乘、除、取余 + ``` + select 10 + 2; + select 10 - 2; + select 10 * 2; + select 10 / 2; + select 10 % 3; + ``` + + (2)绝对值: `select abs(-5);` + + (3)阶乘: `select 10!;` + + (4)0-1之前随机数: `select random();` + + +3. 聚集函数 + + (1)所有输入行的和sum函数: `select sum(expression);` + + 例如 `select sum(id) from t1;` + + (2)输入行的最大值: `select max(expression);` + + 例如 `select max(id) from t1;` + + (3)输入行的最小值: `select min(expression);` + + 例如 `select min(id) from t1;` + + (4)输入行的平均值: `select avg(expression);` + + 例如 `select avg(id) from t1;` + + (5)输入行的总数: `select count(expression);` + + 例如 `select count(id) from t1;` + + (6)输入行的中位数: `select median(expression);` + + 例如 `select median(id) from t1;` \ No newline at end of file diff --git a/content/zh/post/zhangxb/title/img.png b/content/zh/post/zhangxb/title/img.png new file mode 100644 index 0000000000000000000000000000000000000000..65e2d4c4751f069c64357704715e2ba99beb511a Binary files /dev/null and b/content/zh/post/zhangxb/title/img.png differ diff --git "a/content/zh/post/zhangxb/\345\256\211\350\243\205\345\215\225\346\234\272\347\211\210openGauss\346\225\260\346\215\256\345\272\223.md" "b/content/zh/post/zhangxb/\345\256\211\350\243\205\345\215\225\346\234\272\347\211\210openGauss\346\225\260\346\215\256\345\272\223.md" new file mode 100644 index 0000000000000000000000000000000000000000..14c4d382e2b8fcd1052728d04c78b9109835f94a --- /dev/null +++ "b/content/zh/post/zhangxb/\345\256\211\350\243\205\345\215\225\346\234\272\347\211\210openGauss\346\225\260\346\215\256\345\272\223.md" @@ -0,0 +1,180 @@ ++++ +title = "安装单机版openGauss数据库" +date = "2021-12-27" +tags = ["安装单机版openGauss数据库"] +archives = "2021-12-27" +author = "zhangxb" +summary = "安装单机版openGauss数据库" +img = "/zh/post/zhangxb/title/img.png" +times = "19:30" ++++ + +### 安装单机版数据库 + +#### 概述 + +本章节介绍安装一个单实例版本的openGauss数据库。 + +#### 前提准备 + +1. 请自行准备Centos7.8_x86_64平台的Linux操作系统。(centos7.8_x86镜像下载地址:https://mirrors.huaweicloud.com/centos-vault/7.8.2003/isos/x86_64/CentOS-7-x86_64-DVD-2003.iso +) + +2. 下载openGauss在Centos平台下的软件包:https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.1.0/x86/openGauss-2.1.0-CentOS-64bit-all.tar.gz + +#### 上传软件包 + +1. 在Linux系统下,创建目录来放软件包 +``` +mkdir -p /opt/omm/ +``` +2. 通过ftp等工具,将openGauss-2.1.0-CentOS-64bit-all.tar.gz包放到/opt/omm目录下 + +#### 创建xml配置文件 + +进入到 /opt/omm 路径下, `cd /opt/omm/` + +`vi clusterconfig.xml` + +将下面的配置修改后,粘贴到进去。 + +需要修改的配置项: +ecs-6ac8 -> 改为当前服务器实际的名称,可以用 hostname 命令查看。 +192.168.0.2 -> 配置为当前服务器的Ip地址,使用 ifconfig 名称查看。 +``` + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 解压软件包 + +先解压总包: +``` +tar -zxf openGauss-2.1.0-CentOS-64bit-all.tar.gz +``` + +在解压OM包: +``` +tar -zxf openGauss-2.1.0-CentOS-64bit-om.tar.gz +``` + +解压完成后,总体的文件如下: +![](../images/dis-pkg.png) + +#### 预安装数据库 + +预安装会安装上面配置的xml文件,进行创建规划目录、用户、校验操作系统等步骤,需要在root用户下执行。 + +1. 设置目录权限 + +``` +chmod -R 750 /opt/omm/ +``` + +2. 安装依赖 + +``` +yum install libaio-devel -y +``` + +3. 进入到 script目录下,预安装数据库 + +``` +cd /opt/omm/script +./gs_preinstall -U omm -G omm -X /opt/omm/clusterconfig.xml --sep-env-file=/home/omm/envfile +``` + +预安装过程中,遇到: + +Are you sure you want to create the user[omm] and create trust for it (yes/no)? 请输入yes,这里会创建omm数据库管理用户。 + +Please enter password for cluster user. 
+Password: +请输入为创建的omm用户设置的密码,例如openGauss@123 +下一步再输入一遍进行确认。 + +等待预安装过程完成。 + +#### 安装数据库 + +在预安装数据库完成后,进行安装数据库操作。数据库的安装需要在omm用户下执行。 + +1. 切换到omm用户,并导入环境变量 + +``` +su - omm +source /home/omm/envfile +``` + +2. 执行数据库安装命令 + +``` +gs_install -X /opt/omm/clusterconfig.xml +``` + +在安装过程中,需要设置数据库的管理密码。 +(密码要求长度大于8位,并至少包含数字、字母、特殊字符在内的三种类型。如openGauss@123 ) + +Please enter password for database: 请输入数据库密码 +Please repeat for database: 请再输入一次数据库密码 + +等待安装完成。 + +#### 数据库使用 + +1. 状态查询 + ``` + gs_om -t status --detail + ``` + +2. 数据库启动、停止 + + 停止: `gs_om -t stop` + 启动:`gs_om -t start` + +3. 登录数据库进行sql操作 + ``` + gsql -d postgres -p 2000 -r + ``` + + 创建数据库: `create database test_db;` + + 切换到test_db数据库: `\c test_db` + + 创建表:`create table students(id int, name varchar, age int);` + + 插入数据: + ``` + insert into students values(1000, 'xiaoming', 22); + insert into students values(1001, 'liqiang', 23); + insert into students values(1002, 'zhanghua', 21); + ``` + + 查询所有人员: + ``` + select * from students; + ``` diff --git "a/content/zh/post/zhangzhijing/images/Dashboard\346\230\276\347\244\272.png" "b/content/zh/post/zhangzhijing/images/Dashboard\346\230\276\347\244\272.png" new file mode 100644 index 0000000000000000000000000000000000000000..f363208d2bee2c4d100538245083eaf230a51b74 Binary files /dev/null and "b/content/zh/post/zhangzhijing/images/Dashboard\346\230\276\347\244\272.png" differ diff --git a/content/zh/post/zhangzhijing/images/opengauss-export.png b/content/zh/post/zhangzhijing/images/opengauss-export.png new file mode 100644 index 0000000000000000000000000000000000000000..0a39a1e0c0434eb8e66c532f50746d728ab9a953 Binary files /dev/null and b/content/zh/post/zhangzhijing/images/opengauss-export.png differ diff --git a/content/zh/post/zhangzhijing/images/pushGateway.png b/content/zh/post/zhangzhijing/images/pushGateway.png new file mode 100644 index 0000000000000000000000000000000000000000..78f2ea2b71ded8e4ac93b51a2dde79b02955e510 Binary files /dev/null and b/content/zh/post/zhangzhijing/images/pushGateway.png differ diff --git a/content/zh/post/zhangzhijing/images/tpmC.png b/content/zh/post/zhangzhijing/images/tpmC.png new file mode 100644 index 0000000000000000000000000000000000000000..6ebd268258746c3b6e50a5bef391e50cc128bb38 Binary files /dev/null and b/content/zh/post/zhangzhijing/images/tpmC.png differ diff --git "a/content/zh/post/zhangzhijing/images/\346\225\260\346\215\256\346\272\220\347\232\204\351\205\215\347\275\256.png" "b/content/zh/post/zhangzhijing/images/\346\225\260\346\215\256\346\272\220\347\232\204\351\205\215\347\275\256.png" new file mode 100644 index 0000000000000000000000000000000000000000..f562593fcba9f50287cbbf974ba151f9045673f9 Binary files /dev/null and "b/content/zh/post/zhangzhijing/images/\346\225\260\346\215\256\346\272\220\347\232\204\351\205\215\347\275\256.png" differ diff --git "a/content/zh/post/zhangzhijing/images/\346\231\256\347\275\227\347\261\263\344\277\256\346\226\257\345\220\257\345\212\250\346\225\210\346\236\234.png" "b/content/zh/post/zhangzhijing/images/\346\231\256\347\275\227\347\261\263\344\277\256\346\226\257\345\220\257\345\212\250\346\225\210\346\236\234.png" new file mode 100644 index 0000000000000000000000000000000000000000..7816baf3787e53675849fa39c7e57f7d8dee6457 Binary files /dev/null and "b/content/zh/post/zhangzhijing/images/\346\231\256\347\275\227\347\261\263\344\277\256\346\226\257\345\220\257\345\212\250\346\225\210\346\236\234.png" differ diff --git 
"a/content/zh/post/zhangzhijing/\346\231\256\347\275\227\347\261\263\344\277\256\346\226\257\347\233\221\346\216\247openGauss.md" "b/content/zh/post/zhangzhijing/\346\231\256\347\275\227\347\261\263\344\277\256\346\226\257\347\233\221\346\216\247openGauss.md" new file mode 100644 index 0000000000000000000000000000000000000000..37962274eb38712f6fc4c7f081585fc126f77284 --- /dev/null +++ "b/content/zh/post/zhangzhijing/\346\231\256\347\275\227\347\261\263\344\277\256\346\226\257\347\233\221\346\216\247openGauss.md" @@ -0,0 +1,169 @@ ++++ +title = "普罗米修斯监控openGauss" +date = "2021-04-08" +tags = ["普罗米修斯监控openGauss"] +archives = "2021-04" +author = "zhangzhijing" +summary = "普罗米修斯监控openGauss" +img = "/zh/post/xingchen/title/img1.png" +times = "20:30" ++++ + +# 普罗米修斯监控openGauss + +## 1、前期准备 + +### 1.1项目依赖链接 + +| 依赖开源工程 | 工程链接 | +| ---------------- | ---------------------------------------------- | +| 普罗米修斯 | https://github.com/prometheus/pushgateway | +| opengauss_export | https://gitee.com/opengauss/openGauss-prometheus-exporter | +| pushgateway | https://github.com/prometheus/pushgateway | +| openGauss | https://gitee.com/opengauss/openGauss-server | + + + +### 1.2 工具包准备 + +| 系统环境 | 工具版本 | +| ------------- | ------------------------------------------------------------ | +| ARM+openEuler | prometheus-2.21.0.linux-arm64.tar.gz、pushgateway-1.2.0.linux-arm64.tar.gz、grafana-7.1.5-1.aarch64.rpm | +| X86+openEuler | prometheus-2.21.0.linux-amd64.tar.gz、pushgateway-1.3.0.linux-amd64.tar.gz、grafana-7.2.1-1.x86_64.rpm | + + + +## 2、监控系统安装部署 + +### 2.1 tpmC采集和发送 + +```python +''' +功能描述:在benchmarksql工具测试过程的输出信息通过tee命令 固定输出到/tmp/tpcc.log(便于在不同环境上部署),采集函数从/tmp/tpcc.log文件获取到tpmC值,发送函数将值发送到pushgateway服务。 +''' +#日志信息采集并转换为tpmC +def collect_tpmc(): + log_file = "/tmp/tpcc.log" + cmd = "tail -1 %s > /home/tpmc;awk -F ':' '{print $(NF-2)}' /home/tpmc | awk '{print $1}'" % log_file + tpmc = os.popen(cmd).read().strip() + tpmc = float(tpmc) * 0.45 + count_tpmc = "count_tpmc{count_tpmc=\"count_tpmc\"} " + str(tpmc) + "\n" + print("count_tpmc : %s" %count_tpmc) + return count_tpmc +#向pushgateway发送采集到的数据 +def send_data(data_type, node): + if data_type == "cpu": + send_cmd = "cat {file_cpu} | curl --data-binary @- pushgateway_ip:port/metrics/job/{node}/instance/{data_type}".format( + file_cpu=file_cpu, node=node, data_type=data_type) + os.popen(send_cmd) +``` + +### 2.2 pushgateway的使用 + +``` +cd pushgateway-1.2.0.linux-arm64 +./pushgateway +``` + +​ 网页显示效果如下 + +![pushGateway](../images/pushGateway.png) + + + +### 2.3 opengauss_exporter的使用 + +``` +GO GET配置 +git config --global http.proxy http://域账号:密码 +git config --global https.proxy https://域账号:密码 +git config --global http.sslverify false +git config --global https.sslverify false +GO MODULE配置 +export GO111MODULE=on +export GOPROXY=http://***/ +export GONOSUMDB=* + +配置数据库白名单 +host all all ip md5 + +配置启动opengauss_exporter +cd opengauss_exporter-master +make build +export DATA_SOURCE_NAME="postgresql://tpcc:******@ip:port/tpcc?sslmode=disable" +./bin/opengauss_exporter --config="og_exporter_default.yaml" +注:默认端口为9187 +``` + +​ 启动效果 + +![1616723392175](../images/opengauss-export.png) + +### 2.4 prometheus的使用 + +```yml +prometheus.yml 文件配置参考 +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. 
+ + static_configs: + - targets: ['prometheus_ip:9090'] + + - job_name: 'pushgateway' + static_configs: + - targets: ['pushgateway_ip:9091'] + labels: + instance: pushgateway + + - job_name: 'opgs_report' + static_configs: + - targets: ['opgs_report:9187'] + labels: + instance: opgs_report + + #启动方式 + ./prometheus +``` + +​ 启动效果 + +![1616723796627](../images/普罗米修斯启动效果.png) + + + + + +### 2.5 grafana的使用 + +``` +yum install grafana-7.1.5-1.aarch64.rpm +service grafana-server start + +网访问prometheus_ip:3000 +初始用户名密码均为admin +``` + +​ 数据源的配置 + +![1616724507663](../images/数据源的配置.png) + +​ Dashboard的导入 + +``` +openGauss_exporter工程 界面模板路径如下 +opengauss_exporter-master\opengauss_exporter-master\dashboard\ +模板也可根据自己需求灵活配置 +``` + +​ 界面效果如下 + +![1616724769130](../images/Dashboard显示.png) + +​ tpmC显示panel的添加 + +![1616724930085](../images/tpmC.png) + diff --git "a/content/zh/post/zhaoyanliang/openGauss\345\255\246\344\271\240\344\270\200-centos\345\256\211\350\243\205.md" "b/content/zh/post/zhaoyanliang/openGauss\345\255\246\344\271\240\344\270\200-centos\345\256\211\350\243\205.md" new file mode 100644 index 0000000000000000000000000000000000000000..4d6aafadc42b55d3cf6d90ee888e6b98fb715158 --- /dev/null +++ "b/content/zh/post/zhaoyanliang/openGauss\345\255\246\344\271\240\344\270\200-centos\345\256\211\350\243\205.md" @@ -0,0 +1,286 @@ ++++ +title = "openGauss学习(一)--centos安装" +date = "2021-11-30" +tags = ["openGauss社区开发入门"] +archives = "2021-11-30" +author = "zhaoyanliang" +summary = "openGauss社区开发入门" +times = "13:30" + ++++ + + + +在centos上安装opengauss教程 + + + +#### 一、opengauss介绍 + +openGauss是一款开源关系型数据库管理系统,采用木兰宽松许可证v2发行。openGauss早期版本内核源自PostgreSQL,深度融合华为在数据库领域多年的经验,结合企业级场景需求,持续构建竞争力特性。 + +openGauss目前支持在centos及openEuler系统上运行 + + + +#### 二、centos安装教程 + +1. ##### 环境配置 + + VMware Workstation Pro虚拟机软件,centos7.9(最好是7.6及以上版本,因为之后会手动修改版本号到7.6,如果7.6以下版本可能不能向上兼容) + + + +2. ##### 虚拟机软件VMware Workstation Pro安装 + + 下载链接:[下载 VMware Workstation Pro | CN](https://www.vmware.com/cn/products/workstation-pro/workstation-pro-evaluation.html) + + 该软件安装较为简单,按照普通软件安装即可,如遇问题网上也有很多教程 + + + +3. ##### centos镜像下载 + + 我使用的是校园网,可以直接到清华源、中科大源等网站下载;如果是非校园网,到官网下载速度偏慢 + + 清华源链接:[清华大学开源软件镜像站 | Tsinghua Open Source Mirror](https://mirrors.tuna.tsinghua.edu.cn/) + + + + 步骤一:点击“获取下载链接” + + ![image-20211011110017529](../typora-user-images/image-20211011110017529.png) + + + + 步骤二:选择centos及版本号为7的DVD镜像文件,点击即可下载(下载为7.9版本) + + ![image-20211011110243320](../typora-user-images/image-20211011110243320.png) + + + +4. ##### centos虚拟机配置 + + 1. 启动VMware Workstation Pro,点击创建新的虚拟机 + + ![image-20211011110532504](../typora-user-images/image-20211011110532504.png) + + + + 2. 选择“自定义”,点击下一步 + + ![image-20211011111105402](../typora-user-images/image-20211011111105402.png) + + + + 3. 保持默认,点击下一步 + + ![image-20211011111152764](../typora-user-images/image-20211011111152764.png) + + + + 4. 选择“稍后安装” + + ![image-20211011112527795](../typora-user-images/image-20211011112527795.png) + + + + 5. 选择图中选项 + + ![image-20211011112604966](../typora-user-images/image-20211011112604966.png) + + + + 6. 给虚拟机随便起个名称和选择安装位置,均可自定义 + + ![image-20211011112641119](../typora-user-images/image-20211011112641119.png) + + + + 7. 配置处理器,图中为我的设置,可根据自己电脑性能配置,如果决定不妥后面可在虚拟机设置更改 + + ![image-20211011112912990](../typora-user-images/image-20211011112912990.png) + + + + 8. 设置内存大小(建议保持默认推荐的设置) + + ![image-20211011113011613](../typora-user-images/image-20211011113011613.png) + + + + 9. 选择网络类型,这里选择“网络地址转换” + + ![image-20211011114156879](../typora-user-images/image-20211011114156879.png) + + + + 10. 
以下几步保持默认 + + ![image-20211011114220394](../typora-user-images/image-20211011114220394.png) + + ![image-20211011114251134](../typora-user-images/image-20211011114251134.png) + + ![image-20211011114306010](../typora-user-images/image-20211011114306010.png) + + + + 11. 磁盘分配,选择“拆分多个文件”,磁盘容量建议保持默认 + + ![image-20211011114348393](../typora-user-images/image-20211011114348393.png) + + + + 12. 以下保持默认,之后点击“完成”即可 + + ![image-20211011114445675](../typora-user-images/image-20211011114445675.png) + + + + 13. 点击“编辑虚拟机设置” + + ![image-20211011114558025](../typora-user-images/image-20211011114558025.png) + + + + 14. 选择镜像iso文件 + + ![image-20211011114657689](../typora-user-images/image-20211011114657689.png) + + + + 15. 移除打印机这个不存在的设备,之后点击“确定”保存 + + ![image-20211011114813694](../typora-user-images/image-20211011114813694.png) + + 16. 添加第二张网卡和修改模式(重要步骤) + + ![image-20211103160714362](../typora-user-images/image-20211103160714362.png) + + ![image-20211103160845726](../typora-user-images/image-20211103160845726.png) + + 17. 启动centos + + 启动安装第一界面,直接按下“Enter“键后就会进入自检界面。 + + ​ ![image-20211011120425966](../typora-user-images/image-20211011120425966.png) + + 在自检界面按下“Esc“键跳过自检,然后进入如下界面 + + 18. 选择语言 + + ![image-20211011120501686](../typora-user-images/image-20211011120501686.png) + + + + 19. 选择安装位置进行分区 + + ![image-20211011120618314](../typora-user-images/image-20211011120618314.png) + + ![image-20211011120648928](../typora-user-images/image-20211011120648928.png) + + 20. 手动配置分区,下拉选择标准分区 + + ![image-20211011120857291](../typora-user-images/image-20211011120857291.png) + + 点击“点这里创建它们” + + ![image-20211011120945745](../typora-user-images/image-20211011120945745.png) + + ![image-20211011121025609](../typora-user-images/image-20211011121025609.png) + + 接受更改 + + 21. 在安装信息摘要页面,点击“网络和主机名“进行网络和主机名设置,具体如下: + + ![image-20211011121053125](../typora-user-images/image-20211011121053125.png) + + 22. 安装信息摘要页面,点击“网络和主机名“进行网络和主机名设置,具体如下: + + 选择第一张网卡: + + ![image-20211103162822965](../typora-user-images/image-20211103162822965.png) + + 如以太网(enpOs3)网卡,先点击“关闭“边上的按钮把网卡打开。设置主机名(如:db1),并点击“应用(A)”,然后点击“配置“。 + + ​ ![image-20211103163644877](../typora-user-images/image-20211103163644877.png) + + 说明:设置主机名时一定要注意,如果在同一网段内有多位学员按此文档来安装,请尽量把主机名设成不一样 + + ![image-20211103163927741](../typora-user-images/image-20211103163927741.png) + + 在配置页中,选择“常规“,然后勾选”可用时自动链接到这个网络“,接着点击”保存“。 + + 接着照着第一张网卡设置进行第二张网卡的设置: + + ![image-20211103164056986](../typora-user-images/image-20211103164056986.png) + + ![image-20211103164127187](../typora-user-images/image-20211103164127187.png) + + 点击完成进行保存: + + ![image-20211103165558378](../typora-user-images/image-20211103165558378.png) + + 23. 在安装信息摘要页面,点击“软件选择 “进行软件安装设置,具体如下: + + 1. 2. ![image-20211103165736561](../typora-user-images/image-20211103165736561.png) + + 在此页面选择“GNOME桌面“,并在右边勾选”GNOME应用程序“、”开发工具“、”安全性工具“、”系统管理工具“。然后点击完成。 + + ​ ![image-20211103170212856](../typora-user-images/image-20211103170212856.png) + + 点击“开始安装”: + + ![image-20211103170442514](../typora-user-images/image-20211103170442514.png) + + + + 24. 
+At this point CentOS is fully installed, and the environment needed to run openGauss is fully configured.
\ No newline at end of file
diff --git "a/content/zh/post/zhaoyanliang/openGauss\345\255\246\344\271\240\344\272\214-openGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205.md" "b/content/zh/post/zhaoyanliang/openGauss\345\255\246\344\271\240\344\272\214-openGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205.md"
new file mode 100644
index 0000000000000000000000000000000000000000..fd86e78bf386ae9cc74352cfa3edf89c76ba4567
--- /dev/null
+++ "b/content/zh/post/zhaoyanliang/openGauss\345\255\246\344\271\240\344\272\214-openGauss\346\225\260\346\215\256\345\272\223\345\256\211\350\243\205.md"
@@ -0,0 +1,310 @@
++++
+
+title = "openGauss Learning (2): Installing the openGauss Database"
+date = "2021-11-30"
+tags = ["Getting started with openGauss community development"]
+archives = "2021-11"
+author = "zhaoyanliang"
+summary = "Getting started with openGauss community development"
+times = "13:30"
+
++++
+
+## openGauss Installation Tutorial (2)
+
+We have already installed CentOS in a virtual machine and configured the environment; now we move on to installing openGauss itself.
+
+#### 1. Preparing the operating system
+
+1. **Change the reported system version**
+
+   First switch to the root user with su:
+
+   ![image-20211108162752916](../typora-user-images/image-20211108162752916.png)
+
+   If your CentOS version is not 7.6, it needs to be changed (7.6 itself needs no change). Open the file with vi /etc/redhat-release and set its content to CentOS Linux release 7.6.2003 (Core). Press "i" for insert mode, move to the text and edit it, press ESC to leave insert mode, then type ":wq" to save and exit, as follows:
+
+   ![image-20211110161157466](../typora-user-images/image-20211110161157466.png)
+
+2. **Disable the firewall**
+
+   Run the following two commands to turn the firewall off:
+
+   systemctl stop firewalld.service
+
+   systemctl disable firewalld.service
+
+   ![image-20211108163254936](../typora-user-images/image-20211108163254936.png)
+
+3. **Set the character set and environment variables**
+
+   ![image-20211108163954650](../typora-user-images/image-20211108163954650.png)
+
+   Verify that the variables took effect:
+
+   ![image-20211108165054078](../typora-user-images/image-20211108165054078.png)
+
+4. **Turn off swap**
+
+   swapoff -a
+
+   ![image-20211108165558448](../typora-user-images/image-20211108165558448.png)
+
+5. **Prepare the yum environment**
+
+   Back up the original yum configuration file:
+
+   mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
+
+   ![image-20211108165833119](../typora-user-images/image-20211108165833119.png)
+
+   Download a usable repo file, by either of two methods.
+
+   Method 1:
+
+   curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
+
+   Method 2:
+
+   curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.huaweicloud.com/repository/conf/CentOS-7-anon.repo
+
+   As shown:
+
+   ![image-20211108170027609](../typora-user-images/image-20211108170027609.png)
+
+   Check that the repo file's content is correct; if what is displayed is wrong, download the repo file with the other method instead.
+
+   ![image-20211108170353485](../typora-user-images/image-20211108170353485.png)
+
+6. **Install the required packages with yum**
+
+   Run the following command to install the needed packages:
+
+   yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch lsb_release wget python3
+
+   As shown:
+
+   ![image-20211110164431185](../typora-user-images/image-20211110164431185.png)
+
+   You may hit an error here: -bash: /usr/bin/yum: /usr/bin/python: bad interpreter: No such file or directory (I did, too). A fix is described here:
+
+   [-bash: /usr/bin/yum: /usr/bin/python: bad interpreter: No such file or directory_weixin_38169359的博客-CSDN博客](https://blog.csdn.net/weixin_38169359/article/details/101292719)
+
+   Whether the corrected interpreter is python2.4, python2.7, or something else depends on your own paths (mine was 2.7).
+
+7. **Make Python 3.x the default version**
+
+   ![image-20211110164207128](../typora-user-images/image-20211110164207128.png)
+
+   After the change, confirm that yum still works. If it does not, edit /usr/bin/yum and change the #!/usr/bin/python line to #!/usr/bin/python2.7 (or the matching python 2.x version). Press "i" for insert mode, edit the line, press ESC, then type ":wq" to save and exit, as follows:
+
+   ![image-20211110164540792](../typora-user-images/image-20211110164540792.png)
+
+   Verify with yum --help that yum is usable:
+
+   ![image-20211110164642691](../typora-user-images/image-20211110164642691.png)
+
+8. **Create the directory that will hold the database installation:**
+
+   ![image-20211110164843604](../typora-user-images/image-20211110164843604.png)
+
+9. **Download the database installation package**
+
+   ![image-20211110164953102](../typora-user-images/image-20211110164953102.png)
+
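+Several of the preparation steps above exist only as screenshots, so here is the whole phase as one hedged shell transcript. Everything comes from the steps above except where marked as an assumption; in particular, the en_US.UTF-8 locale line stands in for the character-set screenshot in step 3.
+
+```
+su -                                    # switch to root first
+
+# Step 1: report the version as 7.6 (rewrites /etc/redhat-release)
+echo "CentOS Linux release 7.6.2003 (Core)" > /etc/redhat-release
+
+# Step 2: disable the firewall
+systemctl stop firewalld.service
+systemctl disable firewalld.service
+
+# Step 3 (assumed locale): set the character set in the environment
+echo "export LANG=en_US.UTF-8" >> /etc/profile && source /etc/profile
+
+# Step 4: turn off swap
+swapoff -a
+
+# Step 5: replace the yum repo (method 1, Aliyun)
+mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
+curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
+
+# Step 6: install the required packages
+yum install -y libaio-devel flex bison ncurses-devel glibc-devel patch lsb_release wget python3
+
+# Step 7 (assumed mechanism): make python3 the default interpreter
+ln -sf /usr/bin/python3 /usr/bin/python
+
+# Steps 8-9: directory for the installation package (path used later in this post)
+mkdir -p /opt/software/openGauss
+```
+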
+#### 2. Installing the openGauss database
+
+1. **Create the XML configuration file used for the installation**
+
+   ![image-20211110165208487](../typora-user-images/image-20211110165208487.png)
+
+   Add the following content to the clusterconfig.xml file. Press "i" for insert mode, paste the content in, press ESC to leave insert mode, then type ":wq" to save and exit.
+
+   ![image-20211110165401524](../typora-user-images/image-20211110165401524.png)
+
+   ```
+
+   ```
+
+   Note: the values highlighted in red (in the screenshot) must be changed to your actual IP and host name; if the Chinese comments inside come out garbled, those lines can simply be deleted. The XML block above came through empty, so a minimal hedged sample is given right after this section.
+
+2. **Extract the downloaded package into the target directory**
+
+   First extract the openGauss-1.1.0-CentOS-64bit-all.tar.gz package:
+
+   ![image-20211110170430428](../typora-user-images/image-20211110170430428.png)
+
+   Then extract the openGauss-1.1.0-CentOS-64bit-om.tar.gz package:
+
+   ![image-20211110170638586](../typora-user-images/image-20211110170638586.png)
+
+   After extraction it looks like this when inspected with ls:
+
+   ![image-20211110170721864](../typora-user-images/image-20211110170721864.png)
+
+   Once the packages are extracted, a script subdirectory is generated automatically under /opt/software/openGauss, and inside it the OM tool scripts such as gs_preinstall.
+
+   Change the permissions:
+
+   ![image-20211110170828835](../typora-user-images/image-20211110170828835.png)
+
+3. **Run the pre-installation script**
+
+   ![image-20211110171525896](../typora-user-images/image-20211110171525896.png)
+
+   Along the way you must enter the OS root user's password (e.g., openGauss@123) and create the OS user omm with a password (e.g., openGauss@123). As before, the password is not echoed; just type it and press Enter.
+
+   When "Preinstallation succeeded" is printed, initialization is complete.
+
+4. **Initialize the database**
+
+   Reboot the VM with init 6 (mainly to free some memory):
+
+   ![image-20211110171651494](../typora-user-images/image-20211110171651494.png)
+
+   Update the permissions:
+
+   ![image-20211110173502716](../typora-user-images/image-20211110173502716.png)
+
+   Then initialize the database as the omm user.
+
+   Note: size the shared-memory parameters to your machine's actual memory; if they are not sized appropriately, the database will report an error at startup. The VM in this walkthrough has 2 GB of RAM in total.
+
+   gs_install -X /opt/software/openGauss/clusterconfig.xml --gsinit-parameter="--encoding=UTF8" --dn-guc="max_process_memory=2GB" --dn-guc="shared_buffers=128MB" --dn-guc="bulk_write_ring_size=128MB" --dn-guc="cstore_buffers=16MB"
+
+   As shown:
+
+   ![image-20211110173619641](../typora-user-images/image-20211110173619641.png)
+
+   (I had already installed once, so your screen may differ somewhat.)
+
+5. **Clean up the installation packages**
+
+   ![image-20211110174019749](../typora-user-images/image-20211110174019749.png)
+
+   (Likewise, I had already deleted them once.)
+
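+Since the XML in step 1 did not survive, here is a minimal single-node clusterconfig.xml sketch assembled from the openGauss documentation rather than from the original post. The host name db1, the IP 192.168.0.1, and all paths are placeholders that must be replaced with your own values; the port 26000 matches the gsql commands later in this post.
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<ROOT>
+  <!-- Cluster-wide parameters; db1 and 192.168.0.1 are placeholders. -->
+  <CLUSTER>
+    <PARAM name="clusterName" value="dbCluster" />
+    <PARAM name="nodeNames" value="db1" />
+    <PARAM name="gaussdbAppPath" value="/opt/gaussdb/app" />
+    <PARAM name="gaussdbLogPath" value="/var/log/gaussdb" />
+    <PARAM name="gaussdbToolPath" value="/opt/gaussdb/tool" />
+    <PARAM name="corePath" value="/opt/gaussdb/corefile" />
+    <PARAM name="backIp1s" value="192.168.0.1" />
+  </CLUSTER>
+  <!-- One DEVICE per host; a single data node listening on 26000. -->
+  <DEVICELIST>
+    <DEVICE sn="1000001">
+      <PARAM name="name" value="db1" />
+      <PARAM name="azName" value="AZ1" />
+      <PARAM name="azPriority" value="1" />
+      <PARAM name="backIp1" value="192.168.0.1" />
+      <PARAM name="sshIp1" value="192.168.0.1" />
+      <PARAM name="dataNum" value="1" />
+      <PARAM name="dataPortBase" value="26000" />
+      <PARAM name="dataNode1" value="/opt/gaussdb/data/db1" />
+    </DEVICE>
+  </DEVICELIST>
+</ROOT>
+```
+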
+#### 3. Basic database usage
+
+1. **Switch to the omm user:**
+
+   ![image-20211114002741385](../typora-user-images/image-20211114002741385.png)
+
+2. **Start the service**
+
+   Start command: **gs_om -t start**
+
+   ![image-20211114002839042](../typora-user-images/image-20211114002839042.png)
+
+3. **Connect to the database**
+
+   Connect command: **gsql -d postgres -p 26000 -r**
+
+   When the output looks like the following, the connection succeeded.
+
+   ![image-20211114003113627](../typora-user-images/image-20211114003113627.png)
+
+   Here, postgres is the database generated by default when openGauss finishes installing; you can connect to it initially in order to create new databases. 26000 is the port number of the primary database node; replace it with your openGauss instance's actual port, so confirm the connection details first.
+
+   **Background:** before using a database you must connect to it with a client program or tool, after which SQL is executed through that client. gsql is the command-line connection tool that ships with openGauss.
+
+4. **On the first connection, you must first change the omm user's password; here the new password is Bigdata@123 (choosing your own password is advised):**
+
+   **ALTER ROLE omm IDENTIFIED BY 'Bigdata@123' REPLACE 'openGauss@123';**
+
+   If "ALTER ROLE" is printed, the change succeeded.
+
+5. **Create a database user.**
+
+   By default only the administrator account created during installation can access the initial database; you can also create other database user accounts.
+
+   Command: **CREATE USER joe WITH PASSWORD "Bigdata@123";**
+
+   ![image-20211114004255316](../typora-user-images/image-20211114004255316.png)
+
+   This creates a user named joe with the password Bigdata@123.
+
+6. **Create a database.**
+
+   Command: **CREATE DATABASE db_tpcc OWNER joe;**
+
+   ![image-20211114004314410](../typora-user-images/image-20211114004314410.png)
+
+   Quit the database:
+
+   ![image-20211114004408389](../typora-user-images/image-20211114004408389.png)
+
+   Connect to this database as the new user:
+
+   Command: **gsql -d db_tpcc -p 26000 -U joe -W Bigdata@123 -r**
+
+   Output like the following means success:
+
+   ![image-20211114004517198](../typora-user-images/image-20211114004517198.png)
+
+**At this point, the openGauss database installation is complete.**
+
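+As a quick smoke test of the new user and database, you can create a table, insert a row, and query it; the table t_hello below is an arbitrary example, not part of the original walkthrough.
+
+```
+-- Connect as the new user first:
+--   gsql -d db_tpcc -p 26000 -U joe -W Bigdata@123 -r
+CREATE TABLE t_hello (id integer, msg varchar(32));
+INSERT INTO t_hello VALUES (1, 'hello, openGauss');
+SELECT * FROM t_hello;
+DROP TABLE t_hello;  -- clean up
+```
+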
diff --git "a/content/zh/post/zhaoyanliang/openGauss\346\272\220\347\240\201\345\255\246\344\271\240--SQL\350\247\243\346\236\220\346\250\241\345\235\227.md" "b/content/zh/post/zhaoyanliang/openGauss\346\272\220\347\240\201\345\255\246\344\271\240--SQL\350\247\243\346\236\220\346\250\241\345\235\227.md"
new file mode 100644
index 0000000000000000000000000000000000000000..5f10a22925c4b446d0bbdf9c9fa79a2f4169ab1d
--- /dev/null
+++ "b/content/zh/post/zhaoyanliang/openGauss\346\272\220\347\240\201\345\255\246\344\271\240--SQL\350\247\243\346\236\220\346\250\241\345\235\227.md"
@@ -0,0 +1,259 @@
++++
+
+title = "openGauss Source Code Study: the SQL Parsing Module"
+date = "2021-11-30"
+tags = ["Getting started with openGauss community development"]
+archives = "2021-11"
+author = "zhaoyanliang"
+summary = "Getting started with openGauss community development"
+times = "13:30"
+
++++
+
+## openGauss Source Code Walkthrough: the SQL Parsing Module
+
+#### 1. Overview
+
+openGauss is a new-generation enterprise-grade open-source database in which Huawei has deeply integrated its years of experience in the database field and combined it with the requirements of enterprise scenarios. It is a relational database with a client/server, single-process multi-threaded architecture; it supports standalone and one-primary-multiple-standby deployments, along with features such as readable standbys and active/standby high availability.
+
+openGauss was developed on the basis of the PostgreSQL database.
+
+Source repository: [openGauss/openGauss-server - 码云 - 开源中国 (gitee.com)](https://gitee.com/opengauss/openGauss-server?_from=gitee_search)
+
+#### 2. SQL parsing
+
+The SQL engine, acting as the SQL parsing module, is one of the database's most important subsystems. Facing upward it receives the SQL statements sent by applications; facing downward it directs the executor to run execution plans; it is the first module in the whole database to execute. Concretely, it turns the SQL statements the user enters into a concrete, machine-recognizable request that can then be executed, much as a compiler does for a programming language.
+
+The complete pipeline:
+
+![img](https://img-blog.csdnimg.cn/d31a28994415490dbaec2fde6d09ec1e.png#pic_center)
+
+This post covers the first two stages: lexical analysis and syntactic analysis.
+
+Source directory: /src/common/backend/parser
+
+#### 3. What SQL parsing does overall
+
+1. When openGauss's backend server process receives a query sent from the frontend, it first passes it to the query-analysis module for lexical, syntactic, and semantic analysis.
+2. Utility commands (for example CREATE TABLE, CREATE USER, and backup commands) are dispatched to the utility-command processing module;
+3. for query-processing commands (SELECT/INSERT/DELETE/UPDATE), a query parse tree is built and handed to the query-rewrite module.
+
+In short, the flow is:
+
+SQL command --(lexical and syntactic analysis)--> parse tree --(semantic analysis)--> query tree
+
+The call path in the code is as follows (boxes are functions; the numbers give the call order):
+
+![img](https://images2015.cnblogs.com/blog/579102/201611/579102-20161108231520936-605686873.png)
+
+#### 4. Source files and their roles
+
+| File | Role |
+| ----------------------- | ---------------------------------------- |
+| parser.cpp | Main parser entry point |
+| scan.l | Lexical analysis; splits the query into tokens |
+| scansup.cpp | Handles escape characters in the query |
+| kwlookup.cpp | Maps keywords to concrete tokens |
+| keywords.cpp | List of standard keywords |
+| analyze.cpp | Semantic analysis |
+| gram.y | Syntactic analysis; parses the query tokens and produces the raw parse tree |
+| parse_agg.cpp | Aggregates, e.g. SUM(col1), AVG(col2) |
+| parse_clause.cpp | Clauses such as WHERE and ORDER BY |
+| parse_compatibility.cpp | Database compatibility syntax and feature support |
+| parse_coerce.cpp | Coercion of expression data types |
+| parse_collate.cpp | Adds collation information to completed expressions |
+| parse_cte.cpp | Common table expressions (WITH clauses) |
+| parse_expr.cpp | Expressions such as col, col + 3, x = 3 |
+| parse_func.cpp | Functions, table.column, and column identifiers |
+| parse_node.cpp | Creates parse nodes of various kinds |
+| parse_oper.cpp | Operators in expressions |
+| parse_param.cpp | Parameters |
+| parse_relation.cpp | Relation-handling support for tables and columns |
+| parse_target.cpp | Target lists of parsed queries |
+| parse_type.cpp | Data types |
+| parse_utilcmd.cpp | Parse analysis of utility commands |
+
+#### 5. Lexical analysis
+
+**Given the input character stream, the lexer uses its vocabulary to turn keywords, variables, and so on into custom logical structures for the next stage, syntactic analysis.**
+
+A flex file has three parts: the definitions section, the rules section, and the user code section.
+
+- Definitions section:
+
+  Generally declarations and option settings. C comments, header includes, and similar material go between %{ and %}; that content is copied verbatim into the generated C file, and various parameters are set via %option.
+
+  Lexical rules are specified with regular expressions; only tokens that match a rule are accepted, and anything else is an error.
+
+- Rules section:
+
+  A series of match patterns and actions; patterns are generally written as regular expressions and the actions are C code. The template is:
+
+  pattern1 { action1 (C code) }
+
+  When the input matches pattern1, the action code runs.
+
+- User code section:
+
+  User-defined code, with no fixed pattern.
+
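+To make the three sections concrete, here is a minimal flex skeleton. It is emphatically not openGauss's scan.l (whose real rules appear in the screenshots below); the token codes are invented for illustration.
+
+```
+%{
+/* Definitions section: copied verbatim into the generated C scanner.   */
+/* These token codes are hypothetical stand-ins for gram.y's real ones. */
+enum { IDENT = 258, INTEGER_CONST = 259 };
+%}
+%option noyywrap
+
+digit       [0-9]
+identifier  [A-Za-z_][A-Za-z0-9_]*
+
+%%
+{identifier}   { return IDENT; }          /* rules section: pattern { action } */
+{digit}+       { return INTEGER_CONST; }
+[ \t\n]+       { /* skip whitespace */ }
+.              { return yytext[0]; }      /* pass other single characters through */
+%%
+/* User code section: no fixed pattern; helpers or a test main() go here. */
+```
+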
+#### 6. Lexical analysis in the source
+
+##### Definitions section:
+
+Header includes, macro definitions, and the like:
+
+![image-20211113113829772](../typora-user-images/image-20211113113829772.png)
+
+The %option entries are parameters supported by Flex (the lexer generator) and are set via %option:
+
+![image-20211113113850972](../typora-user-images/image-20211113113850972.png)
+
+![image-20211113114005056](../typora-user-images/image-20211113114005056.png)
+
+%option reentrant, the reentrant scanner: a traditional scanner can process only one input stream at a time, since many of its variables are static so that the scanner can remember where the previous scan stopped and continue from there; as a result it cannot handle several input streams at once. Reentrant scanners were introduced to solve this, controlled by the reentrant parameter.
+
+%option bison-bridge, the bison bridge mode: bison and flex evolved without much coordination, which left the two disagreeing on the call signature of yylex. Flex therefore provides a bridge mode: when %option bison-bridge is declared, yylex is declared as int yylex(YYSTYPE* lvalp, yyscan_t scaninfo), which is compatible with bison.
+
+Readers can look up the remaining options on their own.
+
+Specifying the lexical rules: regular expressions define the acceptable character combinations:
+
+![image-20211113114229404](../typora-user-images/image-20211113114229404.png)
+
+##### Rules section:
+
+What action to execute when a given character combination is matched:
+
+![image-20211113114352188](../typora-user-images/image-20211113114352188.png)
+
+#### 7. Syntactic analysis
+
+**Taking the token sequence produced by the lexer as input, the parser recognizes the various syntactic constructs (expressions, statements, program fragments, and ultimately whole programs) according to the language's grammar rules, checking along the way whether the given token sequence is a sentence of the language's grammar.**
+
+It likewise has three sections: definitions, rules, and user code, again separated by %%. The source file is gram.y, which Bison finally compiles into gram.c.
+
+- Definitions section: code between %{ and %} is copied verbatim into the generated file gram.c; it holds header includes, struct definitions, function declarations, and so on, just as in the lexer.
+- Rules section: mainly grammar productions, which define the reduction rules. As long as an input SQL statement can be reduced to the grammar's top-level nonterminal, the statement is judged syntactically valid. The parse tree is built along the way during reduction, laying the groundwork for the semantic analysis that follows.
+
+**Overall flow:**
+
+![image-20211113115728294](../typora-user-images/image-20211113115728294.png)
+
+**Detailed flow:**
+
+![image-20211113115755438](../typora-user-images/image-20211113115755438.png)
+
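+As with the lexer, a toy bison grammar shows the same three-section layout. This grammar (a single assignment statement) is invented for illustration and has no relation to gram.y's real productions; the hard-coded yylex at the bottom simply replays the token stream for "x = 1 + 2;" so the sketch runs standalone.
+
+```
+%{
+/* Definitions section: copied verbatim into the generated gram.c. */
+#include <stdio.h>
+int yylex(void);
+void yyerror(const char *msg) { fprintf(stderr, "%s\n", msg); }
+%}
+%token IDENT INTEGER_CONST
+
+%%
+/* Rules section: reducing the whole input to the start symbol `stmt` */
+/* means the input is syntactically valid.                            */
+stmt: IDENT '=' expr ';'      { printf("assignment recognized\n"); }
+    ;
+expr: INTEGER_CONST
+    | expr '+' INTEGER_CONST
+    ;
+%%
+/* User code section: a canned token stream standing in for flex. */
+static const int toks[] = { IDENT, '=', INTEGER_CONST, '+', INTEGER_CONST, ';', 0 };
+static int pos = 0;
+int yylex(void) { return toks[pos++]; }
+int main(void)  { return yyparse(); }
+```
+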
+#### 8. Syntactic analysis in the source
+
+##### Definitions section:
+
+Basic settings:
+
+![image-20211113115919830](../typora-user-images/image-20211113115919830.png)
+
+%pure-parser declares that this parser is a pure parser, which makes it reentrant.
+
+%expect 0 means zero conflicts are expected, i.e., no conflicts are tolerated.
+
+%name-prefix="base_yy" renames the generated functions and variables from yy to base_yy; as with flex, this lets several parsers for different kinds of data coexist in one product.
+
+%locations enables location information.
+
+The %union block is a C union:
+
+![image-20211113120551288](../typora-user-images/image-20211113120551288.png)
+
+%union{} defines the type of yylval, through which flex hands back the matched value.
+
+%type declares the nonterminals:
+
+![image-20211113120618565](../typora-user-images/image-20211113120618565.png)
+
+Nonterminals are used in the grammar productions and serve the construction of the parse tree.
+
+Precedence declarations:
+
+![image-20211113120715393](../typora-user-images/image-20211113120715393.png)
+
+Defining precedence and left/right associativity resolves certain conflicts in the grammar.
+
+The actual grammar productions:
+
+![image-20211113120741461](../typora-user-images/image-20211113120741461.png)
+
+openGauss's complete set of productions is extremely complex; only excerpts are shown here.
+
+#### 9. A concrete case
+
+**SQL statement:**
+
+INSERT INTO films (code, title, did, date_prod, kind) VALUES ('T_601', 'Yojimbo', 106, '1961-06-16', 'Drama');
+
+**Resulting call chain:**
+
+PostgresMain->exec_simple_query->pg_parse_query->raw_parser->base_yyparse(yyscanner)
+
+Lexical matching (scan.l):
+
+![image-20211113120940437](../typora-user-images/image-20211113120940437.png)
+
+The identifier pattern matches insert.
+
+The matching rule's action then runs:
+
+![image-20211113121016189](../typora-user-images/image-20211113121016189.png)
+
+In the code, keyword holds openGauss's built-in keywords, and insert is one such keyword. You can see a test: when keyword is non-null, i.e., a keyword has been detected, an action is executed according to the keyword's type.
+
+yylex then returns the INSERT token, and gram.y's rules for that token are consulted. Since flex looks one token ahead by default, the second token is already known from the previous step to be INTO. The following rule is found in the rules section:
+
+![image-20211113121303515](../typora-user-images/image-20211113121303515.png)
+
+opt_with_clause may be empty and is followed by INSERT INTO, so this rule matches.
+
+**The rest of the SQL statement is analyzed in the same manner.**
+
+The result is placed in an InsertStmt, after which reduction proceeds under the rules below. stmtblock, the first nonterminal to appear in the rules section, is the result we are after: through repeated reduction, the analysis finally leaves just the single symbol stmtblock, the start symbol, so the match succeeds.
+
+![image-20211113121416459](../typora-user-images/image-20211113121416459.png)
+
diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231723572.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231723572.png new file mode 100644 index 0000000000000000000000000000000000000000..3f78f8f3e5abb9582f156b174435d64a66b97b30 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231723572.png differ
diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231816694.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231816694.png new file mode 100644 index 0000000000000000000000000000000000000000..aae1f9ab0e4a2938abc2b88c081c5e2a810950dd Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231816694.png differ
diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231916046.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231916046.png new file mode 100644 index 0000000000000000000000000000000000000000..435d5831874cfb34442bdf021908fb92d6e40e87 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210409231916046.png differ
diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410165806718.png
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410165806718.png new file mode 100644 index 0000000000000000000000000000000000000000..4ab1c08e1bd05262ff078d87a4e55e5004e8c554 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410165806718.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410170533123.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410170533123.png new file mode 100644 index 0000000000000000000000000000000000000000..4419b30d80c2c2fc77b19d565f22c41395d09f96 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410170533123.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171204086.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171204086.png new file mode 100644 index 0000000000000000000000000000000000000000..d14134c49d745b9df0eb47e4699d131b9a8f90c8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171204086.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171334764.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171334764.png new file mode 100644 index 0000000000000000000000000000000000000000..99e07798b64b2892970a7892198e6215a9c54075 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171334764.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171438626.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171438626.png new file mode 100644 index 0000000000000000000000000000000000000000..43f4d2100c89eb6045a8cb2789f50406fdedab8d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171438626.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171527486.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171527486.png new file mode 100644 index 0000000000000000000000000000000000000000..27b1c52cd5fc92b7b7b9e0adc821f4c8c8eb666a Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171527486.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171820904.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171820904.png new file mode 100644 index 0000000000000000000000000000000000000000..b4d7439a3ee887beba419d7aafa86ac2d7d8fff6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410171820904.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172000577.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172000577.png new file mode 100644 index 0000000000000000000000000000000000000000..4786128f7384debf1cea2c9d48222a5e6f764194 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172000577.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172021522.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172021522.png new file mode 100644 index 0000000000000000000000000000000000000000..576c853308c746ace6d319d20354e801ea2fd0fc Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172021522.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172750306.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172750306.png new file mode 100644 index 0000000000000000000000000000000000000000..3e11db9e9131123923bcb1037885ced2d49bada9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410172750306.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410173040178.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410173040178.png new file mode 100644 index 0000000000000000000000000000000000000000..0c621f4b71d66bb72326638e02adfa34fbf914db Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410173040178.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183156486.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183156486.png new file mode 100644 index 0000000000000000000000000000000000000000..f6b8598ed16bb5681b7a560b7f4702934b233144 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183156486.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183753203.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183753203.png new file mode 100644 index 0000000000000000000000000000000000000000..e643ecb5b945543d41a21ffda7642372952300e0 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183753203.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183816600.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183816600.png new file mode 100644 index 0000000000000000000000000000000000000000..cd78f1a609d191e3f851ebb9cb2038a63d76db76 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183816600.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183913584.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183913584.png new file mode 100644 index 0000000000000000000000000000000000000000..49dd2a1969ab2c2f896889244987fd9fe0d5af16 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410183913584.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410184058507.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410184058507.png new file mode 100644 index 0000000000000000000000000000000000000000..e67783592855a44a22940b0dd5fa41392ff3d051 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410184058507.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410184227918.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410184227918.png new file mode 100644 index 0000000000000000000000000000000000000000..12d1fcc1c10f9045b74bc1f9876c05a69630ec8b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410184227918.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410185248960.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410185248960.png new file mode 100644 index 0000000000000000000000000000000000000000..3e5508595160a4e03b6c83e43a3b343f804aebb3 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410185248960.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200213417.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200213417.png new file mode 100644 index 0000000000000000000000000000000000000000..7bc36ffaac833b9c01b3a425ecbfe71a1103e474 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200213417.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200918334.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200918334.png new file mode 100644 index 0000000000000000000000000000000000000000..25c0d312504e953e572867f45e4cd530dc700c0e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200918334.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200959197.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200959197.png new file mode 100644 index 0000000000000000000000000000000000000000..dde24199d03e31f19f21b8cc96c2a7a964d65a40 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210410200959197.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414170544924.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414170544924.png new file mode 100644 index 0000000000000000000000000000000000000000..ccdfedcf59c243c80d056a65315b489022a5b31b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414170544924.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414170612785.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414170612785.png new file mode 100644 index 0000000000000000000000000000000000000000..3ae43753003d77db8df92a4f86c9a7139ce8dcee Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414170612785.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414171819694.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414171819694.png new file mode 100644 index 0000000000000000000000000000000000000000..769a015a6eb32ebbedbc775a3f349e7731b0ed6e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414171819694.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414173424185.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414173424185.png new file mode 100644 index 0000000000000000000000000000000000000000..9248f0d02b3bac03ad0a207ef6281b3c22bbe668 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414173424185.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414173431455.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414173431455.png new file mode 100644 index 0000000000000000000000000000000000000000..9248f0d02b3bac03ad0a207ef6281b3c22bbe668 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414173431455.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414174526874.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414174526874.png new file mode 100644 index 0000000000000000000000000000000000000000..8cd6f42f41b2299b8bc61ed0b56e954759b39747 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414174526874.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414174933874.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414174933874.png new file mode 100644 index 0000000000000000000000000000000000000000..d6435865e0ac92b24eb9d6587f95b6a32b7468c7 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414174933874.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180150139.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180150139.png new file mode 100644 index 0000000000000000000000000000000000000000..e81a937ddf0c9f0afa33bc8ce6da5d1d6cb47f7d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180150139.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180232823.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180232823.png new file mode 100644 index 0000000000000000000000000000000000000000..dc06225f518d69031a827649f31f353e485f5543 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180232823.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180244951.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180244951.png new file mode 100644 index 0000000000000000000000000000000000000000..dc06225f518d69031a827649f31f353e485f5543 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414180244951.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414231729676.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414231729676.png new file mode 100644 index 0000000000000000000000000000000000000000..1ab54b9176a77610889e89e5a0f360efb0c631fc Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414231729676.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414231801275.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414231801275.png new file mode 100644 index 0000000000000000000000000000000000000000..a01cbca3fc55f92ef6e42a74283a7df1a9af885b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414231801275.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234618695.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234618695.png new file mode 100644 index 0000000000000000000000000000000000000000..ce3b0d025fe7ff6ebc01de447abb7c0c82af055a Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234618695.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234648361.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234648361.png new file mode 100644 index 0000000000000000000000000000000000000000..fbf5ec9fd1e8f4dcb19188615445eb161d2e7eca Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234648361.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234744037.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234744037.png new file mode 100644 index 0000000000000000000000000000000000000000..885b278569a5733046e43af14ca22954c5dcce91 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234744037.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234828351.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234828351.png new file mode 100644 index 0000000000000000000000000000000000000000..ff723b4b8962459a4da49d32721b3425caf2f846 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414234828351.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235224879.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235224879.png new file mode 100644 index 0000000000000000000000000000000000000000..a52b48c942a05755908d4e59944c6d4f11133193 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235224879.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235251700.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235251700.png new file mode 100644 index 0000000000000000000000000000000000000000..2555d7f99e8d53d72f8fb232947e29db73198753 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235251700.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235427043.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235427043.png new file mode 100644 index 0000000000000000000000000000000000000000..bc4841bc174a70a2fa88f4cc32e95065a00ff88c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235427043.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235452231.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235452231.png new file mode 100644 index 0000000000000000000000000000000000000000..6ac4d61b6bbc64690f1eb88acb14761fc4f7464c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210414235452231.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155133077.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155133077.png new file mode 100644 index 0000000000000000000000000000000000000000..8933c1615394cd13d25d53f86e6e0ebedb67cce1 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155133077.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155227025.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155227025.png new file mode 100644 index 0000000000000000000000000000000000000000..19e2d104a9dbfc23bdd3a3d0a5416972d910bf30 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155227025.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155301254.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155301254.png new file mode 100644 index 0000000000000000000000000000000000000000..d786899a4476873be56861696d654beb2e4efe98 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415155301254.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210415235714996.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415235714996.png new file mode 100644 index 0000000000000000000000000000000000000000..71da63561b8743e443ca16a54acbf357d71c31c2 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210415235714996.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210416000032855.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416000032855.png new file mode 100644 index 0000000000000000000000000000000000000000..31725b8a8eeb1afb6f458bfe578648b89962640b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416000032855.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001249844.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001249844.png new file mode 100644 index 0000000000000000000000000000000000000000..fa3a37d0c9f52dc83bbbfbb1d051d124f69dbab3 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001249844.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001655408.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001655408.png new file mode 100644 index 0000000000000000000000000000000000000000..87b7ba8c18042cf50ae95279da78ce08c1c27d6c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001655408.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001942033.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001942033.png new file mode 100644 index 0000000000000000000000000000000000000000..284079bde312e052d765d1193bc06f43b7a6560d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416001942033.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210416002937200.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416002937200.png new file mode 100644 index 0000000000000000000000000000000000000000..3b9895bd5de6549d9390a7f6fa182afcfbebef65 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416002937200.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210416003427231.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416003427231.png new file mode 100644 index 0000000000000000000000000000000000000000..03aa83f6a8ddfd727acad82e72f2e069e04000a8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210416003427231.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111337905.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111337905.png new file mode 100644 index 0000000000000000000000000000000000000000..36473bd1acba40a9d7010cb87ce707883696a5bc Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111337905.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111420590.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111420590.png new file mode 100644 index 0000000000000000000000000000000000000000..7ad06f3a295638d0d273182881bc316032505a8d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111420590.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111546571.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111546571.png new file mode 100644 index 0000000000000000000000000000000000000000..71eb268fd8f87332104a3df92d49dc0c7dadf501 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111546571.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111747018.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111747018.png new file mode 100644 index 0000000000000000000000000000000000000000..5f2e7808e6ee4711789e5d2d2239ee1c24d32633 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111747018.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111939931.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111939931.png new file mode 100644 index 0000000000000000000000000000000000000000..aac0cc59d50f2afdd55ed5545f62272d3d6ca878 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421111939931.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112025415.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112025415.png new file mode 100644 index 0000000000000000000000000000000000000000..f7bb2b26887049f3be5e1cc324480eb7227caa7d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112025415.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112144920.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112144920.png new file mode 100644 index 0000000000000000000000000000000000000000..4a80b44240641e354591e9724c1194d2eeb3f279 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112144920.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112216536.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112216536.png new file mode 100644 index 0000000000000000000000000000000000000000..e20aa437c8886fbbe1ffadeb6497a5a8d9bef142 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112216536.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112245365.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112245365.png new file mode 100644 index 0000000000000000000000000000000000000000..9218e2d2ec9506ae54de5bee4bb2496cd97f4370 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112245365.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112312984.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112312984.png new file mode 100644 index 0000000000000000000000000000000000000000..a6270b67d3a55ae0bb1eda8f79539ba2b12516ba Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210421112312984.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210425174450291.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210425174450291.png new file mode 100644 index 0000000000000000000000000000000000000000..a9ba631293115f45b3bf4efe8e14f1b481702880 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210425174450291.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005225446.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005225446.png new file mode 100644 index 0000000000000000000000000000000000000000..5a1ceb385b53fad7a7f43ebcd909361db9720820 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005225446.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005420286.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005420286.png new file mode 100644 index 0000000000000000000000000000000000000000..49c698e1fd2a9a6619365f1b1a9c4a1372b28a51 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005420286.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005438245.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005438245.png new file mode 100644 index 0000000000000000000000000000000000000000..ad2fb575982aec680422041aae28ee8eedb98ebb Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426005438245.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210426010042396.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426010042396.png new file mode 100644 index 0000000000000000000000000000000000000000..268822a9207ded4efa10658711571a3a177c528c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210426010042396.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210427172735131.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427172735131.png new file mode 100644 index 0000000000000000000000000000000000000000..d0634d97c263c19c0b8101622297d3db7830765b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427172735131.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210427172840732.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427172840732.png new file mode 100644 index 0000000000000000000000000000000000000000..4fe01334bd46fee049645d75cf21cc2488413ba6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427172840732.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210427173051324.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427173051324.png new file mode 100644 index 0000000000000000000000000000000000000000..9855ea08a84eb0d47e4160f6d1af78e5a0bc02fc Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427173051324.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210427173139611.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427173139611.png new file mode 100644 index 0000000000000000000000000000000000000000..6298a9cd3e0046f326d1d079f43e091397450405 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210427173139611.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210428164317101.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210428164317101.png new file mode 100644 index 0000000000000000000000000000000000000000..17fde5a9044d8cafb3183a35502feeda88dc3643 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210428164317101.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210521111625379.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210521111625379.png new file mode 100644 index 0000000000000000000000000000000000000000..c36d6ffec1cd0fb9a94a732cdf222a185b6b0984 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210521111625379.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210522213903343.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210522213903343.png new file mode 100644 index 0000000000000000000000000000000000000000..9810936366822222cd8e4dd399029da67dd31b81 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210522213903343.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210523215835871.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210523215835871.png new file mode 100644 index 0000000000000000000000000000000000000000..484210218d03d595372cc34008f7789cef3cd647 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210523215835871.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210528165540556.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210528165540556.png new file mode 100644 index 0000000000000000000000000000000000000000..2907b451703b51ce7a70fc97aaa5cb91b4444b40 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210528165540556.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210528170527786.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210528170527786.png new file mode 100644 index 0000000000000000000000000000000000000000..8a7392338d47494668028a1b28c320a420cde41a Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210528170527786.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210528171216068.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210528171216068.png new file mode 100644 index 0000000000000000000000000000000000000000..0cb868950a6f2e0313a02b9bf48e6a524138ca18 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210528171216068.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210601215036827.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210601215036827.png new file mode 100644 index 0000000000000000000000000000000000000000..2b988fc6e47ebeabbf48efe21dc938cde78c0477 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210601215036827.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210603205304500.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210603205304500.png new file mode 100644 index 0000000000000000000000000000000000000000..348224cef2606d814f11af53fd7308ec81dc201e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210603205304500.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210603231353074.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210603231353074.png new file mode 100644 index 0000000000000000000000000000000000000000..a3a7147e0ba95ae82f7776697ad38dd01f30d224 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210603231353074.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210605215332403.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210605215332403.png new file mode 100644 index 0000000000000000000000000000000000000000..d065879ce951a8d7779e90507ee65593c704811d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210605215332403.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210606213755610.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210606213755610.png new file mode 100644 index 0000000000000000000000000000000000000000..7fcc74d8f88a1df7a22181876ec9e4c536d38a58 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210606213755610.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210606213817816.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210606213817816.png new file mode 100644 index 0000000000000000000000000000000000000000..b9f1dc02955ee572b83740b47f35bbc3c38e5cda Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210606213817816.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210607115527763.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210607115527763.png new file mode 100644 index 0000000000000000000000000000000000000000..f01cee3df5fea44f4069ab46111f1dadab5f94b8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210607115527763.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210608154132840.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210608154132840.png new file mode 100644 index 0000000000000000000000000000000000000000..0943301f3f0cef3fc2c6f9728989a1fc3420d6c4 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210608154132840.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210608154603179.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210608154603179.png new file mode 100644 index 0000000000000000000000000000000000000000..2f155981576f7c8065cc0b511d405d8cf0953ec0 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210608154603179.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210609005020494.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210609005020494.png new file mode 100644 index 0000000000000000000000000000000000000000..e8550d14245b65b2f8d1ed5803efe745ca316805 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210609005020494.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210626103132595.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210626103132595.png new file mode 100644 index 0000000000000000000000000000000000000000..10373ddd5943a4fc6b66d15b8f95891570fd4085 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210626103132595.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210707010817335.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210707010817335.png new file mode 100644 index 0000000000000000000000000000000000000000..facf80c83f6ddbe2f5dbc9eb1be6554f203495b1 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210707010817335.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210707010858059.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210707010858059.png new file mode 100644 index 0000000000000000000000000000000000000000..9c9015c585a6b31b1c17317e595701f6b3431e5b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210707010858059.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210708005645530.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210708005645530.png new file mode 100644 index 0000000000000000000000000000000000000000..1ffb5a827b8d79b252d1c071e4761811a56347cc Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210708005645530.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210711004257969.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210711004257969.png new file mode 100644 index 0000000000000000000000000000000000000000..68a3114538b175bbf6f92f4e84a3f3ea2bae3d50 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210711004257969.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210711004320197.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210711004320197.png new file mode 100644 index 0000000000000000000000000000000000000000..84b4e9ae4ebc25da4aed45ec8e849848f9352e02 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210711004320197.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210726112632256.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210726112632256.png new file mode 100644 index 0000000000000000000000000000000000000000..2c9c9b5fc4812b2e2aeed7d7e68f69d45c03e8d9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210726112632256.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210726112925983.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210726112925983.png new file mode 100644 index 0000000000000000000000000000000000000000..c7fc8cd95e7aa3d08bd19a00292a176d97c3a36f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210726112925983.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210726113404743.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210726113404743.png new file mode 100644 index 0000000000000000000000000000000000000000..25e7c49815eaece0ac6a629dbf3faa2c1c4f1de1 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210726113404743.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210727004420674.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210727004420674.png new file mode 100644 index 0000000000000000000000000000000000000000..9b25b485f7b9ae324ed70287a6813893f3ec441b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210727004420674.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210729003119610.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210729003119610.png new file mode 100644 index 0000000000000000000000000000000000000000..2341aecf360f60233221336a59606f26d8d20ab5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210729003119610.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906085136116.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906085136116.png new file mode 100644 index 0000000000000000000000000000000000000000..f74d321dea6e947edbffbdc32360b98731beb228 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906085136116.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093736834.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093736834.png new file mode 100644 index 0000000000000000000000000000000000000000..40e316c2646ff83c58afc07f827a84195dfe45ad Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093736834.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093802284.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093802284.png new file mode 100644 index 0000000000000000000000000000000000000000..b9c577d9e4833a674af9175bb81090fb61ca888b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093802284.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093815730.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093815730.png new file mode 100644 index 0000000000000000000000000000000000000000..53291e842c36a59acc90bc5e4947ff11e5473350 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906093815730.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094120554.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094120554.png new file mode 100644 index 0000000000000000000000000000000000000000..4b80933810da133c32c93d582b73720a9e56ef58 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094120554.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094338758.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094338758.png new file mode 100644 index 0000000000000000000000000000000000000000..4a442baa1d45015809eb55af3d7fea4bd1413f41 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094338758.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094439740.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094439740.png new file mode 100644 index 0000000000000000000000000000000000000000..d99ccdd924f97dd3ee6afde1b909bcbf6d22f33a Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094439740.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094512505.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094512505.png new file mode 100644 index 0000000000000000000000000000000000000000..337c6239d5ba3608f97d379af1420241c0607cd5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094512505.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094545799.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094545799.png new file mode 100644 index 0000000000000000000000000000000000000000..d22249a3fd10453742a4ff2ceb9027520ac6d84d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906094545799.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095052495.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095052495.png new file mode 100644 index 0000000000000000000000000000000000000000..b616a0cf9b973e314fba0a74640daf37fa337319 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095052495.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095119541.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095119541.png new file mode 100644 index 0000000000000000000000000000000000000000..0dce0196ce62b23119b80228a64840598b04797b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095119541.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095134629.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095134629.png new file mode 100644 index 0000000000000000000000000000000000000000..d9b104e0e38c21e83e145d5a529bcca21462e88f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095134629.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095210681.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095210681.png new file mode 100644 index 0000000000000000000000000000000000000000..7bbc0c5b41e9806522c6340fd4a9ebeed49378f7 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095210681.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095339299.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095339299.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0503490983c39843124fb4584652fa0d15deff Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095339299.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095401366.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095401366.png new file mode 100644 index 0000000000000000000000000000000000000000..f83997eafc8567b8823eb35257265f2c4ff3d145 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095401366.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095508139.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095508139.png new file mode 100644 index 0000000000000000000000000000000000000000..0876032004fddf37fbb265d8ad5bc128fb056062 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095508139.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095518063.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095518063.png new file mode 100644 index 0000000000000000000000000000000000000000..6a414e07b605df605f3dad9dba352e3883603513 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095518063.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095623056.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095623056.png new file mode 100644 index 0000000000000000000000000000000000000000..e9478ce3608abced0bc3118cc6ad9014c12464f2 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095623056.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095646194.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095646194.png new file mode 100644 index 0000000000000000000000000000000000000000..25ebbcfe272ac7d72b4f67e288d0e0d49338310e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095646194.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095800744.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095800744.png new file mode 100644 index 0000000000000000000000000000000000000000..7364e585a4a624225266281654ec7ab98ce55e9e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095800744.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095945450.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095945450.png new file mode 100644 index 0000000000000000000000000000000000000000..6d18da1089d37ccf123a7e8ebc71dc42f48794df Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906095945450.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906100345458.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906100345458.png new file mode 100644 index 0000000000000000000000000000000000000000..92e4a79e9df7d68c69fd718a9e8ff9afa3d7aee3 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906100345458.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906100758902.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906100758902.png new file mode 100644 index 0000000000000000000000000000000000000000..e9d5f641ab3e37b8f3ec5026d894312553fc75da Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906100758902.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101329908.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101329908.png new file mode 100644 index 0000000000000000000000000000000000000000..e173b1e38550a6e016b87f788ad898449ed0528d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101329908.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101409726.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101409726.png new file mode 100644 index 0000000000000000000000000000000000000000..e51af95bc948adce697cd898e2bd3e8c6cdd00b3 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101409726.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101444492.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101444492.png new file mode 100644 index 0000000000000000000000000000000000000000..9643a698084d20cbdaf19347d5ad75e1733a49e6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101444492.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101816470.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101816470.png new file mode 100644 index 0000000000000000000000000000000000000000..5aa48e94b35448f7693523cb2d22e5498ab74043 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906101816470.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906102556148.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906102556148.png new file mode 100644 index 0000000000000000000000000000000000000000..617dd286873f02844773d2508fa25026e2b8e9fb Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906102556148.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906105912330.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906105912330.png new file mode 100644 index 0000000000000000000000000000000000000000..3552b2918b8953246fec3661580250d274817309 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906105912330.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114808788.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114808788.png new file mode 100644 index 0000000000000000000000000000000000000000..240be674bb8ac22875decd918945cbc018db720b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114808788.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114859548.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114859548.png new file mode 100644 index 0000000000000000000000000000000000000000..2aa430e72590ce183c977820c601444581e56e46 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114859548.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114924545.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114924545.png new file mode 100644 index 0000000000000000000000000000000000000000..587b1c4433434c3517a76d9a34e81ad7bf8a4c1f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906114924545.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906120219459.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906120219459.png new file mode 100644 index 0000000000000000000000000000000000000000..31a481163bd843a929814609eef866f2c7d615fd Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906120219459.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210906121107707.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906121107707.png new file mode 100644 index 0000000000000000000000000000000000000000..42c690239fbfdd832eb4c53facee16d25fc41d9f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210906121107707.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20210914171238237.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20210914171238237.png new file mode 100644 index 0000000000000000000000000000000000000000..e7656bbb839ba9d3b5c7b67e05a49f9289667e43 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20210914171238237.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110017529.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110017529.png new file mode 100644 index 0000000000000000000000000000000000000000..b38f7f072ed49dedd6ca1daccbc0ac71e1e730a6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110017529.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110243320.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110243320.png new file mode 100644 index 0000000000000000000000000000000000000000..6aec095197ce0d1837cf29a994617230490cc5d2 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110243320.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110532504.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110532504.png new file mode 100644 index 0000000000000000000000000000000000000000..882e7214dbb685b041ec993df8a6a849ec91ebab Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110532504.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110636572.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110636572.png new file mode 100644 index 0000000000000000000000000000000000000000..2f8932b1d21ef77e713eb2efce2fa2e06008f8b9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011110636572.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011111105402.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011111105402.png new file mode 100644 index 0000000000000000000000000000000000000000..2f8932b1d21ef77e713eb2efce2fa2e06008f8b9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011111105402.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011111152764.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011111152764.png new file mode 100644 index 0000000000000000000000000000000000000000..2b4beaa7f6d45677bce1a0f3b071729192104ea1 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011111152764.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112527795.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112527795.png new file mode 100644 index 0000000000000000000000000000000000000000..6cd8fb9d98f5c5c7a6627621d8002b34a62036ac Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112527795.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112604966.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112604966.png new file mode 100644 index 0000000000000000000000000000000000000000..8d4583d4f3b362f22a28a19d9536e9087bfff954 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112604966.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112641119.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112641119.png new file mode 100644 index 0000000000000000000000000000000000000000..d2fd6799badd02cad258c8d7b9efed7655ee5ceb Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112641119.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112912990.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112912990.png new file mode 100644 index 0000000000000000000000000000000000000000..170dee95f36b4cb22cb3af8cff08e91bddbbf0ce Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011112912990.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011113011613.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011113011613.png new file mode 100644 index 0000000000000000000000000000000000000000..58cb1156efe2dda61faca46e15711b51af465820 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011113011613.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114156879.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114156879.png new file mode 100644 index 0000000000000000000000000000000000000000..ffbf49e672a1e54c08ac1275bd8e957f8d7bbe44 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114156879.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114220394.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114220394.png new file mode 100644 index 0000000000000000000000000000000000000000..d212a1d80ce15d5794e5cfa1fc0c87faf7822822 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114220394.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114251134.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114251134.png new file mode 100644 index 0000000000000000000000000000000000000000..f2911514a4fac78916129105a7852ef498a1e735 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114251134.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114306010.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114306010.png new file mode 100644 index 0000000000000000000000000000000000000000..3bfe4f0f29a445c02740d290b4c0c205503aac95 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114306010.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114348393.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114348393.png new file mode 100644 index 0000000000000000000000000000000000000000..aa7dd4811ce763367c3287c5510c123ffb958e5c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114348393.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114445675.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114445675.png new file mode 100644 index 0000000000000000000000000000000000000000..1204ed16aa9b932e96a3e3fd4c5d8a1f705521b3 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114445675.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114558025.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114558025.png new file mode 100644 index 0000000000000000000000000000000000000000..fe9c5226b7d1f7cdec055fbeb9eb57695e454899 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114558025.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114657689.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114657689.png new file mode 100644 index 0000000000000000000000000000000000000000..c7923b966a7dcac49a1319462d8873ec98b12996 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114657689.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114813694.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114813694.png new file mode 100644 index 0000000000000000000000000000000000000000..447096c2fa68a6971e7fd0a69076aceb8ea21eb7 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011114813694.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120425966.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120425966.png new file mode 100644 index 0000000000000000000000000000000000000000..45fcfcd30faf763f88371b90bbd4c36d4452e497 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120425966.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120501686.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120501686.png new file mode 100644 index 0000000000000000000000000000000000000000..e8b4a4974207455a90fe02e28b68e30ccbac1d4c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120501686.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120618314.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120618314.png new file mode 100644 index 0000000000000000000000000000000000000000..b077ae3d18746f5ebb8a17a174251678084c5a55 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120618314.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120648928.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120648928.png new file mode 100644 index 0000000000000000000000000000000000000000..d45b010f23f4b0562da4f9a33d36c308eb489c6f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120648928.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120757521.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120757521.png new file mode 100644 index 0000000000000000000000000000000000000000..8cc6d5294393725831b07a7537a0eea5ff03cc90 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120757521.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120857291.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120857291.png new file mode 100644 index 0000000000000000000000000000000000000000..8247a0b48984c87932a2c0eec569fce903b194c9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120857291.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120945745.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120945745.png new file mode 100644 index 0000000000000000000000000000000000000000..7398650dbc4bd6039e73a7a35ce2c2143fc9fa03 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011120945745.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121025609.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121025609.png new file mode 100644 index 0000000000000000000000000000000000000000..dea9adc8b03fce140fc1cba56750e6ce7754e8f6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121025609.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121035495.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121035495.png new file mode 100644 index 0000000000000000000000000000000000000000..dea9adc8b03fce140fc1cba56750e6ce7754e8f6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121035495.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121053125.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121053125.png new file mode 100644 index 0000000000000000000000000000000000000000..ec3077693977174877e3f5e57ca50618949f3957 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211011121053125.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103160714362.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103160714362.png new file mode 100644 index 0000000000000000000000000000000000000000..1447247a9b33ea85e8a60464720081758678fdb0 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103160714362.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103160845726.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103160845726.png new file mode 100644 index 0000000000000000000000000000000000000000..7c21ac54965cc8313cff0cc237f320024b3b49bf Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103160845726.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103162822965.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103162822965.png new file mode 100644 index 0000000000000000000000000000000000000000..c89e41c050d8fa7761373bc68bc85a42bd920032 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103162822965.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103163644877.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103163644877.png new file mode 100644 index 0000000000000000000000000000000000000000..b1f9d88c5abb828f4896a21b97f4c09156dd849e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103163644877.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103163927741.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103163927741.png new file mode 100644 index 0000000000000000000000000000000000000000..fc7644507177acab9a460ca1a6fefdd90f30ceb5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103163927741.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103164056986.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103164056986.png new file mode 100644 index 0000000000000000000000000000000000000000..392e55bc428502b9208297a77b122e1d1ec93b4d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103164056986.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103164127187.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103164127187.png new file mode 100644 index 0000000000000000000000000000000000000000..50a17a6e58c93c80318bba0e03216e304f9c74c6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103164127187.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103165558378.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103165558378.png new file mode 100644 index 0000000000000000000000000000000000000000..a3418c6d9d4eb8fb9aefab15d1acbb7bb966bb65 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103165558378.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103165736561.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103165736561.png new file mode 100644 index 0000000000000000000000000000000000000000..94a626db3dae5e66be7d75520bcbe8851bf8dfe8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103165736561.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103170212856.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103170212856.png new file mode 100644 index 0000000000000000000000000000000000000000..a9ba1649e705713ced8592b0b6b4700e7f8ccba0 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103170212856.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103170442514.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103170442514.png new file mode 100644 index 0000000000000000000000000000000000000000..b1605f3b68386d13dcf50bb802e2f95ef46d4a7d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103170442514.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171515916.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171515916.png new file mode 100644 index 0000000000000000000000000000000000000000..8373a1a18515a5e08c6301264bf0a2e16539f11f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171515916.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171528174.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171528174.png new file mode 100644 index 0000000000000000000000000000000000000000..108b3b79693f3c5f90744d2f6a34bbb411d266e5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171528174.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171533677.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171533677.png new file mode 100644 index 0000000000000000000000000000000000000000..108b3b79693f3c5f90744d2f6a34bbb411d266e5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171533677.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171544484.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171544484.png new file mode 100644 index 0000000000000000000000000000000000000000..b2e48a25a65af4177c91ccf9d8d80a6f6afa76f8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171544484.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171600132.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171600132.png new file mode 100644 index 0000000000000000000000000000000000000000..7652922de7499345a577be87bbd97349dd6eb3f8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171600132.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171616597.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171616597.png new file mode 100644 index 0000000000000000000000000000000000000000..ebd3a51507cfaebdab783152c70541cb21b29b47 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171616597.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171632754.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171632754.png new file mode 100644 index 0000000000000000000000000000000000000000..ebd3a51507cfaebdab783152c70541cb21b29b47 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103171632754.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172432749.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172432749.png new file mode 100644 index 0000000000000000000000000000000000000000..13c45de949d76cf1e99ee9a66b8a790dbe66e301 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172432749.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172510280.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172510280.png new file mode 100644 index 0000000000000000000000000000000000000000..04da7b827eab44873f9498d3dbe34096b37ca424 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172510280.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172630663.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172630663.png new file mode 100644 index 0000000000000000000000000000000000000000..47092c541c4066b4666a0011732b461ea714339a Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172630663.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172944323.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172944323.png new file mode 100644 index 0000000000000000000000000000000000000000..9640f4be40c8a72492f9b995002414eadb2ea67e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103172944323.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103173057702.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103173057702.png new file mode 100644 index 0000000000000000000000000000000000000000..310168f53f11296dae272bdd0719526648bed092 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103173057702.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211103173156715.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103173156715.png new file mode 100644 index 0000000000000000000000000000000000000000..f353960d9c90ecaf5b2f4e3c3e2fb665bec48fa9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211103173156715.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108162752916.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108162752916.png new file mode 100644 index 0000000000000000000000000000000000000000..da9274e0dab5586b22e50af9acd2481d4aa7b6df Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108162752916.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108162912478.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108162912478.png new file mode 100644 index 0000000000000000000000000000000000000000..99a6dd3ada2f98bf2f6f29a14bc15afe5ba44535 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108162912478.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108163254936.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108163254936.png new file mode 100644 index 0000000000000000000000000000000000000000..ac01a6edbbc34ddc78a8685425d994b7abc48216 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108163254936.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108163954650.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108163954650.png new file mode 100644 index 0000000000000000000000000000000000000000..0ac3c8399c852862b48c5ac49c59b5c926f366a4 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108163954650.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108164554159.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108164554159.png new file mode 100644 index 0000000000000000000000000000000000000000..01b640444edf5f208574245fe31012b36c807f20 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108164554159.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108164754161.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108164754161.png new file mode 100644 index 0000000000000000000000000000000000000000..5b118ecbeb08cb3b51abc161b9cab428dd6234a4 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108164754161.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165054078.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165054078.png new file mode 100644 index 0000000000000000000000000000000000000000..0f3fa9ae2f337af4522440e7cb77af69543d7f36 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165054078.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165558448.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165558448.png new file mode 100644 index 0000000000000000000000000000000000000000..afe04976961e911a699559a609b1779da4740605 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165558448.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165833119.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165833119.png new file mode 100644 index 0000000000000000000000000000000000000000..5f0829ce820d969c79179083c42aa903432ea974 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108165833119.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170027609.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170027609.png new file mode 100644 index 0000000000000000000000000000000000000000..f96d8d5544c888f26fb3e1309851810d9dc62f4f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170027609.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170137048.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170137048.png new file mode 100644 index 0000000000000000000000000000000000000000..5ab3ade670b52f726563e628864693a15d53514e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170137048.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170353485.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170353485.png new file mode 100644 index 0000000000000000000000000000000000000000..f366890d4b2a3edd8347c223aca567cb3671a011 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170353485.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170956152.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170956152.png new file mode 100644 index 0000000000000000000000000000000000000000..ad32e12e7077938af407a70bb40ef7a73f06a269 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108170956152.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108173058954.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108173058954.png new file mode 100644 index 0000000000000000000000000000000000000000..8791c461bb15e6d08da704f7c9de5e80024d0efb Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108173058954.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211108173108532.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108173108532.png new file mode 100644 index 0000000000000000000000000000000000000000..8791c461bb15e6d08da704f7c9de5e80024d0efb Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211108173108532.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110161157466.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110161157466.png new file mode 100644 index 0000000000000000000000000000000000000000..ecd46fb242de3279afc3b6e056ee64bc5fcd148f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110161157466.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164207128.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164207128.png new file mode 100644 index 0000000000000000000000000000000000000000..2b276d0efebca3930a5c3ed11c42ce3d83ffe5c1 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164207128.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164431185.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164431185.png new file mode 100644 index 0000000000000000000000000000000000000000..ad32e12e7077938af407a70bb40ef7a73f06a269 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164431185.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164540792.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164540792.png new file mode 100644 index 0000000000000000000000000000000000000000..b24ccf7d04e891d323aefe5d9a013ef4b551a6b8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164540792.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164642691.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164642691.png new file mode 100644 index 0000000000000000000000000000000000000000..05fbd3f469847e0351e9223069e4a8f146e6cdc6 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164642691.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164843604.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164843604.png new file mode 100644 index 0000000000000000000000000000000000000000..77e0f170d4b7aedb933de47faba2bfc1ccaac0f5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164843604.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164953102.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164953102.png new file mode 100644 index 0000000000000000000000000000000000000000..2a3d395539ffd2151e487bf015c4a2af5cc8e08d Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110164953102.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110165208487.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110165208487.png new file mode 100644 index 0000000000000000000000000000000000000000..b0213e199306e36ca78e9ea6ba3e18f01072bca3 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110165208487.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110165401524.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110165401524.png new file mode 100644 index 0000000000000000000000000000000000000000..f591ea4a47438817465cf4decc2ed9d861b4c537 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110165401524.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170430428.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170430428.png new file mode 100644 index 0000000000000000000000000000000000000000..e07045f660580ab1dac8333d9c1c2914ce51898a Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170430428.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170638586.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170638586.png new file mode 100644 index 0000000000000000000000000000000000000000..8d6775d342c103d4d24f07c9abf4b78c3a81f782 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170638586.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170721864.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170721864.png new file mode 100644 index 0000000000000000000000000000000000000000..e3337b32df8aff5b79da8d9d48a7c42ed9c532aa Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170721864.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170828835.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170828835.png new file mode 100644 index 0000000000000000000000000000000000000000..84cf07f353c06112d5a70e339314c1757b54d020 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110170828835.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110171525896.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110171525896.png new file mode 100644 index 0000000000000000000000000000000000000000..072f36801d107924f58135f137b8672cca34ac83 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110171525896.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110171651494.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110171651494.png new file mode 100644 index 0000000000000000000000000000000000000000..fc2a1cf9d9074a519ff2e3fae1fe7013d80b55b5 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110171651494.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173419566.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173419566.png new file mode 100644 index 0000000000000000000000000000000000000000..c58335ccbfb3cec3184c5b490f110f1f91780877 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173419566.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173502716.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173502716.png new file mode 100644 index 0000000000000000000000000000000000000000..a10040f69f4e232ec78bcbc8e4154edc655389cf Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173502716.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173619641.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173619641.png new file mode 100644 index 0000000000000000000000000000000000000000..86ca368daf36150fc0d82098fbd0a611b3f63512 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110173619641.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211110174019749.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110174019749.png new file mode 100644 index 0000000000000000000000000000000000000000..d7a9d2aff6140759a5dc1124e58ace4e5b541440 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211110174019749.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113829772.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113829772.png new file mode 100644 index 0000000000000000000000000000000000000000..2d04d2424617c5735aac629e374196d0dc45d2e0 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113829772.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113850972.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113850972.png new file mode 100644 index 0000000000000000000000000000000000000000..59aa75918e2f7b085607b86122ba9c6063dfc789 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113850972.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113959234.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113959234.png new file mode 100644 index 0000000000000000000000000000000000000000..59aa75918e2f7b085607b86122ba9c6063dfc789 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113113959234.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114005056.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114005056.png new file mode 100644 index 0000000000000000000000000000000000000000..59aa75918e2f7b085607b86122ba9c6063dfc789 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114005056.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114019598.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114019598.png new file mode 100644 index 0000000000000000000000000000000000000000..59aa75918e2f7b085607b86122ba9c6063dfc789 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114019598.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114229404.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114229404.png new file mode 100644 index 0000000000000000000000000000000000000000..909cafffdf7794240df94a8b61db9b61dccdc81e Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114229404.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114352188.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114352188.png new file mode 100644 index 0000000000000000000000000000000000000000..2e7e8527503d576850c13e32beb10f4fadbf47ad Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113114352188.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115728294.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115728294.png new file mode 100644 index 0000000000000000000000000000000000000000..682cb17eedd4db0c56790bb869bfb2c516d595d7 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115728294.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115755438.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115755438.png new file mode 100644 index 0000000000000000000000000000000000000000..5ef223f568989de57ef9f3af9cb458a45ce4f98f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115755438.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115919830.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115919830.png new file mode 100644 index 0000000000000000000000000000000000000000..ca4ccc472f20d079175d1e177673ac54fd5d0b3f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113115919830.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120551288.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120551288.png new file mode 100644 index 0000000000000000000000000000000000000000..ee80b783679d062b627be2be123b0442cfaa62ae Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120551288.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120618565.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120618565.png new file mode 100644 index 0000000000000000000000000000000000000000..9bd80328a7ef616e774f4aba1154466efb2e5a49 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120618565.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120715393.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120715393.png new file mode 100644 index 0000000000000000000000000000000000000000..55b4fbc7adfc4bd72ec7fd872ea20facf4006ac1 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120715393.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120741461.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120741461.png new file mode 100644 index 0000000000000000000000000000000000000000..eeb1c117c9b016bd78afa1f26db0afa7e4c052f0 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120741461.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120940437.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120940437.png new file mode 100644 index 0000000000000000000000000000000000000000..d14ebc95fa27515d96bacfc43a24e065699a6e1c Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113120940437.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121016189.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121016189.png new file mode 100644 index 0000000000000000000000000000000000000000..f05aa6ecf200c47dd15c49de5e034f69f03da865 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121016189.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121303515.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121303515.png new file mode 100644 index 0000000000000000000000000000000000000000..8f0eee6a9b6efda83e629f7beddb37f637940507 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121303515.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121416459.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121416459.png new file mode 100644 index 0000000000000000000000000000000000000000..48a75f6e6a9e05a10d79d496e565b87241a2d6fb Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113121416459.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211113145619607.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113145619607.png new file mode 100644 index 0000000000000000000000000000000000000000..71ddc9ca15e9003408cf45d864ffad482175bc6b Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211113145619607.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114002741385.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114002741385.png new file mode 100644 index 0000000000000000000000000000000000000000..339572cba4a49b822797f8edb527da79043cc91f Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114002741385.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114002839042.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114002839042.png new file mode 100644 index 0000000000000000000000000000000000000000..047797d3d13552749db2f3e8c5f683ce5ff9e478 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114002839042.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114003113627.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114003113627.png new file mode 100644 index 0000000000000000000000000000000000000000..265654cbf620095e655980fc02c19632ac801ed8 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114003113627.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004047393.png 
b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004047393.png new file mode 100644 index 0000000000000000000000000000000000000000..cf96328760db6ad4656041b80c28f08ef0ca88d9 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004047393.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004255316.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004255316.png new file mode 100644 index 0000000000000000000000000000000000000000..4f97bf2e96b4ef5e46dd6286c6cf1a7e4979aaff Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004255316.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004314410.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004314410.png new file mode 100644 index 0000000000000000000000000000000000000000..bb9d384f66348374df4ae58ba2151404a83eb751 Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004314410.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004408389.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004408389.png new file mode 100644 index 0000000000000000000000000000000000000000..8978909acf0e3c26865c9d9f5be34ddb7f3c7afd Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004408389.png differ diff --git a/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004517198.png b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004517198.png new file mode 100644 index 0000000000000000000000000000000000000000..47fe1619611ad9e0b27df2fd54c9c6c5746bb5ab Binary files /dev/null and b/content/zh/post/zhaoyanliang/typora-user-images/image-20211114004517198.png differ diff --git a/content/zh/post/zhengtongyan/figures/DBMS_Architecture01.PNG b/content/zh/post/zhengtongyan/figures/DBMS_Architecture01.PNG new file mode 100644 index 0000000000000000000000000000000000000000..db5893ecec30cdd8a308949f9d717db8435bd82a Binary files /dev/null and b/content/zh/post/zhengtongyan/figures/DBMS_Architecture01.PNG differ diff --git a/content/zh/post/zhengtongyan/figures/DBMS_Architecture02.PNG b/content/zh/post/zhengtongyan/figures/DBMS_Architecture02.PNG new file mode 100644 index 0000000000000000000000000000000000000000..ef724bd8cc0c2fccd1c8090c7deaca5f0cf8a04d Binary files /dev/null and b/content/zh/post/zhengtongyan/figures/DBMS_Architecture02.PNG differ diff --git "a/content/zh/post/zhengtongyan/openGauss\345\222\214PostgreSQL\347\232\204\346\272\220\347\240\201\347\273\223\346\236\204\345\257\271\346\257\224.md" "b/content/zh/post/zhengtongyan/openGauss\345\222\214PostgreSQL\347\232\204\346\272\220\347\240\201\347\273\223\346\236\204\345\257\271\346\257\224.md" new file mode 100644 index 0000000000000000000000000000000000000000..14d8dcc56e58c5a6fc20dabd406435adc7d8d53d --- /dev/null +++ "b/content/zh/post/zhengtongyan/openGauss\345\222\214PostgreSQL\347\232\204\346\272\220\347\240\201\347\273\223\346\236\204\345\257\271\346\257\224.md" @@ -0,0 +1,423 @@ ++++ + +title = "openGauss和PostgreSQL的源码目录结构对比" +date = "2021-07-10" +tags = ["openGauss源码目录结构"] +archives = "2021-07" +author = "zhengtongyan" +summary = "openGauss和PostgreSQL的源码目录结构对比" +img = "/zh/post/zhengtongyan/titles/img.png" +times = "23:00" + ++++ + +# **openGauss和PostgreSQL的源码目录结构对比** + +> 
Preface: Although the openGauss kernel originated from PostgreSQL, Huawei has made deep improvements along many dimensions. This article studies openGauss starting from the organization of its source directories; the deeper the author dug, the more he came to admire openGauss's advanced and well-reasoned source code organization, which reflects Huawei's deep understanding of database architecture and technology and rewards repeated study.
+
+Reading the source code is one of the most important ways to study an open-source database, and the code can be approached at both the macro and the micro level. To avoid getting lost in local code, the first step is to set aside concrete code and implementation details and start at the macro level, from the directory and organization structure, to get a view of the database's overall architecture and implementation logic, as well as the trade-offs the developers made at the implementation level. Once the global structure of the source code is clear, we can study the code of each functional module in depth: query optimization, storage, transactions, process management, memory management, and so on.
+
+The openGauss kernel originated from PostgreSQL 9.2.4, so this article compares the two systems to explore how openGauss and PostgreSQL resemble and differ from each other in source directory and organization structure.
+
+## **1. Why did GaussDB choose PG?**
+
+First we need to be clear about openGauss's product positioning and its relationship with PostgreSQL; this helps in understanding the whole openGauss source tree and its structure. openGauss is the standalone edition of GaussDB, open-sourced by Huawei in June 2020. Why did Huawei pick PG rather than another open-source database such as MySQL when it decided to build GaussDB on its own? We may find the answer in GaussDB's history.
+
+GaussDB is not a single product but the umbrella name for a product family; the current GaussDB line mainly includes GaussDB T (OLTP) and GaussDB A (OLAP). GaussDB T, formerly GaussDB 100, is a distributed database that Huawei has built since 2007 by thoroughly reworking its in-house in-memory database, which it had originally developed for telecom billing workloads. GaussDB A, formerly GaussDB 200, is a massively parallel distributed database with multi-model analytics and mixed-workload capabilities that Huawei has developed since 2011 on the basis of PostgreSQL 9.2.4; it supports hybrid row-column storage and threading, scales to clusters of up to 2048 nodes, and delivers petabyte-scale (PB) analytics, multi-model analytics, and real-time processing.
+
+Although the openGauss kernel originated from PostgreSQL, Huawei, driven by enterprise requirements, refactored more than 80% of the kernel code in C++ (PostgreSQL is written in C), modifying or adding 700,000 lines of core code, with deep optimizations focused on the overall architecture, the kernel's three major engines (optimizer, execution engine, and storage engine), transactions, and the Kunpeng chips.
+
+For example, the execution engine was rebuilt along several dimensions by introducing a vectorized engine and a compiled execution engine, and the storage engine was rebuilt around column storage and adaptive compression. Beyond the kernel, openGauss is also greatly enhanced in high availability, database security, and AI features. PostgreSQL 11.3 has 290 database parameters in total, whereas openGauss currently has more than 500, each corresponding to a kernel capability, which shows how heavily Huawei modified and extended the PG kernel.
+
+Database kernel development is technically demanding: even a small misstep by the team in the kernel's architecture and mechanisms can have severe consequences once the system is live, and if a project proves unable to continue it may even have to be restarted from scratch. Building on a mature open-source database is therefore a sound choice. As for why PG rather than MySQL, which is already widely used at internet companies, Huawei likely settled on PG after surveying its excellent qualities in several respects:
+
+- **High code quality**: As the standard-bearer of the academic school, PG's code is clean, well standardized, and clearly structured, making it well suited to secondary development at the source level. Modifying MySQL's code, by comparison, is much harder.
+- **Complete and powerful features**: PG supports a rich set of data types (multi-model capability), complete SQL syntax (advanced SQL features), and strong query optimization. Take JSON: PG added a JSON data type as early as version 9.2 in 2012, whereas Oracle only began supporting JSON with 12c in 2014, and MySQL gained native JSON support only with 5.7.8 in 2015. Take join algorithms: PG supports nearly all multi-table join algorithms. Take SQL: PG supports most SQL syntax, while MySQL's support is weaker; PG's query optimization, for example of complex subqueries, is also stronger than MySQL's.
+- **Advanced technology**: PG calls itself the world's most advanced open-source database. Its sophistication shows not only in the basics of storage, transactions, and query processing, but even more in newer technology, such as JIT compilation of query plans and foreign tables.
+- **Strong extensibility**: Good extensibility makes PG ideal for secondary development. For example, adding an MPP framework on top of the PG architecture yields the distributed data warehouse Greenplum (MySQL is largely unsuited to data warehousing); adding OpenCypher to PG yields AgensGraph, a multi-model database with graph storage and query capability; automatically sharding data by time and space on the PG architecture yields the time-series database Timescale.
+
+In my view, GaussDB's ten-year history shows that choosing PG was exactly right. PG's user base is growing rapidly and its ecosystem is now developing better than MySQL's, which shows that more and more companies and developers recognize PG as a truly excellent open-source database. In earlier years some companies did build on MySQL; Alibaba, for instance, made extensive performance and feature improvements on top of the MySQL community edition and developed AliSQL to support businesses such as Taobao's Singles' Day sale, but compared with PG such secondary-development success stories are far fewer.
+
+Having clarified the relationship between openGauss and PostgreSQL, we now compare the organization of their source code to see how the two differ in database architecture and implementation; studying them side by side deepens our understanding of both.
+
+## **2. Source code directory structure comparison**
+
+The source versions compared in this article are PostgreSQL 9.2.4 (released on April 4, 2013; as of July 9, 2021, PG had advanced to 14beta2) and openGauss 2.0.1 (the latest release as of July 9, 2021).
+
+Entering the source directories of PostgreSQL and openGauss, you can see an src directory at the top level of both; that directory holds the database source code. This article focuses on the structure under src, because src is the core code of the entire database.
+
+### **2.1 Architecture and main components of a DBMS**
+
+Understanding the architecture of a traditional relational database management system (RDBMS) helps us better understand the modules of the source code and how they are organized. The figure below shows the architecture and main components of an RDBMS.
+
+![](figures/DBMS_Architecture02.PNG)
+
+> Image source, the classic paper:
+> Hellerstein, J. M., Stonebraker, M., & Hamilton, J. (2007). [**Architecture of a Database System.**](https://www.nowpublishers.com/article/Details/DBS-002) Foundations and Trends® in Databases, 1(2), 141-259.
+
+The figure shows the five main functional modules of an RDBMS:
+
+- Client Communications Manager
+- Process Manager
+- Relational Query Processor
+- Transactional Storage Manager
+- Shared Components and Utilities
+
+Consider a simple but typical database query application: "list all passengers on a given flight". The query request this triggers is processed roughly as follows (a minimal client-side sketch follows the list):
+1. A PC at the airport gate (the client) calls an API to establish a network connection with the DBMS's Client Communications Manager;
+2. On receiving the client's request, the DBMS must assign it a computation thread, and the system must ensure that the thread's data and control output remain connected to the client through the communications manager; this work is handled by the Process Manager.
+3. Once a control thread has been assigned, the query can be processed by the Relational Query Processor. This module checks whether the user is authorized to run the query, compiles the SQL statement into a query plan, and hands the plan to the query executor.
+4. At the bottom of the query plan, a number of operators request data from the database. They read data through the Transactional Storage Manager, which guarantees the "ACID" properties of transactions; a buffer manager additionally controls the movement of data between memory buffers and disk.
+5. Finally, the query processor assembles the data into result tuples, which are placed into the client communications manager's buffer, and the communications manager sends the results back to the caller.
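+
+To make steps 1 and 5 concrete, here is a minimal C++ sketch against the libpq client API (the library that, as the directory trees in section 2.3 below show, openGauss keeps under its common directory). The connection settings and the flights/passengers schema are hypothetical, invented only for this illustration:
+
+```
+// Minimal libpq client sketch: connect through the client communications
+// manager, send one query, and read back the buffered result tuples.
+// The connection string and the flights/passengers schema are hypothetical.
+#include <cstdio>
+#include <libpq-fe.h>
+
+int main()
+{
+    PGconn *conn = PQconnectdb("host=127.0.0.1 port=5432 dbname=flights");
+    if (PQstatus(conn) != CONNECTION_OK) {
+        std::fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
+        PQfinish(conn);
+        return 1;
+    }
+
+    // The server-side parser, optimizer, and executor do the real work;
+    // libpq only ships the SQL text and buffers the returned tuples.
+    PGresult *res = PQexec(conn,
+        "SELECT name FROM passengers WHERE flight = 'CA1234'");
+    if (PQresultStatus(res) == PGRES_TUPLES_OK) {
+        for (int i = 0; i < PQntuples(res); i++)
+            std::printf("%s\n", PQgetvalue(res, i, 0));
+    }
+
+    PQclear(res);
+    PQfinish(conn);
+    return 0;
+}
+```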
3. Once a control thread is assigned, the Relational Query Processor takes over: it checks the user's privileges, compiles the SQL statement into a query plan, and hands the plan to the query executor.
4. At the bottom of the plan, operators request data from the database. They go through the Transactional Storage Manager, which reads the data while preserving the transaction's ACID properties; a buffer manager controls the movement of data between memory buffers and disk.
5. Finally, the query processor assembles the data into result tuples, which are placed in the client communications manager's buffers; the manager then ships the results back to the caller.

The example does not mention the Shared Components and Utilities, but they are essential to a complete DBMS. They run independently of any particular query and keep the database stable and coherent: the catalog manager and memory manager are invoked as utilities when data is transferred, the query processor consults the catalog during authentication, parsing, and optimization, and the memory manager serves dynamic allocation and release throughout the DBMS's operation.

### **2.2 Comparing the src trees**

```
PostgreSQL-9.2.4\src
├─backend (backend code: parser, optimizer, executor, storage, commands, processes, etc.)
├─bin (code for psql and other commands)
├─include (header files)
├─interfaces (frontend code)
├─makefiles (platform-specific make settings)
├─pl (stored-procedure language code)
├─port (platform-porting code)
├─template (platform-specific settings)
├─test (test scripts)
├─timezone (time-zone code)
├─tools (development tools and documentation)
└─tutorial (tutorials)
```

```
openGauss-2.0.1\src
├─bin (code for gsql and other commands)
├─common (shared functional modules)
├─gausskernel (the Gauss kernel)
├─include (header files)
├─lib (libraries)
├─makefiles (platform-specific make settings)
├─test (test scripts)
└─tools (development tools and documentation)
```

Compared with PostgreSQL, openGauss reorganizes src as follows:

- It keeps the five directories bin, include, makefiles, test, and tools.
- It adds a **gausskernel** directory holding the entire **Gauss kernel**; the bootstrap, optimizer, executor, storage, and related modules move there from backend.
- It adds a **common** directory holding **shared functional modules**; the six directories interfaces, pl, port, template, timezone, and tutorial, plus the remainder of backend (libpq, nodes, parser, and so on), move there.

The following sections examine these changes in detail.

### **2.3 From backend to common and gausskernel**

PostgreSQL follows a client/server model: the client is the frontend and the server is the backend, so PG's backend directory is the core of the database service.

openGauss refines backend's contents by function: kernel components such as optimizer, executor, and storage move into the new gausskernel directory, while shared functional modules move into the new common directory.

```
PostgreSQL-9.2.4\src
├─backend (backend source directory)
│ ├─access (storage access methods: heap, index, and other data access)
│ ├─bootstrap (bootstrap mode, used to create the initial template database)
│ ├─catalog (system catalogs)
│ ├─commands (SQL commands outside planned queries, e.g. CREATE TABLE)
│ ├─executor (the executor: runs generated query plans)
│ ├─foreign (FDW, Foreign Data Wrapper handling)
│ ├─lib (common functions)
│ ├─libpq (client-communication library functions; nearly every module depends on it)
│ ├─main (main entry; hands control to the postmaster or a postgres process)
│ ├─nodes (definitions of internal node and list structures and the functions that handle them)
│ ├─optimizer (the optimizer: builds optimal query paths and plans from the query tree)
│ ├─parser (the parser: turns SQL queries into internal query trees)
│ ├─po
│ ├─port (platform-compatibility functions)
│ ├─postmaster (listens for client requests and starts/stops postgres processes)
│ ├─regex (regular-expression library and related functions)
│ ├─replication (streaming replication)
│ ├─rewrite (query rewriting)
│ ├─snowball (full-text search support)
│ ├─storage (storage management: memory, disk, buffers, etc.)
│ ├─tcop (main loop of the postgres service process; calls parser, optimizer, executor, and commands to run client queries)
│ ├─tsearch (full-text search)
│ └─utils (support functions: error reporting, initialization, etc.)
```

```
openGauss-2.0.1\src
├─common (shared functional modules)
│ ├─backend
│ │ ├─catalog
│ │ ├─client_logic
│ │ ├─lib
│ │ ├─libpq
│ │ ├─nodes
│ │ ├─parser
│ │ ├─pgxc_single
│ │ ├─po
│ │ ├─port
│ │ ├─regex
│ │ ├─snowball
│ │ ├─tsearch
│ │ └─utils
│ ├─interfaces
│ ├─pgxc
│ ├─pl
│ ├─port
│ ├─template
│ ├─timezone
│ └─tutorial
```

```
openGauss-2.0.1\src
├─gausskernel (the Gauss kernel)
│ ├─bootstrap
│ ├─cbb
│ ├─dbmind (AI4DB and DB4AI modules)
│ ├─optimizer
│ ├─process (process and thread management)
│ ├─runtime (the executor)
│ ├─security
│ └─storage
```

#### **(1) Overall layout of the gausskernel directory**

openGauss reorganized the kernel code substantially, and since the kernel is the most important part of a database, its source layout deserves the closest attention. PG keeps all kernel code under backend, whereas openGauss keeps it under gausskernel (as the name makes plain).

openGauss likely created gausskernel for several reasons:
1. A dedicated kernel directory signals how much openGauss values the kernel, instead of piling every module under backend as PG does.
2. It highlights the major improvements and optimizations Huawei made to the kernel.
3. Pulling the kernel code out on its own simplifies development and later maintenance.

The main structural changes inside gausskernel are:

1. The three directories bootstrap, optimizer, and storage are kept, but their contents changed (discussed below).
2. Three directories are new: cbb, dbmind, and security; dbmind holds the latest results of combining AI with the database.
3. A new process directory was created, and PG's postmaster directory moved under it as a subdirectory, reflecting extensive work on process and thread management.
4. A new runtime directory was created, and PG's executor directory moved under it as a subdirectory, reflecting executor enhancements such as the new vectorized execution engine.

#### **(2) The common components directory**

openGauss moved the shared functional modules out of PG's backend directory into the new common directory, probably for two reasons:
1. openGauss regards these modules as components every database system shares, such as catalog, lib, and libpq under PG's backend.
2. openGauss largely preserves these modules' interfaces and public functions, so it stays well compatible with the existing PG ecosystem; it only lightly optimized this code, and a separate common directory cleanly distinguishes it from heavily rewritten areas like gausskernel.

Note that openGauss still has a backend directory, but it now holds only shared modules and lives under common.

#### **(3) Changes to the optimizer directory**

```
PostgreSQL-9.2.4\src
├─backend
│ ├─commands
│ ├─optimizer
│ │ ├─geqo (genetic query optimization)
│ │ ├─path (builds query paths from the parser's output)
│ │ ├─plan (turns the chosen path into a query plan)
│ │ ├─prep (handles special query plans)
│ │ └─util (optimizer support functions)
│ ├─rewrite
```

```
openGauss-2.0.1\src
├─gausskernel (the Gauss kernel)
│ ├─optimizer
│ │ ├─commands
│ │ ├─geqo
│ │ ├─path
│ │ ├─plan
│ │ ├─prep
│ │ ├─rewrite
│ │ └─util
```

The main optimizer change is that commands and rewrite, siblings of optimizer in PG, move under optimizer in openGauss: openGauss treats the command module and query rewriting as parts of the optimizer.

#### **(4) From postmaster to process**

Architecturally, PostgreSQL is multi-process. To raise concurrency, openGauss reworked this into a multi-threaded architecture: openGauss is a single-process, multi-threaded database.

```
PostgreSQL-9.2.4\src
├─backend
│ ├─postmaster
│ ├─tcop
```

```
openGauss-2.0.1\src
├─gausskernel
│ ├─process
│ │ ├─datasource
│ │ ├─globalplancache
│ │ ├─job
│ │ ├─main
│ │ ├─postmaster
│ │ ├─stream
│ │ ├─tcop
│ │ └─threadpool (thread pool)
```

As the comparison shows, openGauss created a process directory under gausskernel, moved PG's postmaster and tcop directories into it, and added many new modules, such as the threadpool module.

#### **(5) From executor to runtime**

```
PostgreSQL-9.2.4\src
├─backend
│ ├─executor
```

```
openGauss-2.0.1\src
├─gausskernel
│ ├─runtime
│ │ ├─codegen (code generation)
│ │ │ ├─codegenutil
│ │ │ ├─executor
│ │ │ ├─llvmir (LLVM-based compilation)
│ │ │ └─vecexecutor
│ │ ├─executor
│ │ └─vecexecutor (vectorized execution engine)
│ │ ├─vecnode
│ │ ├─vecprimitive
│ │ └─vectorsonic
```

openGauss created a runtime directory under gausskernel, moved PG's executor under it, and added two directories: codegen and vecexecutor. codegen uses the popular open-source LLVM compiler framework to generate high-performance code; vecexecutor holds the vectorized execution engine that boosts the SQL engine's compute performance.

Code generation and vectorized execution are the two techniques that academia and industry currently use to accelerate SQL engines, and openGauss implements both.

#### **(6) Changes to the access directory**

openGauss moved the access directory from backend to gausskernel/storage, because data access is tightly coupled to the storage layout. Data generally lives on disk, so its on-disk organization determines access efficiency: heap files versus sequential files, and sequential reads versus index lookups.

```
PostgreSQL-9.2.4\src
├─backend
│ ├─access
│ │ ├─common (shared access functions)
│ │ ├─gin
│ │ ├─gist (customizable access methods)
│ │ ├─hash (hash-based table access)
│ │ ├─heap (heap-based table access)
│ │ ├─index (index-based table access)
│ │ ├─nbtree (Lehman and Yao's B-tree management algorithm)
│ │ ├─spgist
│ │ └─transam (transaction manager)
```

```
openGauss-2.0.1\src
├─gausskernel
│ └─storage
│ ├─access
│ │ ├─cbtree
│ │ ├─common
│ │ ├─dfs
│ │ ├─gin
│ │ ├─gist
│ │ ├─hash
│ │ ├─hbstore
│ │ ├─heap
│ │ ├─index
│ │ ├─nbtree
│ │ ├─obs
│ │ ├─psort
│ │ ├─redo
│ │ ├─rmgrdesc
│ │ ├─spgist
│ │ ├─table
│ │ └─transam
```

#### **(7) Changes to the storage directory**

```
PostgreSQL-9.2.4\src
├─backend
│ ├─storage
│ │ ├─buffer (row-store shared buffer module)
│ │ ├─file (file operations and virtual file descriptors)
│ │ ├─freespace (row-store free-space module)
│ │ ├─ipc (inter-process communication)
│ │ ├─large_object (large objects)
│ │ ├─lmgr (lock manager)
│ │ ├─page (page module)
│ │ └─smgr (storage-medium manager)
```

```
openGauss-2.0.1\src
├─gausskernel
│ └─storage
│ ├─access
│ ├─buffer
│ ├─bulkload (foreign-table bulk-load module)
│ ├─cmgr (column-store read-only shared buffer)
│ ├─cstore (column-store access module)
│ ├─dfs (foreign-table server connections)
│ ├─file
│ ├─freespace
│ ├─ipc
│ ├─large_object
│ ├─lmgr
│ ├─mot (memory engine module)
│ ├─page
│ ├─remote (standby page-repair module)
│ ├─replication
│ └─smgr
```

The storage changes in openGauss mainly comprise:

- New column-store modules such as cmgr and cstore, a major enhancement over PG: column storage lets openGauss serve many more scenarios.
- A new mot module: MOT (Memory-Optimized Table) is openGauss's memory-optimized storage engine, a flagship production-grade feature tuned for many-core, large-memory servers that delivers higher performance for transactional workloads.
- New foreign-table modules such as dfs and bulkload.
- A new remote module for standby page repair.
- The replication module moves from backend into storage.
- The eight modules buffer, file, freespace, ipc, large_object, lmgr, page, and smgr are retained.

#### **(8) The security directory: safeguarding data**

```
openGauss-2.0.1\src
├─gausskernel
│ ├─security
│ │ ├─gs_policy
│ │ ├─iprange
│ │ └─keymanagement
```
openGauss created a security directory under gausskernel for the database-security modules: authentication, role management, auditing and tracing, data encryption, and so on.

#### **(9) The dbmind directory: the database's AI brain**

Combining AI with databases is a hot research topic in recent years. To my knowledge, even the newest PostgreSQL and MySQL releases lack such built-in capabilities, so openGauss leads the field here. The AI-related source lives under dbmind, and its placement under gausskernel shows that Huawei treats AI capability as a base capability of the future database kernel.

```
openGauss-2.0.1\src
├─gausskernel
│ ├─dbmind (AI4DB and DB4AI modules)
│ │ ├─deepsql (DB4AI: in-database AI algorithms)
│ │ │ └─madlib_modules (the open-source MADlib machine-learning framework)
│ │ └─tools (AI4DB toolset)
│ │ ├─anomaly_detection (metric collection, prediction, and anomaly monitoring)
│ │ ├─index_advisor (index recommendation)
│ │ ├─predictor (AI query-time prediction)
│ │ ├─sqldiag (slow-SQL diagnosis)
│ │ └─xtuner (parameter tuning and diagnosis)
```

AI-plus-database work generally splits into AI4DB and DB4AI:
- **AI4DB** uses AI to optimize database performance or strengthen operations: AI-driven self-tuning, self-diagnosis, self-security, self-operation, and self-healing. openGauss already ships five such modules under dbmind/tools.
- **DB4AI** opens an end-to-end path from the database to AI applications, for performance and cost reasons. The main approach today wraps common machine-learning algorithms as SQL, so they can be invoked directly in SQL statements and exploit openGauss's parallelism and column storage. The deepsql directory implements these in-database algorithms, currently more than 60 of them, mostly via the open-source MADlib framework.
\ No newline at end of file
diff --git a/content/zh/post/zhengtongyan/titles/img.png b/content/zh/post/zhengtongyan/titles/img.png new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/zhengtongyan/titles/img.png differ
diff --git "a/content/zh/post/zhengwen2/OpenGauss\347\264\242\345\274\225\350\257\246\350\247\243.md" "b/content/zh/post/zhengwen2/OpenGauss\347\264\242\345\274\225\350\257\246\350\247\243.md" new file mode 100644 index 0000000000000000000000000000000000000000..536acdf3504dfcfc765a058334d3cb8123089ed9 --- /dev/null +++ "b/content/zh/post/zhengwen2/OpenGauss\347\264\242\345\274\225\350\257\246\350\247\243.md" @@ -0,0 +1,206 @@

+++

title = "openGauss Indexes in Depth"

date = "2021-07-10"

tags = ["openGauss indexes in depth"]

archives = "2021-07"

author = "吴松"

summary = "openGauss indexes in depth"

img = "/zh/post/zhengwen2/img/img22.jpg"

times = "12:30"

+++

# openGauss Indexes in Depth

This article surveys the index structures commonly used in openGauss and the index-related catalog metadata, then walks through the important code paths in the life of a B-tree index. I hope it helps you understand indexing in openGauss.

## Index methods

### B-tree indexes

B-tree indexes suit comparison and range queries: whenever a predicate uses (>, =, <, >=, <=), a B-tree can serve it. B-tree is the default index method in both PostgreSQL and openGauss.

*Figure 1: B-tree index structure*
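As a concrete illustration of the claim above, here is a minimal SQL sketch (the table and index names are hypothetical, chosen to match the `tab_pkey` example used further down):

```sql
-- A small table with a default (B-tree) index on its key column.
CREATE TABLE tab (id int, name text);
CREATE UNIQUE INDEX tab_pkey ON tab (id);

-- Comparison and range predicates are exactly what a B-tree serves well:
SELECT * FROM tab WHERE id = 42;
SELECT * FROM tab WHERE id BETWEEN 100 AND 200;

-- EXPLAIN shows whether the planner chose an index scan on tab_pkey.
EXPLAIN SELECT * FROM tab WHERE id BETWEEN 100 AND 200;
```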
B-tree index pages come in several kinds: meta-page, root-page, branch-page, and leaf-page, as shown in Figure 1.

- meta-page: the metadata page of the B-tree index; it stores the index's metadata, from which the root page can be located.
- root-page: the root node of the B-tree.
- branch-page: an internal node, i.e. any node that is neither the root nor a leaf.
- leaf-page: a leaf node; its ctids point to heap tuples, whereas a non-leaf node's ctids point to its child pages.

With pageinspect installed, you can run:

`select * from bt_metap('tab_pkey')` to view the meta-page,

`select * from bt_page_stats('tab_pkey', 1)` to view a page's statistics, and

`select * from bt_page_items('tab_pkey', 1)` to view the tuples within a page.

The index page layout is shown in Figure 2.

The High-Key is the minimum value of the page's right sibling. Because data is ordered across pages, every key in the current page satisfies key <= High-Key; for a unique index, every key satisfies key < High-Key. The rightmost page on each level has no right sibling and therefore no High-Key.

The Special Space is particular to index pages: it stores the page numbers of the page's left and right neighbors, through which either neighbor can be reached.

*Figure 2: B-tree index page structure*

The above describes the row-store engine's B-tree; the column-store B-tree has the same overall structure. On a leaf page, the row store maps keys to ctids, and the ctid's block number and offset locate the heap tuple directly. In the column store, the ctid records (cu_id, offset): the engine must additionally look up the CUDesc record via the index on the CUDesc table's cu_id column, open the corresponding CU file, and locate the data by offset.

Column-store B-tree indexes do not support expression indexes, partial indexes, or unique indexes.
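The three pageinspect calls above fit together into a short inspection session; a minimal sketch, assuming pageinspect is installed as the article presumes and reusing the hypothetical `tab_pkey` index:

```sql
CREATE EXTENSION pageinspect;

-- Meta-page: locates the root page and reports the tree's level.
SELECT * FROM bt_metap('tab_pkey');

-- Per-page statistics for page 1: page type, live items, free space.
SELECT * FROM bt_page_stats('tab_pkey', 1);

-- The index tuples stored in page 1: item offsets, ctids, and key data.
SELECT * FROM bt_page_items('tab_pkey', 1);
```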
### GiST indexes

A GiST (Generalized Search Tree) is also a balanced tree. B-trees are bound to comparison semantics and serve the five operators (>, >=, =, <=, <), but for some data stored in modern databases, such as geolocation or image data, those five operators may be meaningless. A GiST index lets you define the rules by which data is distributed across a balanced tree and the methods used to access it. For example, a GiST index can define an R-tree over spatial data, supporting relative-position operators (left-of, right-of, contains, and so on).

GiST hides the database's internals, such as locking and write-ahead logging, so implementing a new GiST index instance (an index operator class) is comparatively light work: an operator class built on the GiST framework only needs to implement a few predefined interfaces.

### GIN indexes

GIN (Generalized Inverted iNdex) is an inverted index, mainly for multi-valued types such as arrays and full-text search. If a key's TID list is small, it can live in the same page as the element (a posting list). If the TID list is large, a more efficient B-tree structure is used, stored in separate pages (a posting tree).

*Figure 3: GIN index structure*

Row-store tables support B-tree (the default), GIN, and GiST indexes. Column-store tables support Psort (the default), B-tree, and GIN indexes.
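A short sketch of a GIN index on a multi-valued column may make the "inverted" idea concrete; the table and data here are hypothetical, and the array-containment operator class is assumed to behave as it does in PostgreSQL, from which openGauss derives:

```sql
-- A document table whose tags column holds multiple values per row.
CREATE TABLE docs (id int, tags text[]);
CREATE INDEX docs_tags_gin ON docs USING gin (tags);

-- GIN accelerates containment queries over the array elements:
SELECT * FROM docs WHERE tags @> ARRAY['database'];
```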
## Index-related system catalogs

### pg_am

The PG_AM catalog stores information about index access methods, one row per access method the system supports. For the meaning of each field see the official documentation: https://opengauss.org/zh/docs/2.0.0/docs/Developerguide/PG_AM.html

### pg_index

The PG_INDEX catalog stores part of each index's metadata; most of the rest lives in PG_CLASS.

For a partitioned table's partition-local index, besides the row in pg_index, each partition's index information is stored in pg_partition.
For the specific field meanings see the official documentation:
https://opengauss.org/zh/docs/2.0.0/docs/Developerguide/PG_INDEX.html
The indisvalid, indisready, and indcheckxmin fields are discussed in detail later.

Beyond these two, many other catalogs participate in the index lifecycle, such as pg_class, pg_attribute, pg_depend, and pg_constraint; they are not covered one by one here, so please consult the official documentation.
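Since the indisvalid/indisready flags recur throughout the rest of this article, a small query against pg_index helps when following along; a sketch, with `tab` standing in for any table of interest:

```sql
-- List the indexes on a table together with the pg_index flags discussed above.
SELECT c.relname AS index_name,
       i.indisunique,
       i.indisvalid,    -- readable by queries?
       i.indisready,    -- maintained by writes?
       i.indcheckxmin   -- planner must check the index row's xmin?
FROM pg_index i
JOIN pg_class c ON c.oid = i.indexrelid
WHERE i.indrelid = 'tab'::regclass;
```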
## The index lifecycle

### Creating an index

The entry function for index creation is:

**DefineIndex**

1. Check and validate the creation parameters.
2. Call index_create, which does the bulk of the work. Every index creation goes through index_create; its arguments decide whether the index structure is actually built. Some flows, such as create index concurrently or a partitioned table's partition-local index, only create the index metadata at this step and build the structure later. For an ordinary (non-partitioned) index, or a partitioned table's global index, the structure is built here.
3. For a partitioned table's partition-local index, iterate over all partitions and call partition_index_create for each.
4. For create index concurrently, run the concurrent-build flow. In that flow the table lock is ShareUpdateExclusiveLock, which blocks neither reads nor DML; the ordinary build takes ShareLock, which blocks DML. Partitioned tables do not allow create index concurrently.

**index_create**

1. Check and validate parameters.
2. Build the index tuple descriptor, which describes the tuple layout; many of its attributes are copied from the table's tuple descriptor, and much of the index's tuple descriptor in the relcache ultimately comes from the descriptor built here. ConstructTupleDescriptor
3. Generate a new OID for the index. GetNewRelFileNode
4. Insert the index into the relcache and create the index file on disk; creating the file is WAL-logged, and the new index's relfilenode is set equal to its OID. For concurrent creation or a partitioned table's partition-local index, file creation is skipped. heap_create
5. Insert rows into pg_class, pg_attribute, pg_index, pg_constraint, pg_depend, and other catalogs.
6. Run the build step: ordinary indexes and a partitioned table's global index are actually built here; partition-local indexes and create index concurrently skip this step. index_build
7. Record the index's creation time in pg_object.

**index_build**

index_build performs the actual build. Before it is called, the index metadata is already in place and the empty index file exists. It dispatches to the build procedure named by ambuild in pg_am:

1. Find the build procedure for the index type via pg_am: a B-tree's ambuild is btbuild, a GIN index's is ginbuild, and so on, then call it. index_build_storage
2. After the build, if the build was not HOT-safe, set the index's indcheckxmin to true in pg_index. indcheckxmin tells other transactions that the index may be unsafe for them: when planning, a transaction that sees indcheckxmin = true must compare the index-creating transaction's position against its own to decide whether it may use the index.
3. Update the table's and index's fields in pg_class, e.g. set the table's relhasindex to true and update relallvisible.
**btbuild**

Each index type has its own build procedure; btbuild is the one for B-tree indexes.

1. Build a BTBuildState object for the run. It contains two BTSpool pointers used to load heap tuples into memory and sort them. A BTSpool contains a Tuplesortstate pointer; Tuplesortstate tracks the state of the tuple sort and manages the memory and disk space the sort needs.
2. Run the heap scan. An ordinary build reads every heap tuple (SNAPSHOT_ANY) and then decides per tuple whether it should be indexed; create index concurrently reads heap tuples through an MVCC snapshot (SNAPSHOT_MVCC). Each tuple read yields the columns the index needs. For a heap-only tuple, the index tuple's tid points to the root of the HOT chain. IndexBuildHeapScan / GlobalIndexBuildHeapScan
3. Sort the scanned heap tuples, then build the complete B-tree from the sorted index tuples. _bt_leafbuild

**_bt_leafbuild**

1. Sort the index tuples. tuplesort_performsort
2. Build the complete B-tree from the sorted index tuples. _bt_load

**_bt_load**

1. Iterate over all sorted index tuples, adding each to a B-tree page via _bt_buildadd. The B-tree is built from the leaf level upward, and each level from left to right. When a page fills up, it is flushed to disk and its right sibling on the same level is created; if no parent page exists yet, one is created, and otherwise the page's minkey and page number are inserted into the parent. Inserting into the parent works like inserting into a child and can in turn trigger the parent's flush, and so on. Each index page records the page numbers of its left and right neighbors in its special space, and every page is WAL-logged.

*Figure 4: building B-tree index pages*

2. Because the build proceeds left to right and bottom up, and pages are flushed only when full, the rightmost page on each level may still be unflushed and unlinked from its parent after all index tuples are consumed, so each level's rightmost page gets a final pass. The rightmost page of a level has no High-Key, so all of its ItemPointers must finally shift one position to the left. Once the B-tree is complete, the meta-page is built; all pages are WAL-logged, and before the flow ends fsync is called once so the WAL reaches disk. _bt_uppershutdown

*Figure 5: the rightmost page on each B-tree level*
**partition_index_create**

This creates a partitioned table's partition-local index. The flow first fetches the partition information, then iterates over every partition, calling partition_index_create for each; a usage sketch follows this list.

1. Generate a new OID for the partition-local index.
2. Insert the index information into the partcache, create the partition-local index file, and WAL-log it. heapCreatePartition
3. Insert the partition-local index's row into pg_partition. insertPartitionEntry
4. Build the index. index_build
5. Update the table and index information in pg_class.
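As promised above, here is a hedged sketch of the two kinds of partitioned-table indexes this flow serves, using openGauss's range-partitioning syntax; the table, partition, and index names are hypothetical:

```sql
CREATE TABLE sales (id int, sale_date date)
PARTITION BY RANGE (sale_date)
(
    PARTITION p2020 VALUES LESS THAN ('2021-01-01'),
    PARTITION p2021 VALUES LESS THAN ('2022-01-01')
);

-- Partition-local index: one physical index per partition,
-- built through partition_index_create as described above.
CREATE INDEX sales_date_local ON sales (sale_date) LOCAL;

-- Global index: a single index over all partitions,
-- built in index_create/index_build like an ordinary index.
CREATE INDEX sales_id_global ON sales (id) GLOBAL;
```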
**create index concurrently**

This builds an index without blocking DML on the table.

Phase 1

1. Begin transaction tx1.
2. Insert the relcache entry and the index metadata (pg_class and the rest) exactly as in ordinary creation, except that pg_index's indisvalid and indisready are set to false.
3. Take a session-level ShareUpdateExclusiveLock on the table, so that no other flow can modify the table's or index's metadata during the build.
4. Commit tx1. After the commit, newly started transactions see the index, in a state that is neither readable (indisvalid = false) nor writable (indisready = false); transactions that see the index metadata take HOT-safety into account when inserting data.

Phase 2

1. Begin transaction tx2.
2. Wait for the currently running DML transactions to finish. Concretely: find all transaction IDs holding locks that conflict with ShareLock and wait for them to commit or abort. Why wait? Example: a table with columns {id, name}, data as in Figure 6, building an index on id. A transaction tx that started before Phase 1 ended cannot see the index metadata, so it performs HOT updates. Case 1: without the wait, the build scans the heap and sees the tuple {id:3, name:'dd'}, putting key 3 into the index; after the scan, tx updates that row to {id:4, name:'dd'}, so the index's contents are simply wrong. The ordinary build path blocks DML and cannot hit this. Case 2: if tx started after Phase 1, the index metadata is visible to it, so its update knows the column is indexed and does not perform a HOT update; depending on the relative order of the build and the update, index entries can then be missed, as in Figure 7. Since the index is still an intermediate state, invisible to both reads and writes, such drift is tolerable as long as the final index is correct: Case 2's missing entries are back-filled in Phase 3, whereas Case 1's corruption would never be repaired, because a HOT chain gets only one index entry.

*Figure 6: index corruption if DML started before Phase 1 is not waited out*

*Figure 7: missing index entries if DML started after Phase 1 is not waited out*

3. Take snapshot snapshot1.
4. Scan all tuples visible in the table and build the index.
5. Set the index's indisready to true (the index becomes visible to writes).
6. Commit tx2. Transactions that start after the commit update the index along with the data.

Phase 3

1. Begin transaction tx3.
2. Wait for the currently running DML transactions to finish: transactions that started before Phase 2 ended do not see indisready = true and have not been maintaining the index.
3. Take snapshot snapshot2.
4. Apply the index updates that DML since the start of Phase 2 failed to make. validate_index
5. Record snapshot2's xmin.
6. Commit tx3.

Phase 4

1. Begin transaction tx4.
2. Wait for transactions opened before Phase 3 to finish. They may hold fairly old snapshots, and if indisvalid were set to true before they end, they could see inconsistent reads. As Figure 8 shows: txA starts before Phase 3 and reads row r1; txB then deletes r1; when tx3 builds the index in Phase 3, r1 is already deleted, so the index has no entry for it; after tx3 commits, indisvalid = true makes the index readable, and when txA reads again through the index it finds no r1, an inconsistent read. To prevent this, those transactions must finish before indisvalid is set to true.

*Figure 8: waiting for read transactions to finish*

3. Set the index's indisvalid to true.
4. Commit tx4.
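At the SQL level, the whole four-phase dance above hides behind one statement; a sketch, reusing the hypothetical `tab` table, with the pg_index check useful because a failed concurrent build can leave an invalid index behind:

```sql
-- Builds the index without blocking reads or DML on tab.
CREATE INDEX CONCURRENTLY tab_name_idx ON tab (name);

-- If the build failed midway, an invalid leftover shows up here:
SELECT indexrelid::regclass, indisvalid, indisready
FROM pg_index
WHERE indrelid = 'tab'::regclass;
```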
## Dropping an index

Like creation, index removal has a concurrent and a non-concurrent form; the corresponding lock types are ShareUpdateExclusiveLock and AccessExclusiveLock.

**index_drop**

The concurrent path:

1. Begin transaction tx1.
2. Set the index's indisvalid to false, WAL-logged. index_set_state_flags(indexId, INDEX_DROP_CLEAR_VALID)
3. Invalidate the table's relcache entry; take session-level ShareUpdateExclusiveLock on the table and the index so no other flow (for example, drop table) can change the metadata mid-flight.
4. Commit tx1. After the commit, newly started transactions no longer use the index for queries.
5. Begin transaction tx2.
6. Wait for all transactions to finish: some began before tx1 committed, and we must ensure none is still querying through the index.
7. Set the index's indisready to false and indisvalid to true(?); the original author flags this step as doubtful. The table's relcache entry is invalidated.
8. Commit tx2.
9. Begin transaction tx3.
10. Wait for all transactions to finish: some began before tx2 committed, and we must ensure none is still updating the index.
11. Take ShareUpdateExclusiveLock on the table and AccessExclusiveLock on the index, in preparation for deleting the index files.
12. Delete the index files.
13. Delete the index's row from pg_index and the index-related rows from pg_class and pg_attribute; refresh the caches.
14. Release the session-level ShareUpdateExclusiveLock.

The non-concurrent path is simpler: take AccessExclusiveLock on the table and the index, delete the index files and metadata, and refresh the caches.
For reasons of space, other index topics, such as rebuilding, insertion, and read-write concurrency on indexes, will be covered in a follow-up.
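The two removal paths just described map onto two SQL forms; a hedged sketch, assuming the concurrent form is exposed with the PostgreSQL 9.2 syntax from which openGauss derives:

```sql
-- Concurrent path: ShareUpdateExclusiveLock, does not block DML.
DROP INDEX CONCURRENTLY tab_name_idx;

-- Non-concurrent path: AccessExclusiveLock, blocks DML until done.
DROP INDEX tab_name_idx;
```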
diff --git "a/content/zh/post/zhengwen2/Windows\345\256\211\350\243\205\345\215\216\344\270\272openGauss\346\225\260\346\215\256\345\272\223\342\200\224\342\200\224openGauss\345\237\272\344\272\216x86\346\236\266\346\236\204openEuler\350\231\232\346\213\237\346\234\272\347\232\204\347\274\226\350\257\221\344\273\245\345\217\212JDBC\347\232\204\350\277\236\346\216\245.md" "b/content/zh/post/zhengwen2/Windows\345\256\211\350\243\205\345\215\216\344\270\272openGauss\346\225\260\346\215\256\345\272\223\342\200\224\342\200\224openGauss\345\237\272\344\272\216x86\346\236\266\346\236\204openEuler\350\231\232\346\213\237\346\234\272\347\232\204\347\274\226\350\257\221\344\273\245\345\217\212JDBC\347\232\204\350\277\236\346\216\245.md" new file mode 100644 index 0000000000000000000000000000000000000000..94cb590c82f1f303c390cea1e9371665a7f10796 --- /dev/null +++ "b/content/zh/post/zhengwen2/Windows\345\256\211\350\243\205\345\215\216\344\270\272openGauss\346\225\260\346\215\256\345\272\223\342\200\224\342\200\224openGauss\345\237\272\344\272\216x86\346\236\266\346\236\204openEuler\350\231\232\346\213\237\346\234\272\347\232\204\347\274\226\350\257\221\344\273\245\345\217\212JDBC\347\232\204\350\277\236\346\216\245.md" @@ -0,0 +1,387 @@

+++

title = "Installing the Huawei openGauss Database from Windows: Compiling openGauss on an x86 openEuler VM and Connecting via JDBC"

date = "2021-07-09"

tags = ["Installing the Huawei openGauss database from Windows: compiling openGauss on an x86 openEuler VM and connecting via JDBC"]

archives = "2021-07"

author = "School of Computer Science and Technology, Anhui University: 何迪亚, 顾宇轩"

summary = "Installing the Huawei openGauss database from Windows: compiling openGauss on an x86 openEuler VM and connecting via JDBC"

img = "/zh/post/zhengwen2/img/img29.png"

times = "12:30"

+++

# Installing the Huawei openGauss Database from Windows: Compiling openGauss on an x86 openEuler VM and Connecting via JDBC

## 1. Installing openEuler in a Hyper-V virtual machine

There are many virtualization platforms: VMware, Hyper-V, VirtualBox, and so on. For compatibility with WSL2, we use Hyper-V here to install x86 openEuler.

1. Enable the Hyper-V feature.
![screenshot](https://img-blog.csdnimg.cn/20210707111105364.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
![screenshot](https://img-blog.csdnimg.cn/20210707111117739.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
![screenshot](https://img-blog.csdnimg.cn/20210707111127315.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
2. Download the x86 openEuler image. Open the openEuler website; we use openEuler 20.03 LTS, the long-term-support release. Navigate to `openEuler-20.03-LTS-ISO-x86-64` and download `openEuler-20.03-LTS-x86_64-dvd.iso`.
![screenshot](https://img-blog.csdnimg.cn/20210707111806805.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
3. Open Hyper-V and attach the image; the VM-creation steps need no elaboration here.
![screenshot](https://img-blog.csdnimg.cn/20210707111905895.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
4. Once configured, the VM boots into the installer; just choose `Install openEuler`.
![screenshot](https://img-blog.csdnimg.cn/20210707112103893.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
5. This drops into the familiar CentOS-like installation screens; just follow the steps.
![screenshot](https://img-blog.csdnimg.cn/20210707112146184.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
6. We plan to log in directly as root, so just set a root password; no extra user is needed. Wait for the installation to finish. When prompted to reboot, shut down first and detach the image (set the DVD drive to none, otherwise it boots back into the installer), then start the VM.
![screenshot](https://img-blog.csdnimg.cn/20210707112309231.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
![screenshot](https://img-blog.csdnimg.cn/20210707112346173.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
7. It boots successfully: enter the root account and password and you are in openEuler. There is no graphical interface, just the command line (most people should like a lean openEuler; a GUI can be installed, but is not recommended).
![screenshot](https://img-blog.csdnimg.cn/20210707112421827.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)

## 2. Configuring the openEuler guest (GUI optional)

1. First add a software repository; the official openEuler 20.03 image ships without one, so `yum install` finds nothing out of the box.
step1: cd /etc/yum.repos.d/
step2: sudo vi openEuler_x86_64.repo
step3: append the following:
```
[base]
name=base
baseurl=https://repo.openeuler.org/openEuler-20.03-LTS/OS/x86_64/
enabled=1
gpgcheck=0
```

step4: exit vim, then run, line by line:
```powershell
yum repolist all
sudo yum-config-manager --enable base
```
![screenshot](https://img-blog.csdnimg.cn/20210707112646396.png#pic_center)
If your openEuler version or platform differs, adjust accordingly. yum should now work normally.

2. Connect to the Hyper-V guest from Windows PowerShell and disable the firewall. Run:

```powershell
yum install -y net-tools
yum install -y vim
```

Then check the IP with `ifconfig`:
![screenshot](https://img-blog.csdnimg.cn/20210707112931508.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
Open Windows PowerShell and run `ssh root@<the NIC's address>`, then enter the password. You can also configure key-based passwordless login, or connect with a tool like Xshell, or skip remote access entirely: whatever is most convenient.
![screenshot](https://img-blog.csdnimg.cn/20210707113017647.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
Next, disable the guest firewall so that opening ports later is painless (not advisable on a production server):

```
systemctl stop firewalld.service
systemctl disable firewalld.service
```

![screenshot](https://img-blog.csdnimg.cn/20210707113056958.png#pic_center)
You can also install a graphical desktop. It is not required, but it is friendlier if you are not at home on the command line:

```
yum install ukui

yum groupinstall fonts -y

systemctl set-default graphical.target

reboot
```
![screenshot](https://img-blog.csdnimg.cn/20210707113141499.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
3. Next, install openGauss's build dependencies:

```
yum install libaio-devel ncurses-devel pam-devel libffi-devel libtool readline-devel
zlib-devel python3-devel autoconf flex gcc gcc-c++ patch byacc bison -y
```
![screenshot](https://img-blog.csdnimg.cn/20210707113243812.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
4. Adjust the Python version. Check it first:
![screenshot](https://img-blog.csdnimg.cn/20210707113659687.png#pic_center)
Point the default python at Python 3.x:

```
rm -rf /usr/bin/python
ln -s /usr/bin/python3.7 /usr/bin/python
```

Check the version again:
![screenshot](https://img-blog.csdnimg.cn/20210707113811356.png#pic_center)
5. Set the character set and environment variables by appending them to /etc/profile (the heredoc bodies were truncated in the source and are kept as-is):

```
cat >>/etc/profile<
cat >>/etc/profile<
cat >>/etc/profile<
```

6. Tune memory behavior with these two statements:

```
echo 0 > /proc/sys/vm/swappiness
echo 3 > /proc/sys/vm/drop_caches
```

The first statement advises the system not to use swap; the second has it drop caches to free more memory. The first cannot absolutely prevent swapping (it is only advice), so here we take the bluntest, simplest approach and turn swap off entirely:

```
[root@db1 ~]# swapoff -a
```

7. Next, adjust the system parameters:

```
[root@db1 ~]# vi /etc/profile.d/performance.sh
```

As shown in the screenshot, press i to enter edit mode, comment out `sysctl -w vm.min_free_kbytes=112640 &> /dev/null` with a #, then press `Esc` and `:wq` to save and quit.
![screenshot](https://img-blog.csdnimg.cn/20210707114252352.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
8. Next, download the repository file to /etc/yum.repos.d/openEuler_x86_64.repo:

```
curl -o /etc/yum.repos.d/openEuler_x86_64.repo https://mirrors.huaweicloud.com/repository/conf/openeuler_x86_64.repo
```
![screenshot](https://img-blog.csdnimg.cn/20210707114342639.png#pic_center)
Then run:

```
cat /etc/yum.repos.d/openEuler_x86_64.repo
```

If the output looks like the following, it is correct.
![screenshot](https://img-blog.csdnimg.cn/20210707114616151.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center)
Then install the dependencies:

```
yum install libaio* -y
yum install libnsl* -y
```
## 3. Installing openGauss on the openEuler VM

This walkthrough builds openGauss 1.0.1. Download it from the address below; either fetch it elsewhere and copy it into openEuler, or wget it directly:

https://gitee.com/opengauss/openGauss-server/repository/archive/v1.0.1

You also need gcc 8.2.0, since this older openGauss release does not build with gcc 7.x:

http://mirror.koddos.net/gcc/releases/gcc-8.2.0/gcc-8.2.0.tar.gz

Once downloaded, put everything under /root/ (i.e. `cd /root/`), as shown.
(figure: image.png)
Install the dependencies:

```
yum install zlib-devel python3-devel autoconf flex gcc gcc-c++ patch byacc bison -y
```

Copy gcc into /root/openGauss-third_party/buildtools/gcc/:

```
cp gcc-releases-gcc-8.2.0.tar.gz /root/openGauss-third_party/buildtools/gcc/
```

openGauss officially supports openEuler on ARM; to support x86 openEuler, patch get_PlatForm_str.sh:

```
cd /root/openGauss-third_party/build/
vi get_PlatForm_str.sh
```

Add this branch:

```
elif [ "$os_name"x = "openEuler"x -a "$cpu_arc"x = "x86_64"x ]; then
    os_str=openeuler_x86_64
```

(figure: image.png)
Save and quit.

Now build the third-party components:

```
sh build_all.sh

# If you hit the error: "You should download gcc-8.2.0.tar.gz or gcc-8.2.0.zip and
# put it in /root/openGauss-third_party/build/../buildtools/gcc/", fix it like this:
cd /root/openGauss-third_party/buildtools/
mv gcc-releases-gcc-8.2.0.tar.gz gcc-8.2.0.tar.gz
cd /root/openGauss-third_party/build/
# then rerun the command
```
Running the commands above generates all the open-source third-party components the database build needs. To build a single component on its own, enter its directory and run its build.sh script, e.g. /root/openGauss-third_party/dependency/. The build results land in a binarylibs directory alongside openGauss-third_party; those files are used later when compiling openGauss-server.

PS: this step takes ages. I all but cried in the bathroom waiting for it.

When the build finishes (figure: image.png), set the environment variables:

```
cd /root/
vi bashrc
```

Append the following at the bottom, and get it exactly right; one slip here costs you dearly:

```
export CODE_BASE=/root/openGauss-server # Path of the openGauss-server file
export BINARYLIBS=$CODE_BASE/../binarylibs # Path of the binarylibs file
export GAUSSHOME=/opt/opengauss/
export GCC_PATH=$BINARYLIBS/buildtools/openeuler_x86_64/gcc8.2
export CC=$GCC_PATH/gcc/bin/gcc
export CXX=$GCC_PATH/gcc/bin/g++
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH
```

Then refresh the environment:

```
source bashrc
```

Set up the Makefile. Again, openGauss officially supports ARM openEuler, so for x86 openEuler the Makefile needs a patch:

```
cd openGauss-server
vi ./src/gausskernel/Makefile
```

Insert the following (the part highlighted in green in the screenshot):

```
else ifeq ($(PLAT_FORM_STR), openeuler_x86_64)
    cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0'
```

(figure: image.png)
Configure a Release build:

```
./configure --gcc-version=8.2.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety
```

Start the compile:

```
make -j
```

(figure: image.png)

Output like the screenshot above means the compile succeeded.

The last step is make install.
(figure: image.png)
The software installs to $GAUSSHOME,

with the binaries in $GAUSSHOME/bin.

openGauss is now usable in the x86 openEuler VM.

Start the service with:

```
gs_om -t start
```

Connect to the database with:

```
sudo gsql
```

Once connected, you can change the database's port, users, and passwords.
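A few first statements confirm the instance works and perform the adjustments just mentioned; a hedged sketch, with the user name and password as placeholders:

```sql
-- Confirm the server responds and see which port it listens on.
SELECT version();
SHOW port;

-- Change a database user's password (placeholder names; openGauss
-- enforces its own password-complexity rules).
ALTER USER myuser IDENTIFIED BY 'MyNewPassw0rd@';
```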
## 4. Writing and running the JDBC connection

1. Download the JDK:

https://www.oracle.com/java/technologies/javase/javase-jdk8-downloads.html

An open-source build of JDK 8 works fine.

- Configure the JDK environment variables:

Right-click "This PC", choose "Properties", then "Advanced system settings".
(figure: image.png)
Under "Environment Variables", create a system variable JAVA_HOME and set its value to the JDK installation directory.
(figure: image.png)
(figure: image.png)
Edit the system variable "path"
(figure: image.png)
and append `%JAVA_HOME%\bin;%JAVA_HOME%\jre\bin;` to its value.
Create a system variable CLASSPATH containing just `.`.
(figure: image.png)
Then open Windows PowerShell and run `java -version`; output like the screenshot means the installation succeeded.
(figure: image.png)

2. Writing the JDBC connection

Inserting data and connecting from the IDEA IDE needs no elaboration; here is the core JDBC code:

```
import java.sql.*;

public class GaussDBMySQLDemo {

    static final String JDBC_DRIVER = "org.postgresql.Driver";
    // Fill in your VM's IP address, the database port, and the database name.
    static final String DB_URL = "jdbc:postgresql://<vm-ip>:<port>/<database>";

    // Database user name and password; change to match your setup.
    static final String USER = "root";
    static final String PASS = "123456";

    public static void main(String[] args) {
        Connection conn = null;
        Statement stmt = null;
        try {
            // Register the JDBC driver.
            Class.forName(JDBC_DRIVER);

            // Open the connection.
            System.out.println("connecting...");
            conn = DriverManager.getConnection(DB_URL, USER, PASS);

            // Create the statement object.
            stmt = conn.createStatement();

            // Run a query.
            String sql = "SELECT id, name, url FROM websites";
            ResultSet rs = stmt.executeQuery(sql);

            // Create a table (DDL goes through executeUpdate, not executeQuery).
            sql = "CREATE TABLE COMPANY1 " +
                  "(ID INT PRIMARY KEY NOT NULL," +
                  " NAME TEXT NOT NULL, " +
                  " AGE INT NOT NULL, " +
                  " ADDRESS CHAR(50), " +
                  " SALARY REAL)";
            stmt.executeUpdate(sql);

            // Close the statement.
            stmt.close();
            // Close the connection.
            conn.close();
        } catch (Exception e) {
            System.err.println(e.getClass().getName() + ": " + e.getMessage());
            System.exit(0);
        }
        System.out.println("Table created successfully");
    }
}
```

diff --git a/content/zh/post/zhengwen2/img/img20.png b/content/zh/post/zhengwen2/img/img20.png new file mode 100644 index 0000000000000000000000000000000000000000..ce35c3cd313c8e4ed939ae18b91b9a64767ab504 Binary files /dev/null and b/content/zh/post/zhengwen2/img/img20.png differ
diff --git a/content/zh/post/zhengwen2/img/img21.png b/content/zh/post/zhengwen2/img/img21.png new file mode 100644 index 0000000000000000000000000000000000000000..1da9e55bd25cbc7cfc6fdef1800b4c95b077829b Binary files /dev/null and b/content/zh/post/zhengwen2/img/img21.png differ
diff --git a/content/zh/post/zhengwen2/img/img22.jpg b/content/zh/post/zhengwen2/img/img22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ebe22cb03c6ee1e735b29bce766c1e10d334f0c Binary files /dev/null and b/content/zh/post/zhengwen2/img/img22.jpg differ
diff --git a/content/zh/post/zhengwen2/img/img24.png b/content/zh/post/zhengwen2/img/img24.png new file mode 100644 index 0000000000000000000000000000000000000000..2af578504062e5fa7a7aaf7e1c2014531e51e9c2 Binary files /dev/null and b/content/zh/post/zhengwen2/img/img24.png differ
diff --git a/content/zh/post/zhengwen2/img/img25.png b/content/zh/post/zhengwen2/img/img25.png new file mode 100644 index 0000000000000000000000000000000000000000..b71bb7d740d0f375bbea6116ffde9175c0dbcacf Binary files /dev/null and b/content/zh/post/zhengwen2/img/img25.png differ
diff --git a/content/zh/post/zhengwen2/img/img28.png b/content/zh/post/zhengwen2/img/img28.png new file mode 100644 index 0000000000000000000000000000000000000000..31e776c19ddc9b62b4b88171d015b1b94ff2b022 Binary files /dev/null and b/content/zh/post/zhengwen2/img/img28.png differ
diff --git a/content/zh/post/zhengwen2/img/img29.png b/content/zh/post/zhengwen2/img/img29.png new file mode 100644 index 0000000000000000000000000000000000000000..5537c95b900978a3020269be7ec52ce914224844 Binary files /dev/null and b/content/zh/post/zhengwen2/img/img29.png differ
diff --git a/content/zh/post/zhengwen2/img/img33.png b/content/zh/post/zhengwen2/img/img33.png new file mode 100644 index 0000000000000000000000000000000000000000..b903c7f8d5a3ba8b66b2d6be883a4bac7230915e Binary files /dev/null and b/content/zh/post/zhengwen2/img/img33.png differ
diff --git a/content/zh/post/zhengwen2/img/img38.png b/content/zh/post/zhengwen2/img/img38.png
new file mode 100644 index 0000000000000000000000000000000000000000..86a420b92fb8289658d807d49f137b6d13862f6d Binary files /dev/null and b/content/zh/post/zhengwen2/img/img38.png differ
diff --git a/content/zh/post/zhengwen2/img/img4.png b/content/zh/post/zhengwen2/img/img4.png new file mode 100644 index 0000000000000000000000000000000000000000..6b7b474933a31c6a20d0d1708e8909163293b4ad Binary files /dev/null and b/content/zh/post/zhengwen2/img/img4.png differ
diff --git a/content/zh/post/zhengwen2/img/img5.png b/content/zh/post/zhengwen2/img/img5.png new file mode 100644 index 0000000000000000000000000000000000000000..830c8bc490a1b830e759df1f04b453909a097406 Binary files /dev/null and b/content/zh/post/zhengwen2/img/img5.png differ
diff --git "a/content/zh/post/zhengwen2/openGauss \345\244\207\344\273\275\346\201\242\345\244\215gs_probackup.md" "b/content/zh/post/zhengwen2/openGauss \345\244\207\344\273\275\346\201\242\345\244\215gs_probackup.md" new file mode 100644 index 0000000000000000000000000000000000000000..5f531a5a5b8fc13950e493580d497646f59ea884 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss \345\244\207\344\273\275\346\201\242\345\244\215gs_probackup.md" @@ -0,0 +1,738 @@

+++

title = "openGauss Backup and Restore with gs_probackup"

date = "2021-07-09"

tags = ["openGauss backup and restore with gs_probackup"]

archives = "2021-07"

author = "Anacesthesia"

summary = "openGauss backup and restore with gs_probackup"

img = "/zh/post/zhengwen2/img/img21.png"

times = "12:30"

+++

# openGauss Backup and Restore with gs_probackup

Machines: 2
Hardware: x86 virtual machines with 4 GB of RAM
Software: CentOS 7.6 x64
Database version: openGauss 2.0.0, built from source
Node: node1 192.168.126.129
Node: node2 192.168.126.130

## 1.1 Background

gs_probackup is a tool for managing openGauss database backup and restore. It takes periodic backups of an openGauss instance so that the server can be recovered when the database fails.

- It takes physical backups of a standalone database or of a cluster's primary node.
- It can back up the contents of external directories: scripts, configuration files, log files, dump files, and so on.
- It supports incremental, scheduled, and remote backups.
- Retention policies for backups can be configured.

## 1.2 Prerequisites

- Backups must be run by the user that runs the database server.
- The backed-up and restored database servers must share the same major version.
- To back up a database remotely over ssh, install the same major database version on the local and remote hosts, and set up passwordless ssh from the local backup user to the remote database user with `ssh-copy-id remote_user@remote_host`.
- In remote mode, only the add-instance, backup, and restore subcommands are available.
- Stop the gaussdb process before using the restore subcommand.
## 1.4 Local backup and restore with gs_probackup

### 1.4.1 Enable enable_cbm_tracking to track data-page changes

On the node2 primary:

```sql
postgres=# show enable_cbm_tracking;
 enable_cbm_tracking
---------------------
 off
(1 row)
postgres=# alter system set enable_cbm_tracking=on;
ALTER SYSTEM SET
postgres=#
```

### 1.4.2 Initialize the local backup directory

On node2:

```powershell
mkdir -p /opt/ogdata01
chown -R omm: /opt/ogdata01
chmod -R 700 /opt/ogdata01

cd /opt
mkdir opgaussbak
chown -R omm:dbgrp opgaussbak/
[omm@node2 ~]$ gs_probackup init -B /opt/opgaussbak
INFO: Backup catalog '/opt/opgaussbak' successfully inited
[omm@node2 ~]$
[omm@node2 opgaussbak]$ ls
backups wal
```
Initializing the directory actually just creates the backups/ and wal/ subdirectories under it, for backup files and WAL files respectively.

Enable the database's archive mode by editing postgresql.conf:

```xml
wal_level = hot_standby
archive_mode = on
archive_dest = '/opt/archive6543'
logging_collector = on
log_directory = 'pg_log'
log_filename = 'postgresql.log'
log_file_mode = 0600
```

### 1.4.3 Add a local backup instance

On node2:

```powershell
su - omm
[omm@node2 ~]$ gs_probackup add-instance -B /opt/opgaussbak -D /opt/ogdata --instance node2bak
INFO: Instance 'node2bak' successfully inited
```
List the backup sets:

```powershell
[omm@node2 ~]$ gs_probackup show -B /opt/opgaussbak/
```

### 1.4.4 Take a local full backup

```powershell
[omm@node2 ~]$ gs_probackup backup -B /opt/opgaussbak/ --instance node2bak -b full -D /opt/ogdata -d postgres -p 6543 --progress --log-filename=full_postgres_log --retention-redundancy=2 --compress --note='This is full backup set.'

INFO: Progress: (2044/2044). Validate file "database_map"
INFO: Backup QSDACN data files are valid
INFO: Backup QSDACN resident size: 702MB
INFO: Backup QSDACN completed
[omm@node2 ~]$
```

### 1.4.5 Inspect the local full backup set

```powershell
gs_probackup show -B /opt/opgaussbak/
```
![](https://img-blog.csdnimg.cn/img_convert/cc70e6a0416aaa64af04d7cd97066d2f.png)

### 1.4.6 First incremental backup

Create some test data in postgres before the incremental:

```powershell
postgres=# create table t4(id number);
CREATE TABLE
postgres=# select * from t4;
 id
----
(0 rows)
postgres=# insert into t4 values('4');
INSERT 0 1
postgres=#
postgres=# select * from t4;
 id
----
  4
(1 row)
```

Run on node2:

```powershell
gs_probackup backup -B /opt/opgaussbak/ --instance node2bak -b PTRACK -D /opt/ogdata -d postgres -p 6543 --progress --log-filename=incr1.log --delete-expired --delete-wal --retention-redundancy=2 --compress --note='This is the first incremental backup set.'
INFO: Backup QSDAT3 data files are valid
INFO: Backup QSDAT3 resident size: 290MB
INFO: Backup QSDAT3 completed
LOG: REDUNDANCY=2
INFO: Evaluate backups by retention
INFO: Backup QSDAT3, mode: PTRACK, status: OK. Redundancy: 1/2, Time Window: 0d/0d. Active
INFO: Backup QSDACN, mode: FULL, status: OK. Redundancy: 1/2, Time Window: 0d/0d. Active
INFO: Backup QSDA67, mode: FULL, status: ERROR. Redundancy: 2/2, Time Window: 0d/0d. Expired
INFO: Backup QSD7SD, mode: FULL, status: ERROR. Redundancy: 3/2, Time Window: 0d/0d. Expired
INFO: Backup QSD7K2, mode: FULL, status: ERROR. Redundancy: 4/2, Time Window: 0d/0d. Expired
INFO: Backup QSD7DS, mode: FULL, status: ERROR. Redundancy: 5/2, Time Window: 0d/0d. Expired
LOG: Consider backup QSDA67 for purge
WARNING: Process 21875 which used backup QSDA67 no longer exists
INFO: Delete: QSDA67 1970-01-01 08:00:00+08
INFO: Progress: (1/4). Delete file "/opt/opgaussbak/backups/node2bak/QSDA67/database"
INFO: Progress: (2/4). Delete file "/opt/opgaussbak/backups/node2bak/QSDA67/backup.pid"
INFO: Progress: (3/4). Delete file "/opt/opgaussbak/backups/node2bak/QSDA67/backup.control"
INFO: Progress: (4/4). Delete file "/opt/opgaussbak/backups/node2bak/QSDA67"
LOG: Consider backup QSD7SD for purge
WARNING: Process 20898 which used backup QSD7SD no longer exists
INFO: Delete: QSD7SD 1970-01-01 08:00:00+08
INFO: Progress: (1/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7SD/database"
INFO: Progress: (2/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7SD/backup.pid"
INFO: Progress: (3/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7SD/backup.control"
INFO: Progress: (4/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7SD"
LOG: Consider backup QSD7K2 for purge
WARNING: Process 20728 which used backup QSD7K2 no longer exists
INFO: Delete: QSD7K2 1970-01-01 08:00:00+08
INFO: Progress: (1/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7K2/database"
INFO: Progress: (2/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7K2/backup.pid"
INFO: Progress: (3/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7K2/backup.control"
INFO: Progress: (4/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7K2"
LOG: Consider backup QSD7DS for purge
WARNING: Process 20645 which used backup QSD7DS no longer exists
INFO: Delete: QSD7DS 1970-01-01 08:00:00+08
INFO: Progress: (1/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7DS/database"
INFO: Progress: (2/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7DS/backup.pid"
INFO: Progress: (3/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7DS/backup.control"
INFO: Progress: (4/4). Delete file "/opt/opgaussbak/backups/node2bak/QSD7DS"
INFO: There are no backups to merge by retention policy
INFO: Purging finished
INFO: There is no WAL to purge by retention policy
```

### 1.4.7 Inspect the local incremental backup set

```powershell
gs_probackup show -B /opt/opgaussbak/
```
![](https://img-blog.csdnimg.cn/img_convert/4cc1d23c1abf758be22be83ed608a8a6.png)

### 1.4.8 Second incremental backup

Create the test table t5 (the pasted session below is tidied):

```powershell
postgres=# create table t5(id number);
CREATE TABLE
postgres=# insert into t5 values('5');
INSERT 0 1
postgres=# select * from t5;
 id
----
  5
(1 row)

[omm@node2 opgaussbak]$ gs_probackup backup -B /opt/opgaussbak/ --instance node2bak -b PTRACK -D /opt/ogdata -d postgres -p 6543 --progress --log-filename=incr1.log --delete-expired --delete-wal --retention-redundancy=2 --compress --note='This is the second incremental backup set.'
.....................
INFO: Backup QSER8W data files are valid
INFO: Backup QSER8W resident size: 305MB
INFO: Backup QSER8W completed
LOG: REDUNDANCY=2
INFO: Evaluate backups by retention
INFO: Backup QSER8W, mode: PTRACK, status: OK. Redundancy: 1/2, Time Window: 0d/0d. Active
INFO: Backup QSDAT3, mode: PTRACK, status: OK. Redundancy: 1/2, Time Window: 0d/0d. Active
INFO: Backup QSDACN, mode: FULL, status: OK. Redundancy: 1/2, Time Window: 0d/0d. Active
INFO: There are no backups to merge by retention policy
INFO: There are no backups to delete by retention policy
INFO: There is no WAL to purge by retention policy
```

### 1.4.9 Inspect the second local incremental backup set

![](https://img-blog.csdnimg.cn/img_convert/176a09e6e3051a07567376f7c74fbddb.png)

### 1.4.10 Delete the database and perform a full restore

```powershell
gs_ctl stop
rm -rf /opt/ogdata
```
![](https://img-blog.csdnimg.cn/img_convert/4e641ee29e3a34946891a8f9371b89f9.png)

```powershell
[omm@node2 ~]$ gs_probackup restore -B /opt/opgaussbak/ -D /opt/ogdata -i QSDACN --instance node2bak
LOG: Restore begin.
LOG: there is no file tablespace_map
LOG: check tablespace directories of backup QSDACN
LOG: check external directories of backup QSDACN
WARNING: Process 22144 which used backup QSDACN no longer exists
INFO: Validating backup QSDACN
INFO: Backup QSDACN data files are valid
LOG: Thread [1]: Opening WAL segment "/opt/opgaussbak/backups/node2bak/QSDACN/database/pg_xlog/00000001000000000000009C"
INFO: Backup QSDACN WAL segments are valid
INFO: Backup QSDACN is valid.
INFO: Restoring the database from backup at 2021-04-30 16:02:47+08
LOG: there is no file tablespace_map
LOG: Restore directories and symlinks...
INFO: Start restoring backup files. PGDATA size: 1296MB
LOG: Start thread 1
INFO: Backup files are restored. Transfered bytes: 1312MB, time elapsed: 21s
INFO: Restore incremental ratio (less is better): 101% (1312MB/1296MB)
INFO: Syncing restored files to disk
INFO: Restored backup files are synced, time elapsed: 0
INFO: Restore of backup QSDACN completed.
```
Start the database and check it:
```powershell
gs_ctl start
gsql postgres -p 6543
```
![](https://img-blog.csdnimg.cn/img_convert/e8f4971bf2d6705a5c61d58102bec40c.png)
The full restore contains neither the first nor the second incremental's data, so next we restore the first incremental.

### 1.4.11 Restore full + first incremental and verify the data

Pick the incremental point to restore: we restore to ID QSDAT3, which contains the newly created table t4.
![](https://img-blog.csdnimg.cn/img_convert/d583637749e9e2c6c8ae76b0f9861294.png)
Run the restore:
```powershell
gs_probackup restore -B /opt/opgaussbak/ -D /opt/ogdata01 -i QSDAT3 --instance node2bak
gs_ctl start -D /opt/ogdata01/
gsql postgres -p 6543
```
Table t4 from this backup set is now visible.
![](https://img-blog.csdnimg.cn/img_convert/40cabc54bb039bddbda839e853468f36.png)

### 1.4.12 Restore full + second incremental and verify the data

Pick the incremental point to restore: we restore to ID QSERBG, which contains the newly created table t5.
![](https://img-blog.csdnimg.cn/img_convert/5c5d0da9ba07ff1ac7f35ec9f3b6bd85.png)
```powershell
gs_probackup restore -B /opt/opgaussbak/ -D /opt/ogdata -i QSERBG --instance node2bak
gs_ctl start -D /opt/ogdata01
gsql postgres -p 6543
```
![](https://img-blog.csdnimg.cn/img_convert/d448648d2a1ce38dc84db1aae8f49b79.png)

### 1.4.13 Dissecting the gs_probackup configuration files

pg_probackup.conf before any backup policy is configured:

```powershell
[root@node2 node2bak]# pwd
/opt/opgaussbak/backups/node2bak
[root@node2 node2bak]# cat pg_probackup.conf
# Backup instance information
pgdata = /opt/ogdata
system-identifier = 3422924873445789
```
After configuring a policy:
```powershell
gs_probackup set-config -B /opt/opgaussbak/ --instance node2bak --retention-redundancy=2 --compress-algorithm=zlib --compress-level=6
cd /opt/opgaussbak/backups/node2bak
[root@node2 node2bak]# cat pg_probackup.conf
# Backup instance information
pgdata = /opt/ogdata
system-identifier = 3422924873445789
# Retention parameters
retention-redundancy = 2
# Compression parameters
compress-algorithm = zlib
compress-level = 6
```
The backup.control file in a backup-set directory describes that backup set's attributes:
```powershell
[omm@node2 QSEVBS]$ cat backup.control
#Configuration
backup-mode = FULL
stream = true
compress-alg = zlib
compress-level = 1
from-replica = false

#Compatibility
block-size = 8192
xlog-block-size = 8192
checksum-version = 0
program-version = 2.4.2
server-version = 9.2

#Result backup info
timelineid = 1
start-lsn = 0/A8000028
stop-lsn = 0/A80002F8
start-time = '2021-05-01 12:33:28+08'
merge-time = '2021-05-01 12:36:05+08'
end-time = '2021-05-01 12:36:33+08'
recovery-xid = 14835
recovery-time = '2021-05-01 12:33:29+08'
recovery-name = 'backup QSEV3L'
data-bytes = 1152575411
wal-bytes = 16777216
uncompressed-bytes = 2429062395
pgdata-bytes = 1342753913
status = OK
note = 'This is the first incremental backup set.'
```
After changing the backup's metadata:

```powershell
gs_probackup set-backup -B /opt/opgaussbak/ --instance node2bak -i QSEVBS --note 'backup.control setting' --ttl 20d
INFO: Backup QSEVBS is pinned until '2021-05-21 12:33:29+08'
INFO: Adding note to backup QSEVBS: 'backup.control setting'

[root@node2 QSEVBS]# cat backup.control
#Configuration
backup-mode = FULL
stream = true
compress-alg = zlib
compress-level = 1
from-replica = false

#Compatibility
block-size = 8192
xlog-block-size = 8192
checksum-version = 0
program-version = 2.4.2
server-version = 9.2

#Result backup info
timelineid = 1
start-lsn = 0/A8000028
stop-lsn = 0/A80002F8
start-time = '2021-05-01 12:33:28+08'
merge-time = '2021-05-01 12:36:05+08'
end-time = '2021-05-01 12:36:33+08'
recovery-xid = 14835
recovery-time = '2021-05-01 12:33:29+08'
expire-time = '2021-05-21 12:33:29+08'
recovery-name = 'backup QSEV3L'
data-bytes = 1152575411
wal-bytes = 16777216
uncompressed-bytes = 2429062395
pgdata-bytes = 1342753913
status = OK
note = 'backup.control setting'
content-crc = 1707874668
```

### 1.4.14 Other common commands
#### 1.4.14.1 Viewing backup set details

```powershell
[omm@node2 ~]$ gs_probackup show -B /opt/opgaussbak/ --instance node2bak -i QSERBG
#Configuration
backup-mode = PTRACK
stream = true
compress-alg = zlib
compress-level = 1
from-replica = false
#Compatibility
block-size = 8192
xlog-block-size = 8192
checksum-version = 0
program-version = 2.4.2
server-version = 9.2
#Result backup info
timelineid = 1
start-lsn = 0/A2000028
stop-lsn = 0/A20002F8
start-time = '2021-05-01 11:06:52+08'
end-time = '2021-05-01 11:07:03+08'
recovery-xid = 14544
recovery-time = '2021-05-01 11:06:53+08'
recovery-name = 'backup QSERBG'
data-bytes = 285557584
wal-bytes = 16777216
uncompressed-bytes = 268780252
pgdata-bytes = 1342696865
status = OK
parent-backup-id = 'QSER8W'
note = 'This is the second incremental backup set.'
content-crc = 2279034550
[omm@node2 ~]$
```

#### 1.4.14.2 Deleting a backup instance

```powershell
[omm@node2 ~]$ gs_probackup del-instance -B /opt/opgaussbak/ --instance node2bak
WARNING: Process 24149 which used backup QSDACN no longer exists
WARNING: Process 24149 which used backup QSDAT3 no longer exists
WARNING: Process 24149 which used backup QSER8W no longer exists
WARNING: Process 24149 which used backup QSERBG no longer exists
INFO: Delete: QSERBG 2021-05-01 11:06:53+08
INFO: Delete: QSER8W 2021-05-01 11:05:22+08
INFO: Delete: QSDAT3 2021-04-30 16:12:47+08
INFO: Delete: QSDACN 2021-04-30 16:04:26+08
INFO: Instance 'node2bak' successfully deleted
[omm@node2 ~]$
```

#### 1.4.14.3 Merging backup sets

Current backup sets:
![](https://img-blog.csdnimg.cn/img_convert/3c25bb844474169b46e8035dc1786f22.png)
```powershell
[omm@node2 ~]$ gs_probackup merge -B /opt/opgaussbak/ -i QSEVBS --instance node2bak
INFO: Merge started
WARNING: Process 24783 which used backup QSEV3L no longer exists
WARNING: Process 24839 which used backup QSEV8K no longer exists
WARNING: Process 24869 which used backup QSEVBS no longer exists
INFO: Merging backup QSEVBS with parent chain
INFO: Validate parent chain for backup QSEVBS
INFO: Validating backup QSEV3L
INFO: Backup QSEV3L data files are valid
INFO: Validating backup QSEV8K
INFO: Backup QSEV8K data files are valid
INFO: Validating backup QSEVBS
INFO: Backup QSEVBS data files are valid
LOG: Restore directories and symlinks...
INFO: Start merging backup files
LOG: Creating page header map "/opt/opgaussbak/backups/node2bak/QSEV3L/page_header_map_tmp"
INFO: Backup files are successfully merged, time elapsed: 27s
INFO: Delete: QSEV8K 2021-05-01 12:31:33+08
INFO: Delete: QSEVBS 2021-05-01 12:33:29+08
LOG: Rename /opt/opgaussbak/backups/node2bak/QSEV3L to /opt/opgaussbak/backups/node2bak/QSEVBS
INFO: Rename merged full backup QSEV3L to QSEVBS
INFO: Validating backup QSEVBS
INFO: Backup QSEVBS data files are valid
INFO: Merge of backup QSEVBS completed
```
View the merged backup set:
```powershell
gs_probackup show -B /opt/opgaussbak/
```
![](https://img-blog.csdnimg.cn/img_convert/abbc38843cdb30fa2a5b597f2db03272.png)
The rough flow:
1. Validate the backup sets.
2. Merge them.
3. Delete the merged-away sets.
4. Rename QSEV3L to QSEVBS (the original full backup's ID is renamed to the most recent, just-deleted incremental's ID: QSEVBS).
5. Validate that the new QSEVBS backup set's data files are intact.

#### 1.4.14.4 Validating the merged backup set

```powershell
[omm@node2 ~]$ gs_probackup validate -B /opt/opgaussbak/ -i QSEVBS --instance node2bak
LOG: Validate begin.
INFO: Validating backup QSEVBS
INFO: Backup QSEVBS data files are valid
LOG: Thread [1]: Opening WAL segment "/opt/opgaussbak/backups/node2bak/QSEVBS/database/pg_xlog/0000000100000000000000A8"
INFO: Backup QSEVBS WAL segments are valid
INFO: Backup QSEVBS is valid.
INFO: Validate of backup QSEVBS completed.
[omm@node2 ~]$
```

## 1.5 Remote backup and restore with gs_probackup

### 1.5.1 Configure a remote replication user

```sql
postgres=# create user rep1 with sysadmin replication identified by 'asdfg.1314';
NOTICE: The encrypted password contains MD5 ciphertext, which is not secure.
CREATE ROLE
```
rep1's privileges: sysadmin + replication.

Set up ssh trust:
1. Step one: generate a key pair on the local machine with ssh-keygen:
```powershell
$ ssh-keygen
```
2. Step two: copy the public key to the remote machine with ssh-copy-id:
```powershell
ssh-copy-id -i .ssh/id_rsa.pub omm@192.168.126.130
```
```bash
Note:
ssh-copy-id appends the key to the ~/.ssh/authorized_keys file on the remote machine.
```
3. Step three: log in to the remote machine without a password.

### 1.5.2 Enable enable_cbm_tracking to track data-page changes

On the node2 primary:
```sql
postgres=# show enable_cbm_tracking;
 enable_cbm_tracking
---------------------
 off
(1 row)
postgres=# alter system set enable_cbm_tracking=on;
ALTER SYSTEM SET
postgres=#
```

### 1.5.3 Initialize the backup directory on the other host

On node1:
```powershell
mkdir -p /opt/ogdata01
chown -R omm: /opt/ogdata01
chmod -R 700 /opt/ogdata01

cd /opt
mkdir opgaussbak
chown -R omm:dbgrp opgaussbak/
[omm@node1 ~]$ gs_probackup init -B /opt/opgaussbak
INFO: Backup catalog '/opt/opgaussbak' successfully inited
```
As before, initialization just creates the backups/ and wal/ subdirectories under the backup directory, for backup files and WAL files.

### 1.5.4 Add a remote backup instance

On node1:
```powershell
[omm@node1 ~]$ gs_probackup add-instance -B /opt/opgaussbak -D /opt/ogdata --instance node1bak --remote-proto=ssh --remote-host=192.168.126.130 --remote-port=22 --remote-path=/opt/og/bin --remote-user=omm
```
Error 1:
```powershell
LOG: Start SSH client process, pid 20743
ERROR: Agent error: /opt/og/bin/gs_probackup: error while loading shared libraries: libssl.so.1.1: cannot open shared object file: No such file or directory
```
Fix: by default, executables look for shared libraries only under lib and /usr/lib; if a library is installed elsewhere, update ld.so.conf:
```powershell
vi /etc/ld.so.conf
```
Append the lib path to the file (do not add "include"):
```powershell
[root@node1 ~]# cat /etc/ld.so.conf
include ld.so.conf.d/*.conf
/opt/og/lib
```
Then run ldconfig to apply the change:
```powershell
sudo /sbin/ldconfig -v
```
Run again on node1:
```powershell
[omm@node1 ~]$ /opt/og/bin/gs_probackup add-instance -B /opt/opgaussbak -D /opt/ogdata --instance node1bak --remote-proto=ssh --remote-host=192.168.126.130 --remote-port=22 --remote-path=/opt/og/bin --remote-user=omm
LOG: Start SSH client process, pid 23822
INFO: Instance 'node1bak' successfully inited
[omm@node1 ~]$
```

### 1.5.5 Take a remote full backup

On node1:

```powershell
gs_probackup backup -B /opt/opgaussbak --instance=node1bak -b full -D /opt/ogdata -h 192.168.126.130 -p 6543 -d postgres -U rep1 -W asdfg.1314 --remote-host=192.168.126.130 --remote-proto=ssh --remote-port=22 --remote-user=omm --remote-path=/opt/og/bin
```
It fails as follows:
```powershell
INFO: Backup start, gs_probackup version: 2.4.2, instance: node1bak, backup ID: QSIXPL, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1
LOG: Backup destination is initialized
WARNING: This openGauss instance was initialized without data block checksums. gs_probackup have no way to detect data block corruption without them. Reinitialize PGDATA with option '--data-checksums'.
LOG: Start SSH client process, pid 23937
LOG: Database backup start
ERROR: could not connect to database postgres: FATAL: no pg_hba.conf entry for replication connection from host "192.168.126.129", user "rep1", SSL off
WARNING: backup in progress, stop backup
INFO: wait for pg_stop_backup()
INFO: pg_stop backup() successfully executed
WARNING: Backup QSIXPL is running, setting its status to ERROR
```
Fix: allow node1 (192.168.126.129) to connect to node2 for backup by adding to pg_hba.conf:
```powershell
host replication rep1 192.168.126.129/32 md5
```
Rerun; the log now reads:
```powershell
LOG: SSH process 24102 is terminated with status 0
INFO: Data files are transferred, time elapsed: 1m:38s
INFO: wait for pg_stop_backup()
INFO: pg_stop backup() successfully executed
LOG: stop_lsn: 0/C10002F8
LOG: Looking for LSN 0/C10002F8 in segment: 0000000100000000000000C1
LOG: Found WAL segment: /opt/opgaussbak/backups/node1bak/QSIYFH/database/pg_xlog/0000000100000000000000C1
LOG: Thread [0]: Opening WAL segment "/opt/opgaussbak/backups/node1bak/QSIYFH/database/pg_xlog/0000000100000000000000C1"
LOG: Found LSN: 0/C10002F8
[2021-05-03 17:33:07]:(null): not renaming "/opt/opgaussbak/backups/node1bak/QSIYFH/database/pg_xlog/0000000100000000000000C2", segment is not complete.
LOG: finished streaming WAL at 0/C2000140 (timeline 1)
LOG: Getting the Recovery Time from WAL
LOG: Thread [0]: Opening WAL segment "/opt/opgaussbak/backups/node1bak/QSIYFH/database/pg_xlog/0000000100000000000000C1"
INFO: Syncing backup files to disk
INFO: Backup files are synced, time elapsed: 4s
INFO: Validating backup QSIYFH
INFO: Backup QSIYFH data files are valid
INFO: Backup QSIYFH resident size: 1314MB
INFO: Backup QSIYFH completed
```

### 1.5.6 Inspect the remote full backup

```powershell
[omm@node1 ~]$ gs_probackup show -B /opt/opgaussbak
```
![](https://img-blog.csdnimg.cn/img_convert/737a5c65f20891ee24a3ad4ce9806fb1.png)

### 1.5.7 Remote incremental backup

On the node2 primary, t4 itself holds just one row:

```sql
postgres=# \d t4;
     Table "public.t4"
 Column |  Type   | Modifiers
--------+---------+-----------
 id     | numeric |

postgres=# select * from t4;
 id
----
  4
(1 row)
```
Insert data so that t4 holds three rows, then take an incremental backup:
```sql
postgres=# insert into t4 values('4');
INSERT 0 1
postgres=#
postgres=# select * from t4;
 id
----
  4
  4
  4
(3 rows)
```

```powershell
[omm@node1 ~]$ gs_probackup backup -B /opt/opgaussbak --instance=node1bak -b PTRACK -D /opt/ogdata -h 192.168.126.130 -p 6543 -d postgres -U rep1 -W asdfg.1314 --remote-host=192.168.126.130 --remote-proto=ssh --remote-port=22 --remote-user=omm --remote-path=/opt/og/bin
```
The log reads:

```powershell
INFO: Backup start, gs_probackup version: 2.4.2, instance: node1bak, backup ID: QSIZ0I, backup mode: PTRACK, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1
LOG: Backup destination is initialized
WARNING: This openGauss instance was initialized without data block checksums. gs_probackup have no way to detect data block corruption without them. Reinitialize PGDATA with option '--data-checksums'.
LOG: Start SSH client process, pid 24279
LOG: Database backup start
LOG: Latest valid FULL backup: QSIYFH
INFO: Parent backup: QSIYFH
LOG: started streaming WAL at 0/C3000000 (timeline 1)
[2021-05-03 17:44:02]: check identify system success
[2021-05-03 17:44:02]: send START_REPLICATION 0/C3000000 success
[2021-05-03 17:44:02]: keepalive message is received

.......

..........
+INFO: wait for pg_stop_backup() +[2021-05-03 17:44:25]: keepalive message is received +INFO: pg_stop backup() successfully executed +LOG: stop_lsn: 0/C30002F8 +LOG: Looking for LSN 0/C30002F8 in segment: 0000000100000000000000C3 +LOG: Found WAL segment: /opt/opgaussbak/backups/node1bak/QSIZ0I/database/pg_xlog/0000000100000000000000C3 +LOG: Thread [0]: Opening WAL segment "/opt/opgaussbak/backups/node1bak/QSIZ0I/database/pg_xlog/0000000100000000000000C3" +LOG: Found LSN: 0/C30002F8 +[2021-05-03 17:44:30]:(null): not renaming "/opt/opgaussbak/backups/node1bak/QSIZ0I/database/pg_xlog/0000000100000000000000C4", segment is not complete. +LOG: finished streaming WAL at 0/C4000140 (timeline 1) +LOG: Getting the Recovery Time from WAL +LOG: Thread [0]: Opening WAL segment "/opt/opgaussbak/backups/node1bak/QSIZ0I/database/pg_xlog/0000000100000000000000C3" +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 3s +INFO: Validating backup QSIZ0I +INFO: Backup QSIZ0I data files are valid +INFO: Backup QSIZ0I resident size: 290MB +INFO: Backup QSIZ0I completed +[omm@node1 ~]$ +``` + +### 1.5.8.查看异机增量备份 +```powershell +[omm@node1 ~]$ gs_probackup show -B /opt/opgaussbak +``` +![](https://img-blog.csdnimg.cn/img_convert/5925103dab2e4c34524b258eccfb4ef6.png) +### 1.5.9.执行全量恢复异机恢复 +```powershell + [omm@node1 ~]$ gs_probackup restore -B /opt/opgaussbak/ -D /opt/ogdata01 -i QSIYFH --instance QSIYFH +WARNING: Failed to access directory "/opt/opgaussbak/backups/QSIYFH": No such file or directory +ERROR: Instance 'QSIYFH' does not exist in this backup catalog +[omm@node1 ~]$ gs_probackup restore -B /opt/opgaussbak/ -D /opt/ogdata01 -i QSIYFH --instance node1bak +LOG: Restore begin. +LOG: there is no file tablespace_map +LOG: check tablespace directories of backup QSIYFH +LOG: check external directories of backup QSIYFH +WARNING: Process 24097 which used backup QSIYFH no longer exists +INFO: Validating backup QSIYFH +INFO: Backup QSIYFH data files are valid +LOG: Thread [1]: Opening WAL segment "/opt/opgaussbak/backups/node1bak/QSIYFH/database/pg_xlog/0000000100000000000000C1" +INFO: Backup QSIYFH WAL segments are valid +INFO: Backup QSIYFH is valid. +INFO: Restoring the database from backup at 2021-05-03 17:30:53+08 +LOG: there is no file tablespace_map +LOG: Restore directories and symlinks... +INFO: Start restoring backup files. PGDATA size: 1297MB +LOG: Start thread 1 +INFO: Backup files are restored. Transfered bytes: 1313MB, time elapsed: 41s +INFO: Restore incremental ratio (less is better): 101% (1313MB/1297MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 6s +INFO: Restore of backup QSIYFH completed. 
+``` +---- +注意:恢复之前需要创建恢复目录,启动数据库进行数据验证。 +![](https://img-blog.csdnimg.cn/img_convert/040b4a3782d9b1858946bcd0595eee9b.png) + + + diff --git "a/content/zh/post/zhengwen2/openGauss\344\271\213indexadvisor\346\265\213\350\257\225\344\270\216\346\200\273\347\273\223.md" "b/content/zh/post/zhengwen2/openGauss\344\271\213indexadvisor\346\265\213\350\257\225\344\270\216\346\200\273\347\273\223.md" new file mode 100644 index 0000000000000000000000000000000000000000..c9b529cf8fafe66f2da8e72a54425c30b8b31303 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\344\271\213indexadvisor\346\265\213\350\257\225\344\270\216\346\200\273\347\273\223.md" @@ -0,0 +1,85 @@ ++++ + +title = "openGauss之indexadvisor测试与总结" + +date = "2021-07-09" + +tags = ["openGauss之indexadvisor测试与总结"] + +archives = "2021-07" + +author = "三五七言" + +summary = "openGauss之indexadvisor测试与总结" + +img = "/zh/post/zhengwen2/img/img29.png" + +times = "12:30" + ++++ + +# openGauss之indexadvisor测试与总结 + +# 测试种类:单query索引推荐、虚拟索引推荐(由于数据限制没有进行workload测试) + +# 1. 测试的表数据量如下:在test数据库下的aka_name、customer表。 +aka_name为60多万条数据(取自imdb数据集数据中一个表的数据)、customer为4条数据(自己创建的)。 + +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702172226929.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) + +# 2.单query索引推荐:gs_index_advise 用于针对单挑查询语句生成推荐索引 +(1)where语句只有一个属性的情况 + ![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702172241119.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) + +(2)where语句有多个属性的情况 +① 两个属性,范围查找.只推荐了id,没有推荐person_id。发现哪个属性在前面就推荐谁。 + ![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702172247553.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) + +②三个属性,两个范围查找,一个=精准查找.推荐id与=的属性。 +③四个属性,两个like模糊查询。 like属性不给推荐。 +④五个属性,两个like模糊查询。 like属性不给推荐。 +⑤五个属性,一个like模糊查询,一个=。 like不给于推荐,=给予推荐。 +⑥五个属性,一个like精准查询,一个=。 like依旧不给于推荐索引。 +⑦五个属性,两个=。 =的属性全部给予推荐索引。 + ![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702172254508.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) + +(3)order by 与 group by + +# 3.虚拟索引 +①hypopg_create_index:创建虚拟索引。 + ![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702172310447.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) + +②hypopg_display_index:显示所有创建的虚拟索引信息。 +hypopg_drop_index:删除指定的虚拟索引。 +hypopg_reset_index:清除所有虚拟索引。 +hypopg_estimate_size:估计指定索引创建所需的空间大小。 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702172316460.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) + +# 总结如下: +目前对gauss的单索引推荐和虚拟索引推荐进行了测以及结果情况如下: + + - 单索引推荐:适用于表中数据量大的情况,数据量过小不会进行推荐。 + +(1)当where中的查询条件只有一个的时候,推荐单一索引,如只有id在where中,只推荐id为索引;当where中的查询条件有多个的时候推荐多重索引,如id,name在where中被当做条件,则一起被推荐为联合索引,但是如果同时存在id、person_id则默认推荐id( 目前不知道原因) +(2)当query中除了where这个语句时,还存在order by 和 group by等条件时,将where、order by、group by中的属性全部作为联合索引进行推荐。 +(3)使用like模糊查询时或精准查询都不对该属性进行索引建立,并且=属性一定给予索引推荐建立。 +(4)当query中的条件过多时存在的属性也超过三个时,依旧推荐联合属性是在三个以上,会不会导致推荐索引过多从而性能下降,这个有待商榷,最好推荐索引中的属性在三个以内最好。不过这个可能需要通过DRL来学习,判断究竟选择一条query中的哪几个属性来建立索引。 + + - 虚拟索引 + +(1)通过使用hypopg这个建立虚拟索引可以加快查询速度,具体可以通过explain命令发现cost得到了很大程度上的减少,并且会显示在什么地方建立虚拟索引达到这种效果的。 
+(2)同时发现openGuass的index_advior在虚拟索引上和论文中的索引推荐都使用到hypopg这个工具,都是用于创建虚拟索引,进行索引推荐,不同的地方可能在于论文中使用了DRL进行学习以及建立了DQN模型,论文中找到全部能够建立的索引候选项,都是可以在一定程度上减少cost的,但是具体上不知道是哪一种的索引候选项能够使cost最小,然后通过DQN来学习,但是通过学习最终的Q-value不一定是全局最优的,但一定是局部最优的。所以就是gauss可能就是更加hypopg进行虚拟的创建得到一个索引,然后通过改索引在一定程度上对query进行了优化,而论文中则是先获取所有可能建立索引的索引候选项,然后通过学习来从所有的索引候选项里面找到一个最优的索引。 + + - workload级别索引推荐 + +暂未测试。 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210409150842510.jpg?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center, =200x200 ) +
    Gauss松鼠会是汇集数据库爱好者和关注者的大本营,
    + +
    大家共同学习、探索、分享数据库前沿知识和技术,
    + +
    互助解决问题,共建数据库技术交流圈。
    + +
    +openGauss官网 +
    diff --git "a/content/zh/post/zhengwen2/openGauss\345\206\205\345\255\230\347\256\241\347\220\206\345\210\235\346\216\242.md" "b/content/zh/post/zhengwen2/openGauss\345\206\205\345\255\230\347\256\241\347\220\206\345\210\235\346\216\242.md" new file mode 100644 index 0000000000000000000000000000000000000000..6ad005264827b8cb5c1773ef9881c40e54c91979 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\345\206\205\345\255\230\347\256\241\347\220\206\345\210\235\346\216\242.md" @@ -0,0 +1,79 @@ ++++ + +title = "openGauss内存管理初探" + +date = "2021-07-10" + +tags = [ "openGauss内存管理初探"] + +archives = "2021-07" + +author = "李士福" + +summary = "openGauss内存管理初探" + +img = "/zh/post/zhengwen2/img/img5.png" + +times = "12:30" + ++++ + +# openGauss内存管理初探 + +        上周,有小伙伴在openGauss技术交流群里问在编码开发过程中如何进行内存分配,使用时感觉和PostgreSQL使用方式有些不同。的确如这位同学所想,openGauss的内存管理尽管继承了PostgreSQL的内存管理机制,但进行了多方面的扩展和改造,目的是适配多线程架构,更好的满足企业化应用诉求。openGauss内存管理主要做了如下的功能: + + - 引入jemalloc开源库,替换glibc的内存分配和释放,减少内存碎片 + - 引入逻辑内存管理机制,控制进程内存使用,避免出现OOM问题 + - 引入多种内存上下文(共享内存上下文、栈式内存上下文、对齐内存上下文),满足不同场景代码开发诉求 + - 引入ASAN(Address Sanitizer)开源库,在Debug版本下定位内存泄漏和内存越界问题 + 引入丰富的内存查询视图,方便观察内存使用情况,定位潜在内存问题 + +        下面基于上面的功能特性,从开发和使用者两方面阐述一下如何在编码过程中使用内存以及如在问题出现时快速定位问题。 + +#### 1. openGauss内存管理开发注意事项 + +        openGauss中内存分配和释放接口,仍然同PostgresSQL内存上下文使用方式一样;通用内存上下文使用的数据结构和算法没有大的变化,新增内存上下文使用新的数据结构来实现,大家可以先看看相关文章了解PostgreSQL的内存上下文机制。 +        默认情况下,使用“AllocSetContextCreate”函数创建内存上下文。在这需要注意是否指定内存上下文的类型,默认不指定,则使用“STANDARD_CONTEXT”标识符来创建通用内存上下文,该内存上下文的作用域仅用于单个线程内,随着线程退出或者作业重置,需要进行内存上下文清理,防止内存堆积。线程中的内存上下文的根节点是TopMemoryContext(即代码中的t_thrd.top_mem_cxt),通常在代码中禁止从TopMemoryContext内存上下文上申请内存,在使用时根据内存作用域从相应的内存上下文节点上创建子节点,父子节点均为通用内存上下文。 +        因为openGauss是多线程架构,通常会使用共享内存来保存关键信息用于多线程访问和更新。在创建内存上下文时,需要明确指定“SHARED_CONTEXT”标识符,同时需要保证父节点均为共享内存上下文。共享内存上下文的根节点为“ProcessMemory”(即代码中的g_instance.instance_context),默认情况下不在该内存上下文上分配内存。共享内存上下文的可分配内存通常是受限的,因为内存使用的主体在作业执行过程,所以开发人员需要自行限制共享内存上下文最大可申请内存的大小(可通过成员数量限制或者淘汰机制实现),建议不超过200MB。在共享内存上下文上分配内存或者释放内存的操作,不需要额外加锁,直接调用palloc或者pfree即可,但申请内存后返回的指针后续操作需要用户根据调用逻辑来决定是否需要锁保护。 +        栈式内存上下文的实现机理很简单,和传统内存上下文不同,没有使用buddy算法进行2幂次方对齐,故分配内存时仅需8字节对齐,可以节省大量内存空间。栈式内存上下文适用于仅调用palloc分配内存,不需要进行pfree操作,在内存上下文不再进行使用时一次进行MemoryContextDelete或者MemoryContextReset,可以参考hashjoin算子使用内存的逻辑。对齐内存上下文用于内存页对齐,适用于ADIO场景,当前代码中很少应用。 +除了上述指定MemoryContextCreate创建内存上下文场景,还有通过hash_create函数创建hash表时隐含创建的内存上下文,故hash_create创建的hash表也分为通用hash表(用于单个线程内部)以及共享hash表(可以用于整个进程共享),创建共享hash表时,需要指定HASH_SHRCTX参数,且参数中指定的父内存上下文也需要是共享内存上下文。 +        上述总结了内存上下文创建和使用的基本方法,对于内存上下文的分配和释放还有如下要求,总结如下: + + - 内存上下文分为线程级别(如TopMemoryContext)、Session级别(MessageMemoryContext)、作业级别(ExecutorState)、算子级别(HashJoin),不允许执行作业时到高级别的内存上下文上申请内存 + - 不允许频繁申请和释放同一内存上下文,即使是临时内存上下文,最低力度做到每个算子只申请和释放一次 + - 对于不使用的内存及内存上下文,要及时释放;算子执行完成后,算子内存上下文 及时释放 + - 非重度内存消耗算子(hashjoin/hashagg/setop/material/windowsagg)消耗内存原则上不允许超过10MB;若超过该限额,需给出评估依据 + - 共享内存上下文使用时需要进行总量的控制,原则上不允许超过200MB的内存使用若超过,需要进行评估 + - 全局变量指针在内存释放后置空,即调用pfree_ext函数进行置空 + - 一次性分配数组内存时,访问、写入数组的下标对应内存时,对数组下标加入Assert判断,防止越界 + +#### 2.openGauss内存定位方法介绍 + +###### 1> 出现报错“memory is temporarily unavailable” +        观察日志,是否为“reaching the database memory limitation”,表示为数据库的逻辑内存管 理机制保护引起,需要进一步分析数据库的视图;若为“reaching the OS memory limitation” ,表示为操作系统内存分配失败引起,需要查看操作系统参数配置及内存硬件情况等。 +**数据库逻辑内存保护需要查看下列视图:** +- pg_total_memory_detail 观察当前数据库内部模块使用内存情况。当dynamic_used_memory大于max_dynamic_memory就会报内存不足。如果此时dynamic_used_memory小max_dynamic_memory,而dynamic_peak_memory大于max_dynamic_memory表明曾经出现内存不足的情况。如果是other_used_memory较大,则只能通过更换Debug版本进一步定位。SQL语句为: Select * from pg_total_memory_detail; 
+- 如果dynamic_used_shrctx较大,则查询gs_shared_memory_detail视图,观察是哪个MemoryContext使用内存较多。SQL语句为:Select * from gs_shared_memory_detail; +- 如果dynamic_used_shrctx不大,则查询gs_session_memory_detail视图,观察是哪个MemoryContext使用内存较多。SQL语句为:Select * from gs_session_memory_detail order by totalsize desc limit 20; +- 发现内存上下文后,若不好定位,进一步排查内存上下文上哪个地方问题,需要在Debug版本使用 memory_tracking_mode进一步定位文件和行号; +- 若内存上下文无异常,需要查看线程数量是否很高,可能是由于CacheMemoryContext引起。 +- 可以在debug版本下,通过gdb脚本,把内存上下文上的分配信息打印出来 + +###### 2> 出现数据库节点RES很高或者节点宕机“Out of Memory” +- 首先读取/var/log/messages中的信息,看看是哪个进程引起的,通常是由 gaussdb引起;若gaussdb进程内存引起,进一步看是否正确配置 max_process_memory参数 +- 若配置合理,进一步观察pg_total_memory_detail视图是否为Other内存占用 过高 +- 若内存增长快速,且主要为内存上下文使用,可以通过jemalloc profiling快 速定位哪个地方申请的内存; +- 若Other内存过高,可能是由于第三方组件或者libpq等直接malloc内存引起的 ,需要通过ASAN工具进一步排查;若不能直接定位,只能逐步关闭参数(如 ssl/llvm等),进行排查 +#### 3.附录: +###### 1> jemalloc使用方法: +- 在debug版本下,设置环境变量: +export MALLOC_CONF=prof:true,prof_final:false,prof_gdump:true,lg_prof_sample:20 +其中最后的20表示每2^20B(1MB)产生一个heap文件,该值可以调,但是调大以后,虽然heap文件会减少,但也会丢失一些内存申请信息。 +- source 环境变量后,启动集群。 +- 使用jeprof处理heap文件,生成pdf。jeprof在开源第三方二进制目录下,binarylibs/${platForm}/jemalloc/debug/bin下可以获取,此外使用该二进制需要安装graphviz,可以通过yum install graphviz安装。 +- 生成pdf的命令: +全量:jeprof –show_bytes –pdf gaussdb *.heap > out.pdf +增量:jeprof –pdf gaussdb –base=start.heap end.heap > out.pdf +###### 2> ASAN使用方法: +- 检查操作系统配置:ulimit -v unlimited && vm.overcommit_memory不为0 +- 停止集群,在环境变量加入(单机部署中的.bashrc文件中): export ASAN_OPTIONS=halt_on_error=0:alloc_dealloc_mismatch=0:log_path=/tmp/memcheck/memcheck 其中log_path设置错误信息输出位置,目录为“/tmp/memcheck/”,文件名前缀为“memcheck”。 diff --git "a/content/zh/post/zhengwen2/openGauss\345\222\214PostgreSQL\347\232\204\346\272\220\347\240\201\347\233\256\345\275\225\347\273\223\346\236\204\345\257\271\346\257\224.md" "b/content/zh/post/zhengwen2/openGauss\345\222\214PostgreSQL\347\232\204\346\272\220\347\240\201\347\233\256\345\275\225\347\273\223\346\236\204\345\257\271\346\257\224.md" new file mode 100644 index 0000000000000000000000000000000000000000..f48f97cd198128ccf186a109d2e7c14a14134ad6 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\345\222\214PostgreSQL\347\232\204\346\272\220\347\240\201\347\233\256\345\275\225\347\273\223\346\236\204\345\257\271\346\257\224.md" @@ -0,0 +1,436 @@ ++++ + +title = "openGauss和PostgreSQL的源码目录结构对比" + +date = "2021-07-09" + +tags = [ "openGauss和PostgreSQL的源码目录结构对比"] + +archives = "2021-07" + +author = "YAN左使" + +summary = "openGauss和PostgreSQL的源码目录结构对比" + +img = "/zh/post/zhengwen2/img/img29.png" + +times = "12:30" + ++++ + +# openGauss和PostgreSQL的源码目录结构对比 + +(目录) + +> 前言:openGauss内核虽然源于PostgreSQL,但是华为在多个维度进行了深度的改进。本文从源目录的组织结构入手来研究openGauss,笔者在不断深入的研究中不禁惊叹于openGauss先进且合理的源码组织结构,这里面体现了华为对于数据库架构和技术的深刻理解,值得我们反复品味和学习! + +从源码入手是研究一款开源数据库的重要方法之一,对源代码的理解可以从宏观和微观两个层面入手。为了避免陷入局部代码之中,第一步我们应该抛开微观层面上具体的代码和实现细节,从宏观层面上的目录和组织结构入手,来窥探整个数据库的架构和实现逻辑,以及开发人员在实现层面的考量。对源代码的全局结构有了清晰的认识之后,我们便可以对查询优化、存储、事务、进程管理、内存管理等各个功能模块的代码进行深入的研究。 + +openGauss内核源于PostgreSQL 9.2.4版本,因此本文中我们通过对比的方式来探寻openGauss和PostgreSQL在源码目录和组织结构的异同。 + +## **1. 
GaussDB为什么选择PG?** + +首先我们需要弄清楚openGauss的产品定位,以及它和PostgreSQL的关系,这有助于我们理解openGauss的整个源码体系和结构。openGauss是华为于2020年6月开源的单机版GaussDB。华为决定自主研发GaussDB时为什么选择了PG,而不是其他的开源数据库如MySQL,我们或许可以从GaussDB的发展历程中寻找答案。 + +GaussDB并非是一个产品,而是一系列产品的统称,目前GaussDB产品线主要包括GaussDB T (OLTP)和GaussDB A (OLAP)。其中GaussDB T的前身是GaussDB 100,是华为自2007年开始在自研内存数据库基础上全面改造而来的一款分布式数据库,此前华为由于在电信计费领域的需求而自主研发了一款内存数据库。GaussDB A的前身是GaussDB 200,是华为自2011年开始基于PostgreSQL 9.2.4自主研发的一款具备多模分析及混合负载能力的大规模并行处理分布式数据库,支持行列混合存储以及线程化,支持高达2048节点的集群规模,提供PB(Petabyte)级数据分析能力、多模分析能力和实时处理能力。 + +openGauss内核虽然源于PostgreSQL,但华为在开发过程中结合企业级场景需求,通过C++语言(PostgreSQL是用C语言写的)对80+%的数据库内核代码进行了重构,修改和新增了70万行核心代码。着重在整体架构、数据库内核三大引擎 (优化器、执行引擎、存储引擎)、事务、以及鲲鹏芯片等方面做了大量的深度优化。 + +例如,通过引入向量化引擎和编译执行引擎等从多个维度重构了执行引擎,通过列存及自适应压缩等全新重构了存储引擎。除了数据库内核,在高可用、数据库安全和AI特性方面,openGauss数据库也做了极大的增强。PG11.3版本数据库中共有290个数据库参数,而openGauss目前有500多个数据库参数,每个参数对应一个数据库内核功能,所以可以看到华为对PG的内核做了非常大的改造和增强。 + +做数据库内核开发的技术难度很大,哪怕开发团队对内核架构与机制的制定上出现了丝毫的问题,上线后都极有可能会出现后果严重。有时一旦确定项目无法进行下去,甚至可能需要推倒重来。所以基于一款已经成熟的开源数据库进行自主研发就是一个很好的选择。那为什么选择PG而不是在互联网公司已经得到广泛使用的MySQL,可能是华为在调研分析后看中了PG各方面优秀的特性: + +- **代码质量高**:作为学院派的代表,PG的代码简洁、规范、结构清晰,非常适合从源码级进行二次研发。相比之下,修改MySQL的代码会困难很多。 +- **功能完善强大**:PG支持的数据类型丰富(多模能力),SQL语法完善(高级SQL特性),查询优化性能强。以JSON支持为例,PG从2012年的9.2版本就已经添加了对JSON数据类型的支持,相比之下Oracle从2014年发布12c才开始支持JSON,而MySQL直到2015年发布5.7.8版本才开始原生支持JSON。以join算法为例,PG几乎支持所有的多表连接算法;以SQL为例,PG支持大多数SQL语法,相比之下MySQL支持较弱;此外PG的查询优化处理能力,例如复杂子查询等都要强于MySQL。 +- **技术先进**:PG号称是世界最先进的开源数据库,其先进性不仅体现在基本的存储、事务、查询处理等方面,更多的是体现在其新技术上,比如JIT查询计划的即时编译和外部表技术等。 +- **扩展性强**:良好的扩展性使得PG非常适合进行二次开发,例如在PG基础架构之上引入MPP框架可以构建分布式数据仓库GreenPlum(MySQL基本不适合做数据仓库);在PG上引入OpenCypher可以构建具备图数据存储和查询能力的多模数据库AgensGraph;在PG架构上通过将数据自动按时间和空间分片可以构建时序数据库Timescale。 + +我觉得GaussDB发展的10年历程说明华为选择PG是一个十分正确的选择。目前PG的用户增长迅速,生态发展的也比MySQL要好,这说明越来越多的公司和开发者都意识到PG的确是一款优秀的开源数据库。其实在早年间,也有一些公司曾在MySQL上进行自主研发,比如阿里巴巴之前在MySQL社区版的基础上做了大量的性能与功能的优化改进,自主研发了AliSQL用于支撑淘宝双十一等业务,但相比PG来说,这样二次研发的成功案例要少很多。 + +至此我们理清了openGauss和PostgreSQL的联系,接下来我们一起通过对比二者源代码的组织结构,来窥探二者在数据库架构和实现方面的异同,这样对比学习的方式有助于同时加深我们的二者的认识。 + +## **2. 源代码目录结构对比** + +本文中我们进行对比的源代码版本分别是PostgreSQL 9.2.4 (发布于2013年4月4日,截至2020年7月9日PG已更新到14beat2版本)和openGauss 2.0.1 (截至2020年7月9日发布的最新版)。 + +进入PostgreSQL和openGauss的源码目录后,可以看到第一级目录下都有一个src目录,该目录就是数据库源代码目录。本文中我们重点关注src目录下的代码结构,因为src目录是整个数据库的核心代码。 + + +### **2.1 数据库管理系统的架构和主要组件** + +了解传统的关系数据库管理系统(RDBMS)的架构能帮助我们更好地理解源代码的各个模块和其组织结构,下图显示了一个RDBMS的架构和主要组件。 + +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210710214034571.PNG?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3lhbnpoZW5ndG9uZw==,size_16,color_FFFFFF,t_70#pic_center) + +> 图片来源于经典论文: +Hellerstein, J. M., Stonebraker, M., & Hamilton, J. (2007). [**Architecture of a Database System.**](https://www.nowpublishers.com/article/Details/DBS-002) Foundations and Trends® in Databases, 1(2), 141-259. + +图中显示了一个RDBMS包含的5个主要的功能模块: + +- 客户端通信管理器(Client Communications Manager) +- 进程管理器(Process Manager) +- 关系查询处理器(Relational Query Processor) +- 事务和存储管理器(Transactional Storage Manager) +- 共享组件和工具(Shared Components and Utilities) + + +考虑一个简单而典型的数据库查询应用实例-“查询某次航班的所有旅客名单”,这个操作所引发的的查询请求大致按如下方式进行处理: +1. 机场登机口的PC机(客户端)调用API与DBMS的客户端通信管理器(Client +Communications Manager)建立网络连接; +1. 在收到客户端的请求后,DBMS必须为之分配一个计算线程。系统必须确保该线程的数据以及控制输出是通过通信管理器与客户端连接的,这些工作由进程管理器(Process Manager)来管理。 +2. 分配控制进程之后,接下来便可以通过关系查询处理器(Relational Query Processor)来处理查询了。该模块会检查用户是否有查询权限,然后将用户的SQL语句编译为查询计划,并将查询计划交给查询执行器来执行。 +3. 在查询计划的底层,会有若干操作从数据库请求数据。这些操作通过事务和存储管理器(Transactional Storage Manager)读取数据并保证事务的“ACID”性质。此外还有一个缓冲管理器,用来控制内存缓冲区和磁盘之间的数据传输。 +4. 
最后,查询处理器将数据库的数据组织成结果元组,结果元组生成后被放入客户通信管理器的缓冲区中,然后该通信管理器将结果发送给调用者。 + +上述例子我们没有提到共享组件和工具(Shared Components and Utilities), 但它们对于一个功能完整的DBMS是十分重要的,这些组件独立运行于任何查询,它们使数据库保持稳定性和整体性。比如目录管理器和内存管理器在传输数据时被作为工具来调用,在认证、解析以及查询优化过程中,查询处理器都会用到目录。同样,内存管理器也广泛应用于整个DBMS运行过程中的动态内存分配和释放。 + +### **2.2 src目录结构对比** + +``` +PostgreSQL-9.2.4\src +├─backend (后端代码,包括解析器、优化器、执行器、存储、命令、进程等) +├─bin (psql等命令的代码) +├─include (头文件) +├─interfaces (前端代码) +├─makefiles (平台相关的make的设置值) +├─pl (存储过程语言的代码) +├─port (平台移植相关的代码) +├─template (平台相关的设置值) +├─test (测试脚本) +├─timezone (时区相关代码) +├─tools (开发工具和文档) +└─tutorial (教程) +``` + +``` +openGauss-2.0.1\src +├─bin (gsql等命令的代码) +├─common (公共功能模块代码) +├─gausskernel (高斯内核代码) +├─include (头文件) +├─lib (库文件,包括) +├─makefiles (平台相关的make的设置值) +├─test (测试脚本) +└─tools (开发工具和文档) +``` + +与PostgreSQL相比,openGauss在src目录下的组织方式有以下变化: + +- 保留了bin、include、makefiles、test和tools这5个目录; +- 新建了**gausskernel**目录,用于存放整个**高斯内核**的代码,backend目录下的bootstrap、optimizer、executor、storage等模块被移动到gausskernel目录下; +- 新建了**common**目录,用于存放**公共功能模块**的代码,interfaces、pl、port、template、timezone和tutorial这6个目录的全部内容,以及backend目录的剩余内容(如libpq、nodes、parser等)被移动到common目录下。 + +接下来我们会对以上的变化进行详细的说明。 + +### **2.3 从backend到common和gausskernel的变化** + +由于PostgreSQL采用C/S(客户机/服务器)模式结构,客户端为前端(Frontend),服务器端为后端(Backend),所以PostgreSQL的backend目录是整个数据库服务的核心代码目录。 + +openGauss对PG的backend目录进行了功能上的细化分类,将optimizer、executor、storage等高斯内核的核心功能组件移动到新建的gausskernel目录下,其他一些公共功能模块则被移动到新建的common目录下。 + + +``` +PostgreSQL-9.2.4\src +├─backend (后端源码目录) +│ ├─access (各种数据的存储访问方法,如支持堆、索引等数据存取) +│ ├─bootstrap (支持Bootstrap运行模式,用来创建初始的模板数据库) +│ ├─catalog (系统目录) +│ ├─commands (执行非计划查询的SQL命令,如创建表命令等) +│ ├─executor (执行器,执行生成的查询计划) +│ ├─foreign (FDW:Foreign Data Wrapper处理) +│ ├─lib (共同函数) +│ ├─libpq (处理与客户端通信库函数,几乎所有的模块都依赖它) +│ ├─main (主程序模块,负责将控制权转到Postmaster进程或Postgres进程) +│ ├─nodes (定义系统内部用到的节点、链表等结构,以及处理这些结构的函数) +│ ├─optimizer (优化器,根据查询树创建最优的查询路径和查询计划) +│ ├─parser (解析器,将SQL查询转化为内部查询树) +│ ├─po +│ ├─port (平台兼容性处理相关的函数) +│ ├─postmaster (监听用户请求的进程,并控制Postgres进程的启动和终止) +│ ├─regex (正规表达式库及相关函数) +│ ├─replication (流复制) +│ ├─rewrite (查询重写) +│ ├─snowball (全文检索相关) +│ ├─storage (存储管理,包括内存、磁盘、缓存等管理) +│ ├─tcop (Postgres服务进程的主要处理部分,调用parser、optimizer、executor和commands中的函数来执行客户端提交的查询) +│ ├─tsearch (全文检索) +│ └─utils (各种支持函数,如错误报告、各种初始化操作等) +``` + +``` +openGauss-2.0.1\src +├─common (公共功能模块代码) +│ ├─backend +│ │ ├─catalog +│ │ ├─client_logic +│ │ ├─lib +│ │ ├─libpq +│ │ ├─nodes +│ │ ├─parser +│ │ ├─pgxc_single +│ │ ├─po +│ │ ├─port +│ │ ├─regex +│ │ ├─snowball +│ │ ├─tsearch +│ │ └─utils +│ ├─interfaces +│ ├─pgxc +│ ├─pl +│ ├─port +│ ├─template +│ ├─timezone +│ └─tutorial +``` + +``` +openGauss-2.0.1\src +├─gausskernel (高斯内核) +│ ├─bootstrap +│ ├─cbb +│ ├─dbmind (AI4DB和DB4AI功能模块) +│ ├─optimizer +│ ├─process (进程和线程管理模块) +│ ├─runtime (执行器模块) +│ ├─security +│ └─storage +``` + +#### **(1) gausskernel内核整体目录结构对比** + +openGauss对gausskernel内核部分代码进行了较大的变动,而内核又是数据库最核心最重要的部分,所以我们需要重点关注内核部分的源代码结构。PostgreSQL中的内核代码都在backend目录下,而openGauss的内核代码则主要在gausskernel目录下(从gausskernel的名称就可以看出来)。 + +openGauss之所以创建gausskernel目录,我想可能有以下几点原因: +1. 创建内核目录彰显了openGauss对于内核的重视,而不是像PG一样将所有的功能模块都放到backend目录下; +2. 突出华为在数据库内核方面所作的重大改进和优化工作; +3. 单独将内核部分代码单独提出来可以方便项目开发和后期代码维护。 + +gausskernel在代码目录的组织结构上主要有以下变化: + +1. 保持bootstrap、optimizer和storage这3个目录,但是这几个目录中所包含的内容发生了变化(后文会讲到); +2. 新增了cbb、dbmind和security这3个目录,其中dbmind目录包含了人工智能和数据库结合的最新研究成果; +3. 新建process目录,原来PG中的postmaster目录被移动到process目录下作为子目录之一,说明华为在进程和线程管理方面做了很多改进; +4. 
新建runtime目录,原来PG中的executor目录被移动到runtime目录下作为子目录之一,说明华为在执行器方面做了很多增强,比如增加了向量化执行引擎。 + + +#### **(2) 公共组件common目录结构对比** + +openGauss将PG的backend目录的公共功能模块都统一移动到新建的common目录下,这样做的原因可能有两点: +1. openGuass认为这些模块是数据库系统共有的公共组件或者功能模块,比如PG中backend目录下的catalog、lib、libpq等模块; +2. openGuass基本都保留了这些模块的接口和公共函数代码,所以openGauss与现有的PG生态兼容性较好。openGauss仅对这些代码做了适当优化,所以单独创建common目录可以和gausskernel这样修改较大的模块区分开来。 + +注意openGauss也有backend目录,但是该目录只保留了一些公用的功能模块,并且被移动到了common目录下。 + +#### **(3) optimizer目录的变化** + +``` +PostgreSQL-9.2.4\src +├─backend +│ ├─commands +│ ├─optimizer +│ │ ├─geqo (遗传算法查询优化) +│ │ ├─path (使用parser的输出创建查询路径) +│ │ ├─plan (优化path输出生成查询计划) +│ │ ├─prep (处理特殊的查询计划) +│ │ └─util (优化器支持函数) +│ ├─rewrite +``` + +``` +openGauss-2.0.1\src +├─gausskernel (高斯内核) +│ ├─optimizer +│ │ ├─commands +│ │ ├─geqo +│ │ ├─path +│ │ ├─plan +│ │ ├─prep +│ │ ├─rewrite +│ │ └─util +``` + +openGuass在优化器目录中的变化主要是将PG中和optimzier同一目录级别的commands和rewrite移动到optimzier目录下,这说明openGauss将命令模块和查询重写模块归为优化器的一部分。 + +#### **(4) 从postmaster到process的变化** + +在架构层面PostgreSQL是多进程架构,为了提高并发度,openGauss将其进一步优化成了多线程架构,openGauss属于单进程多线程模型的数据库。 + +``` +PostgreSQL-9.2.4\src +├─backend +│ ├─postmaster +│ ├─tcop +``` + +``` +openGauss-2.0.1\src +├─gausskernel +│ ├─process +│ │ ├─datasource +│ │ ├─globalplancache +│ │ ├─job +│ │ ├─main +│ │ ├─postmaster +│ │ ├─stream +│ │ ├─tcop +│ │ └─threadpool (线程池) +``` + +从上面的对比可以看出,openGauss在gausskernel目录下新建了process目录,将PG的postmaster和tcop目录移动到process目录下,并且增加了很多的其他的功能模块,比如线程池threadpool模块等。 + + +#### **(5) 从executor到runtime的变化** + +``` +PostgreSQL-9.2.4\src +├─backend +│ ├─executor +``` + +``` +openGauss-2.0.1\src +├─gausskernel +│ ├─runtime +│ │ ├─codegen (代码生成) +│ │ │ ├─codegenutil +│ │ │ ├─executor +│ │ │ ├─llvmir (LLVM动态编译) +│ │ │ └─vecexecutor +│ │ ├─executor +│ │ └─vecexecutor (向量化执行引擎) +│ │ ├─vecnode +│ │ ├─vecprimitive +│ │ └─vectorsonic +``` + +从上面的对比可以看出,openGauss在gausskernel目录下新建了runtime目录,将PG的executor目录移动到runtime目录下,并且增加了codegen和vecexecutor两个目录。codegen目录中用到了业界流行的开源编译框架LLVM,用于生成高性能的代码来进一步提升性能;vecexecutor目录则包含了向量化执行引擎的相关代码,用于提升SQL引擎的计算性能。 + +代码生成和向量化执行是当前学术界和工业界用于提升SQL计算引擎性能的两种有效方法,而这两种方法在openGauss中都已经实现了。 + +#### **(6) access目录的变化** + +openGauss将从backend目录下的access目录移动到gausskernel/storag目录下,这是因为对数据的访问是和数据库的存储结构密切相关的。数据一般存储在磁盘上的,所以数据在磁盘上组织形式决定了访问数据的效率,比如是堆文件还是顺序文件,以及读取时是顺序读取还是通过索引来读取。 + +``` +PostgreSQL-9.2.4\src +├─backend +│ ├─access +│ │ ├─common (公共存取函数) +│ │ ├─gin +│ │ ├─gist (可自定义的存取方法) +│ │ ├─hash (哈希用于存取表) +│ │ ├─heap (堆用于存取表) +│ │ ├─index (索引存取表) +│ │ ├─nbtree (Lehman and Yao的btree管理算法) +│ │ ├─spgist +│ │ └─transam (事务管理器) +``` + +``` +openGauss-2.0.1\src +├─gausskernel +│ └─storage +│ ├─access +│ │ ├─cbtree +│ │ ├─common +│ │ ├─dfs +│ │ ├─gin +│ │ ├─gist +│ │ ├─hash +│ │ ├─hbstore +│ │ ├─heap +│ │ ├─index +│ │ ├─nbtree +│ │ ├─obs +│ │ ├─psort +│ │ ├─redo +│ │ ├─rmgrdesc +│ │ ├─spgist +│ │ ├─table +│ │ └─transam +``` + +#### **(7) storage目录的变化** + +``` +PostgreSQL-9.2.4\src +├─backend +│ ├─storage +│ │ ├─buffer (行存储共享缓冲区模块) +│ │ ├─file (文件操作和虚拟文件描述符模块) +│ │ ├─freespace (行存储空闲空间模块) +│ │ ├─ipc (进程间通信模块) +│ │ ├─large_object (大对象模块) +│ │ ├─lmgr (锁管理模块) +│ │ ├─page (页面模块) +│ │ └─smgr (存储介质管理模块) +``` + +``` +openGauss-2.0.1\src +├─gausskernel +│ └─storage +│ ├─access +│ ├─buffer +│ ├─bulkload (外表批量导入模块) +│ ├─cmgr (列存储只读共享缓冲区模块) +│ ├─cstore (列存储访存模块) +│ ├─dfs (外表服务器连接模块) +│ ├─file +│ ├─freespace +│ ├─ipc +│ ├─large_object +│ ├─lmgr +│ ├─mot (内存引擎模块) +│ ├─page +│ ├─remote (备机页面修复模块) +│ ├─replication +│ └─smgr +``` + +从上面的对比可以看出,openGauss在storage目录的变化主要包括: + +- 
新增了列存储相关的功能模块如cmgr和cstore,这是openGauss相比PG的一大增强,通过增加列存储使得openGauss能适用于更多的场景; +- 新增了mot模块,mot模块是openGauss引入的MOT(Memory-Optimized Table)存储引擎,是openGauss数据库最先进的生产级特性,它针对多核和大内存服务器进行了优化,能为事务性工作负载提供更高的性能; +- 新增了外表功能的相关模块,如dfs和bulkload等; +- 新增了备机页面修复模块remote; +- 将replication模块从backend目录移动到storage目录下; +- 保留了buffer、file、freespace、ipc、large_object、lmgr、page和smgr等8个模块。 + +#### **(8) security目录:数据安全的保障** + +``` +openGauss-2.0.1\src +├─gausskernel +│ ├─security +│ │ ├─gs_policy +│ │ ├─iprange +│ │ └─keymanagement +``` +openGauss在gausskernel目录下新建了security目录,用于存放数据库安全的相关功能模块的代码,比如安全认证、角色管理、审计与追踪以及数据加密等模块的源代码。 + + +#### **(9) dbmind目录:数据库的AI大脑** + +AI与数据库结合是近年的研究热点,据我所知,即使最新版的PostgreSQL和MySQL目前仍然不具备这样的功能,可以说openGauss在这个领域走在了业界前列。AI与数据库结合的相关源代码都在dbmind目录下。值得注意的是dbmind位于gausskernel下说明华为是将数据库的AI能力作为未来数据库内核的一种基础能力来进行构建的。 + +``` +openGauss-2.0.1\src +├─gausskernel +│ ├─dbmind (AI4DB和DB4AI模块) +│ │ ├─deepsql (DB4AI: 库内AI算法) +│ │ │ └─madlib_modules (开源的MADlib机器学习框架) +│ │ └─tools (AI4DB工具集) +│ │ ├─anomaly_detection (数据库指标采集、预测与异常监控) +│ │ ├─index_advisor (索引推荐) +│ │ ├─predictor (AI查询时间预测) +│ │ ├─sqldiag (慢SQL诊断发现) +│ │ └─xtuner (参数调优与诊断) +``` + +AI和数据库结合一般可分为AI4DB与DB4AI两个方向: +- **AI4DB**指利用AI技术来优化数据库的性能或者增强运维管理的能力,主要包括基于AI的自调优、自诊断、自安全、自运维、自愈等。openGauss目前在dbmind/tools目录下已经提供了5个功能模块。 +- **DB4AI**指打通数据库到人工智能应用的端到端流程,达到高性能和节约成本等目的。目前主要手段是将常用的机器学习算法封装为SQL语句,从而可以直接在SQL语句中调用机器学习算法,来充分发挥openGauss数据库高并行、列存储等优势。deepsql目录实现了库内AI算法,目前已经支持60多个常用算法,主要通过开源的MADlib机器学习框架来实现。 + + + diff --git "a/content/zh/post/zhengwen2/openGauss\345\234\250docker\344\270\212\347\232\204\345\256\211\350\243\205\357\274\214\350\277\236\346\216\245\344\273\245\345\217\212java\350\277\236\346\216\245.md" "b/content/zh/post/zhengwen2/openGauss\345\234\250docker\344\270\212\347\232\204\345\256\211\350\243\205\357\274\214\350\277\236\346\216\245\344\273\245\345\217\212java\350\277\236\346\216\245.md" new file mode 100644 index 0000000000000000000000000000000000000000..907b01060d80ea8666e21965e745952560fcfa31 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\345\234\250docker\344\270\212\347\232\204\345\256\211\350\243\205\357\274\214\350\277\236\346\216\245\344\273\245\345\217\212java\350\277\236\346\216\245.md" @@ -0,0 +1,236 @@ ++++ + +title = "openGauss在docker上的安装,连接以及java连接" + +date = "2021-07-09" + +tags = ["openGauss在docker上的安装,连接以及java连接"] + +archives = "2021-07" + +author = "ZeroRains" + +summary = "openGauss在docker上的安装,连接以及java连接" + +img = "/zh/post/zhengwen2/img/img28.png" + +times = "12:30" + ++++ + +# openGauss在docker上的安装,连接以及java连接 + +# 一、openguass的安装 + +## 1. 如何快速简洁地安装openguass +安装opengauss的方式在我已知范围内有两种,一种是在虚拟机上安装centos(其实我感觉是个linux就行,但是我也没试过)然后在使用openguass的镜像进行手动安装。第二种是直接在docker上拉取镜像即可。从上面的描述中,第一种方法看上去很复杂实际上也很复杂,所以我都是使用第二种方法的。 +你要问这两种安装方式有什么不同吗?在我使用的范围内我感觉是没有什么不同的,主要是安装简单和方便 +那我们来看看怎么安装吧 +## 2. 
docker下载 +点击这个链接就可以开始下载docker了:docker下载 +在安装之前需要确保系统开启了虚拟服务,不过默认好像都是开启的 +## 3.开始安装docker +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171214690.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +开始安装,如果你是win10专业版的话红色箭头的地方就不用√了,如果不是一定要√,点击OK就可以开始安装了,安装结束后会进行一次重启 +安装完成,然后开始启动docker: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171243111.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +进入到docker当中我们会看到这样一个界面(我可能是之前安装过了,很顺利,如果有遇到问题的小伙伴,可以看看解决方案) + +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171303782.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +专业版的小伙伴们注意一下啊,进入这个界面之后点击设置按钮, +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702192947521.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) + + +然后查看这个是否被打开,如果被打开了,记得一定要关掉,不然你的内存就会被占用得特别多,如果是家庭版就没得选择这个必须开 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171336467.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +! +在设置界面中点击1的位置,然后将下面这段代码复制到2的框中(这是一个换源过程,能够帮你在装Openguass的时候下载快一些) + +``` +{ + "registry-mirrors": [ + "http://docker.mirrors.ustc.edu.cn", + "http://hub-mirror.c.163.com", + "http://registry.docker-cn.com" + ], + "insecure-registries": [ + "docker.mirrors.ustc.edu.cn", + "registry.docker-cn.com" + ] +} +``` + +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171354360.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +点击Apply&Restart按钮,等待docker重启即可 +## 4. docker安装可能存在的问题以及对应的解决方案 + + - WSL2或者Hyper-V: + +可能存在说咱们他不开docker需要安装wsl2。 +首先要确定的是,我们的windows10版本是不是专业版,如果是专业版,无论他说使用wsl2有更好的体验,不要管他,使用hyper-v,直接选hyper-v,其他都不要管。 +如果是家庭版,那就只能装wsl2了,他会给出一个连接,安装对应的补丁,然后直接装就行了 +这两个都是是虚拟机的启动器,wsl2相当于启动了一个子linux系统,会特别吃内存2~4G左右,因此如果是专业版享受hyper-v或者内存够大的话,那就可以别看下面的部分了 +因为wls2吃内存很大所以需要对其限制内存,但是如果限制得太小的话他就无法启动docker了,经过我在多次不同电脑上安装的经验,大概给wsl2限制在1.5G左右,就能正常启动。当然我也试过限制在500M,1G的内存,这些对于一部分电脑是有用的,但是对于大部分的电脑现在在1.5G左右是比较合适的,大家可以在看完下面限制内存的方法后自己调一下,到底要限制多大才能正常启动。 + + - WSL2内存限制: + +先打开控制台输入:wsl --shutdown确保wsl2服务关闭,不管有没有启动这个服务都先关了 +首先按下Windows + R键,输入 %UserProfile%,然后回车。会弹出一个文件夹,在文件夹下创建.wslconfig文件,然后使用记事本打开 +在文件中输入如下内容 + +``` +[wsl2] +memory=1500MB +swap=2G +processors=1 +``` + +其中对应的内容信息是: + +``` +memory=1500MB # 限制最大使用内存 +swap=2G # 限制最大使用虚拟内存 +processors=1 # 限制最大使用cpu个数 +``` + +虚拟内存好像影响不大,不过觉得不合适的话也是可以改的,在memory上就要进行改动了,具体设置多少需要自己进行试验 +## 5. 
拉取openguass镜像 +重启结束后,点击键盘上的开始按钮,输入powershell,回车,输入指令,下面指定了对应的版本,1.0.1也算是一个稳定版本,因为如果使用最新版本他可能比较不稳定会出现su: Authentication failure 的情况,这个目前的解决方案就是不是用latest标签的最新版本而是使用1.0.1的版本。 +docker pull enmotech/opengauss:1.0.1 +等待openguass镜像下载 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171411487.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +opengauss的安装,需要使用Openguass的镜像,在镜像中内置了超级用户omm以及测试用户guassdb,因此在安装时,需要给他们设置密码, +openGauss的密码有复杂度要求,需要:密码长度8个字符及以上,必须同时包含英文字母大小写,数字,以及特殊符号 +比如密码2222@aaaA +在安装是默认端口号为5432,但是如果想要在外部链接的话需要修改端口号, +执行下面的语句,在中文的地方改成对应的内容即可,我这里修改的端口号改为了15432 +docker run --name opengauss --privileged=true -d -e GS_PASSWORD=(这里是你的密码) -p 15432(端口号):5432 enmotech/opengauss:1.0.1 +安装完成: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171424520.png) +回到docker主界面,我们会看到有一个Openguass的选项,如下图: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171437698.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +! +当1处的位置为灰色说明当前openguass服务尚未启动,点击2处的按钮可以开始启动 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171449579.png) +当1处图标变绿说明服务启动成功,点击2处的按钮进入控制台模式 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171501243.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +其实只要看到上图的界面我们就算安装成功了,不过保险起见,我们可以使用su - omm指令进入超级用户的模式,然后使用指令gsql打开openguass看看能不能正常使用(这个控制台是一个微型的linux控制台,输入密码的时候看不到输入情况,所以放心输入,输入正确了回车就行) +![在这里插入图片描述](https://img-blog.csdnimg.cn/2021070217151375.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +安装成功,如果忘记里密码可以点这里绿色图标旁边的文字,然后点击INSPECT +![在这里插入图片描述](https://img-blog.csdnimg.cn/2021070217152696.png) +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171538641.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +openguass的密码就保存再这里,同时还有一些必要的信息 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171551422.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +当我们不需要使用openguass数据库的时候可以点击这个按钮将其关闭,当需要使用时向之前说的那样开启就可以了,记住只有docker里的这个图标变绿了才能使用可视化工具连接openguass数据库 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171601739.png) +## 6. 确认opengauss可用 +# 二、opengauss数据库的可视化链接 +可视化工具其实是可以随便选的,比如jetbrains的datagrip还有Navicat等等 +不过后面要演示java链接数据库,因此我决定使用jetbrains的idea作为数据库的可视化工具 +疑问:啊?idea不是用来写java的么?还能用来数据库可视化? +答:可以的,如果是idea旗舰版在里面是有一个类似于datagrip的插件的 +疑问:那idea旗舰版怎么得 +答:可以通过学校邮箱在jetabrains官网上注册一个账号并使用学校邮箱申请一年使用期,只要你还在学校一天,你就能一直续下去,获取流程会在这里说明一下(当然,如果使用navicat的话可以跳过获取正版jetbrains的说明部分,直接看连接部分) +## 1. 
获取旗舰版的idea +首先进入jetbrains的官网,并进行账号注册 +注册好帐号后,进行登录,回到主页,主页的右上角可以切换成中文,如图进行点击学习工具->学生和教师 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171614713.png) +点击立即申请 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171625378.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +按照对应的条件填写如下表格 + +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171658749.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +然后申请免费产品,申请后会给你的学校邮箱发一个邮件,进入邮件点击他提供的连接,你的帐号就能获得一年的许可证,这时候你在官网下载正版的专业版,下载安装好后,他要你激活软件的时候,选择登录自己的帐号,就可以使用专业版了。 +不过,记得把之前装过的盗版全部清理干净不然会激活失败的。 +## 2. 使用idea的数据库可视化工具插件连接数据库 +回归正题,怎么用idea进行数据库连接呢?其实和大部分的可视化工具是一样的 +首先打开我们opengauss的虚拟机或着docker +我们先创建一个java项目,然后看到他的右边有一个database +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171716180.png) +点击一下,因为我之前连过一个了,可能会不太一样点开后按照顺序点击 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702191923544.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) + +界面如下,在箭头所指的地方有一个要下载的插件,但是我下过了,所以没有了,记得点击下载。 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171757405.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +按照顺序依次填入: +自己openguass的ip地址,这个如果是跟着我上面的步骤安装就是用localhost或者127.0.0.1就可以了 +自己openguass的端口号,这个如果是跟着我上面的步骤安装就是15432 +填gaussdb不管你创建的时候起的什么名字都填gaussdb(gaussdb是docker安装时的内置测试用户,权限很高) +填自己设置的密码(就是自己设置的密码了,如果你连密码都跟我一样就是2222@aaaA) +点击测试一下连接 +如果成功了会有这样的结果 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171813409.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +! 
+点击应用,和ok就可以连接了,逐级点开选项就可以看到,自己的表格模式信息了,右键我图中的test +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171827847.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +# 三、java链接openguass数据库 +右键数据库图标的test,new->Query Console就可以进入sql语句的编写界面了。 +然后是用java连接数据库,首先要下载一个jar包,如果是jdk8以上的版本,可以点击这个连接下载https://jdbc.postgresql.org/download/postgresql-42.2.20.jar +不是的话需要到postgresql的官网寻找合适的jar +下载好后放到我们能找到的位置 +然后右键我们的工程文件名 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171839676.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +点击Open Module Setting->dependencies +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171852572.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +点击+号,点击jar or derectiories,找到之前下载的jar包,添加进去,然后打个勾,就可以了 +![在这里插入图片描述](https://img-blog.csdnimg.cn/2021070217190554.png) +最后长这样就行,记住一定要是自己的工程目录下,点错了就没有用的。 +代码的话,随便建个java文件,写个主类,然后写下面的代码就行,都有注释的,使用idea的时候如果下面代码有报错,就点击alt + 回车他会自动帮你补错 + +``` +Connection c = null; + Statement stmt = null; + try { + // 获取数据库源,固定写法 + Class.forName("org.postgresql.Driver"); + // 这个是连接,该一下中文的部分,就是之前你用database连数据库的那些参数 + c = DriverManager + .getConnection("jdbc:postgresql://你的IP地址:你的端口号/要连接的数据库", + "gaussdb", "高斯数据库的密码"); + // 连接成功 + System.out.println("Opened database successfully"); +// 这里创建一个类似于可视化工具中的console的那个脚本文件 + stmt = c.createStatement(); +// 这里写sql语句,做创建表的演示 + String sql = "CREATE TABLE COMPANY1 " + + "(ID INT PRIMARY KEY NOT NULL," + + " NAME TEXT NOT NULL, " + + " AGE INT NOT NULL, " + + " ADDRESS CHAR(50), " + + " SALARY REAL)"; +// String sql = "DROP TABLE COMPANY"; + stmt.executeUpdate(sql); +// 关闭脚本文件 + stmt.close(); +// 结束连接 + c.close(); + } catch ( Exception e ) { + System.err.println( e.getClass().getName()+": "+ e.getMessage() ); + System.exit(0); + } + System.out.println("Table created successfully"); +} +``` + +展示一下运行结果: +这是运行前数据库的内容,我们执行上面的代码后会创建一个表company1的表 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171919787.png) +结果如下: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702171929641.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70) +写sql语句的地方是可以换成任何一个sql语句的 +# 四、最后说一个小问题 +重新装了个数据库现在没法展示了,但是我记得问题,就是当前sql语句中所有的表都报错,他最有可能的是没有选对模式 +点击database旁边的xxx.,点击对应的数据然后选择自己的表所在的模式, +选择好后,点OK就可以了。 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210702192143900.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) + +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210409150842510.jpg?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center, =200x200 ) +
    Gauss松鼠会是汇集数据库爱好者和关注者的大本营,
    + +
    大家共同学习、探索、分享数据库前沿知识和技术,
    + +
    互助解决问题,共建数据库技术交流圈。
    + +
    +openGauss官网 +
    diff --git "a/content/zh/post/zhengwen2/openGauss\345\234\250kubernetes\351\233\206\347\276\244\347\216\257\345\242\203\344\270\212\347\232\204\351\203\250\347\275\262.md" "b/content/zh/post/zhengwen2/openGauss\345\234\250kubernetes\351\233\206\347\276\244\347\216\257\345\242\203\344\270\212\347\232\204\351\203\250\347\275\262.md" new file mode 100644 index 0000000000000000000000000000000000000000..f9abb6d47aba108512df7f0b4f0f17b60d110989 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\345\234\250kubernetes\351\233\206\347\276\244\347\216\257\345\242\203\344\270\212\347\232\204\351\203\250\347\275\262.md" @@ -0,0 +1,253 @@ ++++ + +title = "openGauss在kubernetes集群环境上的部署" + +date = "2021-07-10" + +tags = ["openGauss在kubernetes集群环境上的部署"] + +archives = "2021-07" + +author = "华军" + +summary = "openGauss在kubernetes集群环境上的部署" + +img = "/zh/post/zhengwen2/img/img21.png" + +times = "12:30" + ++++ + +# opengauss实践总结学习心 + +openGauss是一款开源关系型数据库管理系统 , 深度融合华为在数据库领域多年的经验,结合企业级场景需求,持续构建竞争力特性;kubernetes也是一个开源的,用于管理云平台中多个主机上的容器化的应用,Kubernetes的目标是让部署容器化的应用简单并且高效,Kubernetes提供了应用部署,规划,更新,维护的一种机制, 本篇文章将介绍openGauss在kubernetes集群环境上的部署探索。 + +# 1.检查k8s运行环境 + +``` +[root@n-k8s-m ~]# kubectl get node +>NAME STATUS ROLES AGE VERSION +n-k8s-m Ready master 349d v1.18.0 +``` + + + +# 2.查看准备好的openGauss的docker镜像
    + + + +``` +[root@n-k8s-m ~]# docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +nginx latest 4cdc5dd7eaad 36 hours ago 133MB +opengauss 2.0.0 757bf74560e3 5 weeks ago 639MB +``` + + + +# 3.安装NFS服务器存储
    + +``` +#安装依赖包 +[root@n-k8s-m ~]#yum -y install nfs-utils rpcbind +#开机启动 +[root@n-k8s-m ~]#systemctl enable rpcbind.service +[root@n-k8s-m ~]#systemctl enable nfs-server.service +[root@n-k8s-m ~]#systemctl start rpcbind.service #端口是111 +[root@n-k8s-m ~]#systemctl start nfs-server.service # 端口是 2049 +#配置NFS +[root@n-k8s-m ~]#mkdir /home/pv1 +[root@n-k8s-m ~]#chown nfsnobody:nfsnobody /home/pv1 +[root@n-k8s-m ~]#cat /etc/exports

    + +
    /home/pv1 192.168.137.0/24(rw,async,all_squash)  
    +
    +

    [root@n-k8s-m ~]#exportfs -rv

    +
    exporting 192.168.137.0/24:/home/pv1
    +
    + +/home/pv1 192.168.137.0/24(rw,async,all_squash) +>[root@n-k8s-m ~]#exportfs -rv +exporting 192.168.137.0/24:/home/pv1 +``` + + + +# 4.创建openGauss所使用的存储pv
    + +``` +#编写yaml文件 +[root@n-k8s-m ~]# cat opengauss_pv.yml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: opengauss-pv + labels: + type: nfs001 +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Recycle + nfs: + path: "/home/pv1" + server: 192.168.137.61 + readOnly: false + +#创建pv + [root@n-k8s-m opengauss]# kubectl create -f opengauss_pv.yml + persistentvolume/opengauss-pv created +#查看创建pv + [root@n-k8s-m opengauss]# kubectl get pv + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +opengauss-pv 1Gi RWX Recycle Available +``` + + + +# 5.创建openGauss所使用的存储pvc
    + +``` +#编写yaml文件 +[root@n-k8s-m ~]# cat opengauss_pvc.yml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: opengauss-pvc +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +#创建pvc + [root@n-k8s-m opengauss]# kubectl create -f opengauss_pvc.yml + persistentvolumeclaim/opengauss-pv created +#查看创建pvc + [root@n-k8s-m opengauss]# kubectl get pvc + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +opengauss-pvc Bound opengauss-pv 1Gi RWX 4s +``` + + + +# 6.创建openGauss的Deployment
    + +``` +#编写yaml文件 +apiVersion: apps/v1 +kind: Deployment +metadata: + name: opengauss-deployment +spec: + selector: + matchLabels: + app: opengauss + strategy: + type: Recreate + template: + metadata: + labels: + app: opengauss + spec: + containers: + - image: opengauss:2.0.0 + name: opengauss-service + imagePullPolicy: IfNotPresent + env: + - name: GS_PASSWORD + value: Gauss@123 + ports: + - containerPort: 5432 + name: opengauss + volumeMounts: # 挂载Pod上的卷到容器 + - name: opengauss-persistent-storage # Pod上卷的名字,与“volumes”名字匹配 + mountPath: /var/lib/opengauss # 挂载的Pod的目录 + volumes: # 挂载持久卷到Pod + - name: opengauss-persistent-storage # 持久卷名字, 与“volumMounts”名字匹配 + persistentVolumeClaim: + claimName: opengauss-pvc # 持久卷申请名字 + + #创建Deployment
    [root@n-k8s-m opengauss]# kubectl create -f opengauss_deploy.yaml + deployment.apps/opengauss-deployment create + #查看创建Deploymen + [root@n-k8s-m opengauss]# kubectl get deploy + NAME                           READY   UP-TO-DATE   AVAILABLE   AGEopengauss-deployment     1/1                 1           1           110s + +#创建Deployment +[root@n-k8s-m opengauss]# kubectl create -f opengauss_deploy.yaml +deployment.apps/opengauss-deployment created + +#查看创建Deployment +[root@n-k8s-m opengauss]# kubectl get deploy +NAME READY UP-TO-DATE AVAILABLE AGE +opengauss-deployment 1/1 1 1 110s​ +``` + + + +# 7.创建openGauss的Service提供集群内部和外部的高可用访问
    + +``` +#编写yaml文件 +apiVersion: v1 +kind: Service +metadata: + name: opengauss-service + labels: + app: opengauss +spec: + type: NodePort + selector: + app: opengauss + ports: + - protocol : TCP + nodePort: 32222 + port: 5432 + targetPort: 5432 +#创建openGauss的Service + [root@n-k8s-m opengauss]# kubectl create -f opengauss_svc.yaml + >service/opengauss-service created +#查看创建Service + [root@n-k8s-m opengauss]# kubectl get svc + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +opengauss-service NodePort 10.101.64.232 <none> 5432:32222/TCP 6s +``` + + + +# 8.连接openGauss数据库
    + +``` +#使用kubectl内部连接数据库 +[root@n-k8s-m opengauss]# kubectl get pod +NAME READY STATUS RESTARTS AGE +opengauss-deployment-6b8b4645f8-bfk4w 1/1 Running 0 15m +[root@n-k8s-m opengauss]# kubectl exec -it opengauss-deployment-6b8b4645f8-bfk4w sh +kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. +sh-4.2# su - omm +[omm@opengauss-deployment-6b8b4645f8-bfk4w ~]$ gsql +gsql ((openGauss 2.0.0 build 78689da9) compiled at 2021-03-31 21:04:03 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. +omm=# \c +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "omm" as user "omm". +omm=# \l + List of databases +Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+------------+------------+------------------- + omm | omm | UTF8 | en_US.utf8 | en_US.utf8 | + postgres | omm | UTF8 | en_US.utf8 | en_US.utf8 | + template0 | omm | UTF8 | en_US.utf8 | en_US.utf8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.utf8 | en_US.utf8 | =c/omm + + | | | | | omm=CTc/omm +(4 rows) +``` + +

    #使用pgadmin4外部连接数据库
    +image.png
    +image.png

    + diff --git "a/content/zh/post/zhengwen2/openGauss\345\256\236\351\252\214\345\277\203\345\276\227\344\271\213gs_basebackup\347\211\251\347\220\206\345\244\207\344\273\275\344\270\216\346\201\242\345\244\215.md" "b/content/zh/post/zhengwen2/openGauss\345\256\236\351\252\214\345\277\203\345\276\227\344\271\213gs_basebackup\347\211\251\347\220\206\345\244\207\344\273\275\344\270\216\346\201\242\345\244\215.md" new file mode 100644 index 0000000000000000000000000000000000000000..524496efd2d481d5d56ab8c1e7ddc61e0eb540a4 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\345\256\236\351\252\214\345\277\203\345\276\227\344\271\213gs_basebackup\347\211\251\347\220\206\345\244\207\344\273\275\344\270\216\346\201\242\345\244\215.md" @@ -0,0 +1,156 @@ ++++ + +title = "openGauss实验心得之gs_basebackup物理备份与恢复" + +date = "2021-07-10" + +tags = ["openGauss实验心得之gs_basebackup物理备份与恢复"] + +archives = "2021-07" + +author = "Mia" + +summary = "openGauss实验心得之gs_basebackup物理备份与恢复" + +img = "/zh/post/zhengwen2/img/img24.png" + +times = "12:30" + ++++ + +# openGauss实验心得之gs_basebackup物理备份与恢复 + +2021年4月份开始接触openGauss并做openGauss的有关实验,今天记下gs_basebackup物理备份的实验经历:-D,以免未来忘记。(部分内容可能有疏漏,望包容和指出) +注:实验的设计思路参考于华为openGauss的指导手册。 + +# 1.数据库物理备份介绍 + +数据库物理备份指的是对数据库一些关键文件如日志、配置文件、关键数据等进行备份在数据库遭到破坏时能从备份处进行恢复。同时gs_basebackup备份的是数据库的二进制文件,因此在恢复是可以直接拷贝替换原有的文件或者直接在备份的库启动数据库。 + +## 2.gs_basebackup实验 + + +

    2.1 物理备份

    +

    用ssh命令首先登入openGauss所在的弹性公网并切换到omm用户

    +
    ssh root@弹性公网地址 //并输入密码
    +cd /opt/software/openGauss/script
    +su - omm
    +
    +

    成功登入的截图如下:
    +1.png
    +创建存储备份文件的文件夹并用ls命令查看

    +
    ls
    +//第一次结果显示为collector.json  logical
    +mkdir -p /home/omm/physical/backup
    +//第二次结果显示为collector.json  logical  physical
    +
    +

    为了进行对比在破坏数据库前先启动数据库

    +
    gs_om -t start
    +
    +

    结果显示如下:

    +
    Starting cluster.
    +=========================================
    +=========================================
    +Successfully started.
    +
    +

    将数据库进行物理备份(物理备份前一定要先启动数据库)

    +
    gs_basebackup -D /home/omm/physical/backup -p 26000
    +//参数-D directory表示备份文件输出的目录,是必选项。
    +
    +

    结果显示如下:

    +
    INFO:  The starting position of the xlog copy of the full build is: 0/5000028. The slot minimum LSN is: 0/0.
    +begin build tablespace list
    +finish build tablespace list
    +begin get xlog by xlogstream
    +                                                                                 check identify system success
    +                                                                                 send START_REPLICATION 0/5000000 success
    +                                                                                 keepalive message is received
    +                                                                                 keepalive message is received
    +                                                                                 keepalive message is received
    +                                                                                 keepalive message is received
    +                                                                                 keepalive message is received
    +gs_basebackup: base backup  successfully
    +
    +
    +

    2.2 数据库破坏

    +

    停止数据库服务

    +
    gs_om -t stop
    +
    +

    结果显示为:

    +
    Stopping cluster.
    +=========================================
    +Successfully stopped cluster.
    +=========================================
    +End stop cluster.
    +
    +

    查看数据库文件

    +
    cd /gaussdb/data
    +ls
    +//此处结果显示为db1(不同数据库节点文件可能不一样)
    +cd db1
    +ls
    +
    +

    结果显示如下:

    +
    backup_label.old    pg_ctl.lock       pg_replslot      postgresql.conf.bak
    +base                pg_errorinfo      pg_serial        postgresql.conf.lock
    +cacert.pem          pg_hba.conf       pg_snapshots     postmaster.opts
    +gaussdb.state       pg_hba.conf.bak   pg_stat_tmp      server.crt
    +global              pg_hba.conf.lock  pg_tblspc        server.key
    +gswlm_userinfo.cfg  pg_ident.conf     pg_twophase      server.key.cipher
    +mot.conf            pg_llog           PG_VERSION       server.key.rand
    +pg_clog             pg_multixact      pg_xlog
    +pg_csnlog           pg_notify         postgresql.conf
    +
    +

    其中.conf文件为认证文件,log文件为日志文件
    +破坏db1文件

    +
    rm -rf  *
    +ls
    +
    +

    在破坏后该文件夹的内容应该为空
    +尝试重新启动数据库

    +
    gs_om -t start
    +
    +

    此时显示的结果如下:

    +
    Starting cluster.
    +=========================================
    +[GAUSS-53600]: Can not start the database, the cmd is source /home/omm/.bashrc; python3 '/opt/huawei/wisequery/script/local/StartInstance.py' -U omm -R /opt/gaussdb/app -t 300 --security-mode=off,  Error:
    +[FAILURE] ecs-a560:
    +[GAUSS-51607] : Failed to start instance. Error: Please check the gs_ctl log for failure details.
    +[2021-07-05 21:12:05.825][8792][][gs_ctl]: gs_ctl started,datadir is /gaussdb/data/db1 
    +[2021-07-05 21:12:05.904][8792][][gs_ctl]: /gaussdb/data/db1/postgresql.conf cannot be opened..
    +
    +

    数据库服务启动失败

    +

    2.3 数据库恢复

    +

    利用cp命令恢复数据库

    +
     cp -r /home/omm/physical/backup/.  /gaussdb/data/db1
    +
    +

    备份完后查看db1中内容

    +
    cd /gaussdb/data/db1
    +ls
    +
    +

    结果显示如下:

    +
    backup_label        pg_ctl.lock       pg_replslot      postgresql.conf.bak
    +backup_label.old    pg_errorinfo      pg_serial        postgresql.conf.lock
    +base                pg_hba.conf       pg_snapshots     server.crt
    +cacert.pem          pg_hba.conf.bak   pg_stat_tmp      server.key
    +global              pg_hba.conf.lock  pg_tblspc        server.key.cipher
    +gswlm_userinfo.cfg  pg_ident.conf     pg_twophase      server.key.rand
    +mot.conf            pg_llog           PG_VERSION
    +pg_clog             pg_multixact      pg_xlog
    +pg_csnlog           pg_notify         postgresql.conf
    +
    +

    此时再次启动数据库服务

    +
    gs_om -t start
    +
    +

    显示结果如下,数据库服务重新启动,备份恢复成功

    +
    Starting cluster.
    +=========================================
    +=========================================
    +Successfully started.
    +
    +

    3.实验心得

    +

    (1)在做过实验后不难体会到物理备份就是将数据库的关键文件拷贝到指定目录,在数据库遭到破坏后将指定目录文件拷贝回去的方法。
    +(2)总结该实验的步骤:创建用于存放备份的目录—使用gs_basebackup进行备份——利用rm模拟数据库的破坏——利用cp命令将备份文件复制回遭到破坏的目录。
    +(3)openGauss有趣的命令很多,有待大家一起发掘探讨:-D

    + +注:本篇文章为原创文章,转载请注明出处哦~ diff --git "a/content/zh/post/zhengwen2/openGauss\345\277\253\351\200\237\345\256\211\350\243\205\346\226\271\346\263\225(docker).md" "b/content/zh/post/zhengwen2/openGauss\345\277\253\351\200\237\345\256\211\350\243\205\346\226\271\346\263\225(docker).md" new file mode 100644 index 0000000000000000000000000000000000000000..6415bc7efd90106bfd1039f9d1df956204fd2c95 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\345\277\253\351\200\237\345\256\211\350\243\205\346\226\271\346\263\225(docker).md" @@ -0,0 +1,88 @@ ++++ + +title = "opengauss快速安装方法(docker)" + +date = "2021-07-09" + +tags = [ "opengauss快速安装方法(docker)"] + +archives = "2021-10" + +author = "DSLS" + +summary = "opengauss快速安装方法(docker)" + +img = "/zh/post/zhengwen2/img/img20.png" + +times = "12:30" + ++++ + +# opengauss实践总结学习心 + +放开安全组(可选) +云服务器需要开放端口以供外部连接。设置如下: + +

     su

    +

    开放22端口,用于远程SSH的连接。开放8887端口,用于数据库的连接。

    +

    如果你闲麻烦,大可开放所有端口。

    +

    8887端口不是固定的,可以任意设置,但不要和已占用的端口冲突。。

    +

    如果是本地的虚拟机,则不需要上述设置。顺便一提,如果你想让同一局域网的其他设备(比如你舍友的电脑)连接到你的数据库,请把Windows防火墙关闭。

    +

     f

    +

    登录服务器

    +使用SSH远程登录到服务器之后,即可开始之后的步骤。 +

    执行命令SSH 账户名@域名或IP地址连接到远程服务器,连接上之后输入密码登录。

    +

    如SSH root@db.example.cn或SSH root@127.0.0.1。

    +

    如果是本地虚拟机,请开机输入密码登录即可。

    +

    关闭防火墙

    +执行命令systemctl stop firewalld.service停止防火墙。 +

    执行命令systemctl disable firewalld.service关闭防火墙。

    +

    之后reboot重启。

    +

    换源(可选)(耗时警告)

    +换国内源以加快程序包下载速度。注意系统版本:CentOS 7 +

    执行命令:cp /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak备份。

    +

    执行命令wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo更换阿里源。

    +

    执行命令yum clean all清除缓存。

    +

    执行命令yum makecache生成缓存。

    +

    执行命令yum -y update更新yum源。

    +

    安装dokcer

    +执行命令yum -y install docker安装docker。 +

    执行命令systemctl start docker启动docker服务。

    +

    执行命令systemctl enable docker开机启动docker。(可选)

    +

    docker加速(可选)

    +为了pull镜像更快,可以配置镜像加速服务器。镜像加速地址可以百度,暂时可以用我的加速地址:https://8h88nptu.mirror.aliyuncs.com。 +

    顺便一提:阿里云镜像获取地址:https://cr.console.aliyun.com/cn-hangzhou/instances/mirrors,登陆后,左侧菜单选中镜像加速器就可以看到你的专属地址了

    +

    配置镜像地址,执行命令vi /etc/docker/daemon.json修改配置文件,如该文件不存在,则创建。在其中加入内容:

    +
    1. {"registry-mirrors":["https://8h88nptu.mirror.aliyuncs.com"]}

    +加速地址仅供参考

    +

    依次执行命令systemctl daemon-reload和systemctl restart docker重新启动docker。

    +

    拉取openGauss镜像并启动

    +执行docker run --name opengauss --privileged=true -d -e GS_PASSWORD=Enmo@123 -p 8887:5432 enmotech/opengauss:latest拉取镜像并创建容器。 +

    其中,opengauss为容器名,8887:5432为容器内部的5432端口映射到外部8887端口,默认密码为Enmo@123。

    +

    之后执行docker start opengauss启动openGauss镜像。

    +

    通过docker update --restart=always opengauss来设置openGauss镜像随着docker的启动而启动

    +

    至此openGauss安装完成

    +

    数据库的设置

    +执行命令docker exec -it opengauss bash进入容器。 +

    执行命令su - omm切换到omm账户。

    +

    执行命令gsql进入数据库。

    +

    因为外部连接时,不允许使用初始账户omm,所以新建一个账户。

    +

    执行语句CREATE USER testuser WITH PASSWORD ‘Enmo@123’;创建一个名为testuser,密码为Enmo@123的账户。

    +

    执行语句GRANT ALL PRIVILEGES ON DATABASE omm testuser;给予testuser默认数据库omm权限。

    +

    执行语句GRANT ALL PRIVILEGES ON all tables in schema public TO testuser;给予全部表权限给testuser。

    +

    完成设置。

    +

    外部连接

    +这里使用开源软件DBeaver来连接数据库。 +

    如下图所示,在左侧区域右键,创建->连接。

    +

     1

    +

    选择PostgreSQL。

    +

     2

    +

    设置主机地址为你的服务器/虚拟机IP地址,端口设置为8887。数据库为omm,用户名和密码为刚才设置的用户名和密码。(testuser,Enmo@123)

    +

     5

    +

    进入SQL编辑器,输入语句SELECT 1;来测试可用性。

    +

     6

    +

    结束

    + + + + diff --git "a/content/zh/post/zhengwen2/openGauss\346\225\260\346\215\256\345\272\223\347\273\264\346\212\244\347\256\241\347\220\206.md" "b/content/zh/post/zhengwen2/openGauss\346\225\260\346\215\256\345\272\223\347\273\264\346\212\244\347\256\241\347\220\206.md" new file mode 100644 index 0000000000000000000000000000000000000000..fc2048b41b791ab6d414af877f690267a77a2b6f --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\346\225\260\346\215\256\345\272\223\347\273\264\346\212\244\347\256\241\347\220\206.md" @@ -0,0 +1,1400 @@ ++++ + +title = "openGauss数据库维护管理.md" + +date = "2021-07-09" + +tags = ["openGauss数据库维护管理.md"] + +archives = "2021-07" + +author = "七月" + +summary = "openGauss数据库维护管理.md" + +img = "/zh/post/zhengwen2/img/img25.png" + +times = "12:30" + ++++ + +# openGauss数据库维护管理.md + +1 操作系统参数检查
    +1.1 实验介绍
    +1.1.1 关于本实验
    +gs_checkos工具用来帮助检查操作系统、控制参数、磁盘配置等内容,并对系统控制参数、I/O配置、网络配置和THP服务等信息进行配置。
    +本实验主要是通过gs_checkos工具来检查操作系统参数设置是否合理。先进行场景设置,然后根据检查结果进行参数调整。
    +1.1.2 实验目的
    +掌握gs_checkos工具的基本使用;
    +1.2 场景设置及操作步骤
    +步骤 1用ROOT用户登录装有openGauss数据库服务的操作系统,登录后信息如下:

    + +

    Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Mon Jul 20 16:41:11 CST 2020
    +System load: 0.00
    +Processes: 113
    +Memory used: 7.0%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 2
    +[root@ecs-e1b3 ~]#

    +

    步骤 2在ROOT用户下执行gs_checkos先对系统参数进行检查。
    +[root@ecs-e1b3 ~]# gs_checkos -i A
    +Checking items:
    +A1. [ OS version status ] : Normal
    +A2. [ Kernel version status ] : Normal
    +A3. [ Unicode status ] : Normal
    +A4. [ Time zone status ] : Normal
    +A5. [ Swap memory status ] : Normal
    +A6. [ System control parameters status ] : Warning
    +A7. [ File system configuration status ] : Normal
    +A8. [ Disk configuration status ] : Normal
    +A9. [ Pre-read block size status ] : Normal
    +A10.[ IO scheduler status ] : Normal
    +BondMode Null
    +A11.[ Network card configuration status ] : Warning
    +A12.[ Time consistency status ] : Warning
    +A13.[ Firewall service status ] : Normal
    +A14.[ THP service status ] : Normal
    +Total numbers:14. Abnormal numbers:0. Warning numbers:3.
    +说明事项:
    +Normal 为正常项,Abnormal为必须处理项,Warning可以不处理。
    +Total numbers:14. Abnormal numbers:0. Warning numbers:3。
    +表示:总共检查14项,其中Abnormal必须处理项为0,Warning告警项为3。

    +

    步骤 3调整系统参数值。
    +在参数配置文件(/etc/sysctl.conf)中将参数 vm.min_free_kbytes(表示:内核内存分配保留的内存量) 的值调整为3488。输入“i”进入INSERT模式,进行修改。
    +[root@ecs-e1b3 ~]# vi /etc/sysctl.conf
    +net.ipv4.conf.default.accept_redirects=0
    +net.ipv4.conf.all.secure_redirects=0
    +net.ipv4.conf.default.secure_redirects=0
    +net.ipv4.icmp_echo_ignore_broadcasts=1
    +net.ipv4.icmp_ignore_bogus_error_responses=1
    +…………
    +net.ipv4.tcp_rmem = 8192 250000 16777216
    +net.ipv4.tcp_wmem = 8192 250000 16777216
    +vm.min_free_kbytes = 3488
    +net.core.netdev_max_backlog = 65535
    +net.ipv4.tcp_max_syn_backlog = 65535
    +net.core.somaxconn = 65535
+After changing the value, press Esc to exit edit mode, then type :wq and press Enter to save. Then run sysctl -p to make the change take effect, as follows:
    +[root@ecs-e1b3 ~]# sysctl -p
    +kernel.sysrq = 0
    +net.ipv4.ip_forward = 0
    +net.ipv4.conf.all.send_redirects = 0
    +net.ipv4.conf.default.send_redirects = 0
    +net.ipv4.conf.all.accept_source_route = 0
    +net.ipv4.conf.default.accept_source_route = 0
    +net.ipv4.conf.all.accept_redirects = 0
    +net.ipv4.conf.default.accept_redirects = 0
    +……………
    +net.core.rmem_default = 21299200
    +net.sctp.sctp_mem = 94500000 915000000 927000000
    +net.sctp.sctp_rmem = 8192 250000 16777216
    +net.sctp.sctp_wmem = 8192 250000 16777216
    +kernel.sem = 250 6400000 1000 25600
    +net.ipv4.tcp_rmem = 8192 250000 16777216
    +net.ipv4.tcp_wmem = 8192 250000 16777216
    +vm.min_free_kbytes = 3488
    +net.core.netdev_max_backlog = 65535
    +net.ipv4.tcp_max_syn_backlog = 65535
    +net.core.somaxconn = 65535
    +kernel.shmall = 1152921504606846720
    +kernel.shmmax = 18446744073709551615
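As an alternative to editing the file interactively in vi, the same change can be scripted; a minimal sketch, assuming root privileges and that vm.min_free_kbytes is not already present in /etc/sysctl.conf:

```bash
# Append the setting, apply it, and verify the running value.
echo "vm.min_free_kbytes = 3488" >> /etc/sysctl.conf
sysctl -p                       # reload /etc/sysctl.conf
sysctl -n vm.min_free_kbytes    # print the value now in effect
```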

    +

Step 4: Run gs_checkos again to check the system parameters.
    +[root@ecs-e1b3 ~]# gs_checkos -i A
    +Checking items:
    +A1. [ OS version status ] : Normal
    +A2. [ Kernel version status ] : Normal
    +A3. [ Unicode status ] : Normal
    +A4. [ Time zone status ] : Normal
    +A5. [ Swap memory status ] : Normal
    +A6. [ System control parameters status ] : Abnormal
    +A7. [ File system configuration status ] : Normal
    +A8. [ Disk configuration status ] : Normal
    +A9. [ Pre-read block size status ] : Normal
    +A10.[ IO scheduler status ] : Normal
    +BondMode Null
    +A11.[ Network card configuration status ] : Warning
    +A12.[ Time consistency status ] : Warning
    +A13.[ Firewall service status ] : Normal
    +A14.[ THP service status ] : Normal
    +Total numbers:14. Abnormal numbers:1. Warning numbers:2.
    +Do checking operation finished. Result: Abnormal.
+Here the status of A6. [ System control parameters status ] is Abnormal, which is an item that must be handled.
+Total numbers:14. Abnormal numbers:1. Warning numbers:2.
+This means 14 items were checked in total, with 1 Abnormal item that must be handled and 2 Warning items.

    +

Step 5: Run gs_checkos -i A --detail to view more detailed information.
    +[root@ecs-e1b3 ~]# gs_checkos -i A --detail
    +Checking items:
    +A1. [ OS version status ] : Normal
    +[ecs-e1b3]
    +openEuler_20.03_64bit
    +A2. [ Kernel version status ] : Normal
+The names about all kernel versions are same. The value is "4.19.90-2003.4.0.0036.oe1.aarch64".
+A3. [ Unicode status ] : Normal
+The values of all unicode are same. The value is "LANG=en_US.UTF-8".
+A4. [ Time zone status ] : Normal
+The informations about all timezones are same. The value is "+0800".
+A5. [ Swap memory status ] : Normal
+The value about swap memory is correct.
+A6. [ System control parameters status ] : Abnormal
+[ecs-e1b3]
+Abnormal reason: variable 'vm.min_free_kbytes' RealValue '3488' ExpectedValue '348844'.
+Warning reason: variable 'net.ipv4.tcp_retries1' RealValue '3' ExpectedValue '5'.
+Warning reason: variable 'net.ipv4.tcp_syn_retries' RealValue '6' ExpectedValue '5'.
+Warning reason: variable 'net.sctp.path_max_retrans' RealValue '5' ExpectedValue '10'.
+Warning reason: variable 'net.sctp.max_init_retransmits' RealValue '8' ExpectedValue '10'.
    +Check_SysCtl_Parameter failed.
    +A7. [ File system configuration status ] : Normal
    +Both soft nofile and hard nofile are correct.
    +A8. [ Disk configuration status ] : Normal
    +The value about XFS mount parameters is correct.
    +A9. [ Pre-read block size status ] : Normal
    +The value about Logical block size is correct.
    +A10.[ IO scheduler status ] : Normal
    +The value of IO scheduler is correct.
    +BondMode Null
    +A11.[ Network card configuration status ] : Warning
    +[ecs-e1b3]
    +BondMode Null
    +Warning reason: Failed to obtain the network card speed value. Maybe the network card “eth0” is not working.
    +A12.[ Time consistency status ] : Warning
    +[ecs-e1b3]
    +The NTPD not detected on machine and local time is “2020-07-20 17:16:41”.
    +A13.[ Firewall service status ] : Normal
    +The firewall service is stopped.
    +A14.[ THP service status ] : Normal
    +The THP service is stopped.
    +Total numbers:14. Abnormal numbers:1. Warning numbers:2.
    +Do checking operation finished. Result: Abnormal.
+The detailed information shows clearly which parameters are set incorrectly and gives the reference values they should be changed to, as follows:
+A6. [ System control parameters status ] : Abnormal
+[ecs-e1b3]
+Abnormal reason: variable 'vm.min_free_kbytes' RealValue '3488' ExpectedValue '348844'.
+Warning reason: variable 'net.ipv4.tcp_retries1' RealValue '3' ExpectedValue '5'.
+Warning reason: variable 'net.ipv4.tcp_syn_retries' RealValue '6' ExpectedValue '5'.
+Warning reason: variable 'net.sctp.path_max_retrans' RealValue '5' ExpectedValue '10'.
+Warning reason: variable 'net.sctp.max_init_retransmits' RealValue '8' ExpectedValue '10'.
+Check_SysCtl_Parameter failed.

    +

Step 6: Modify the system parameters as instructed in the detailed information.
+Change vm.min_free_kbytes from 3488 to 348844.
+Change net.ipv4.tcp_retries1 from 3 to 5.
+Change net.ipv4.tcp_syn_retries from 6 to 5.
+Change net.sctp.path_max_retrans from 5 to 10.
+Change net.sctp.max_init_retransmits from 8 to 10.
+The concrete settings are as follows:
+vm.min_free_kbytes = 348844
+net.ipv4.tcp_retries1 = 5
+net.ipv4.tcp_syn_retries = 5
+net.sctp.path_max_retrans = 10
+net.sctp.max_init_retransmits = 10
+Make the changes in the system parameter file (press "i" to enter INSERT mode):
    +[root@ecs-e1b3 ~]# vi /etc/sysctl.conf

    +

# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.sysrq=0
+net.ipv4.ip_forward=0
+net.ipv4.conf.all.send_redirects=0
+net.ipv4.conf.default.send_redirects=0
+net.ipv4.conf.all.accept_source_route=0
+net.ipv4.conf.default.accept_source_route=0
+net.ipv4.conf.all.accept_redirects=0
    +……………
    +net.sctp.sctp_rmem = 8192 250000 16777216
    +net.sctp.sctp_wmem = 8192 250000 16777216
    +kernel.sem = 250 6400000 1000 25600
    +net.ipv4.tcp_rmem = 8192 250000 16777216
    +net.ipv4.tcp_wmem = 8192 250000 16777216
    +vm.min_free_kbytes = 348844
    +net.core.netdev_max_backlog = 65535
    +net.ipv4.tcp_max_syn_backlog = 65535
    +net.core.somaxconn = 65535
    +kernel.shmall = 1152921504606846720
    +kernel.shmmax = 18446744073709551615
    +net.ipv4.tcp_retries1 = 5
    +net.ipv4.tcp_syn_retries = 5
    +net.sctp.path_max_retrans = 10
    +net.sctp.max_init_retransmits = 10
+After the values are modified, press Esc to exit edit mode, then type :wq and press Enter to save. Then run sysctl -p to make the changes take effect, as follows:
    +[root@ecs-e1b3 ~]# sysctl -p
    +kernel.sysrq = 0
    +net.ipv4.ip_forward = 0
    +net.ipv4.conf.all.send_redirects = 0
    +net.ipv4.conf.default.send_redirects = 0
    +net.ipv4.conf.all.accept_source_route = 0
    +net.ipv4.conf.default.accept_source_route = 0
    +net.ipv4.conf.all.accept_redirects = 0
    +net.ipv4.conf.default.accept_redirects = 0
    +net.ipv4.conf.all.secure_redirects = 0
    +net.ipv4.conf.default.secure_redirects = 0
    +net.ipv4.icmp_echo_ignore_broadcasts = 1
    +net.ipv4.icmp_ignore_bogus_error_responses = 1
    +net.ipv4.conf.all.rp_filter = 1
    +net.ipv4.conf.default.rp_filter = 1
    +net.ipv4.tcp_syncookies = 1
    +kernel.dmesg_restrict = 1
    +net.ipv6.conf.all.accept_redirects = 0
    +net.ipv6.conf.default.accept_redirects = 0
    +vm.swappiness = 0
    +net.ipv4.tcp_max_tw_buckets = 10000
    +net.ipv4.tcp_tw_reuse = 1
    +…………….
    +net.ipv4.tcp_rmem = 8192 250000 16777216
    +net.ipv4.tcp_wmem = 8192 250000 16777216
    +vm.min_free_kbytes = 348844
    +net.core.netdev_max_backlog = 65535
    +net.ipv4.tcp_max_syn_backlog = 65535
    +net.core.somaxconn = 65535
    +kernel.shmall = 1152921504606846720
    +kernel.shmmax = 18446744073709551615
    +net.ipv4.tcp_retries1 = 5
    +net.ipv4.tcp_syn_retries = 5
    +net.sctp.path_max_retrans = 10
    +net.sctp.max_init_retransmits = 10

    +

Step 7: Run gs_checkos -i A again to check whether the system parameter check now passes.
    +[root@ecs-e1b3 ~]# gs_checkos -i A
    +Checking items:
    +A1. [ OS version status ] : Normal
    +A2. [ Kernel version status ] : Normal
    +A3. [ Unicode status ] : Normal
    +A4. [ Time zone status ] : Normal
    +A5. [ Swap memory status ] : Normal
    +A6. [ System control parameters status ] : Normal
    +A7. [ File system configuration status ] : Normal
    +A8. [ Disk configuration status ] : Normal
    +A9. [ Pre-read block size status ] : Normal
    +A10.[ IO scheduler status ] : Normal
    +BondMode Null
    +A11.[ Network card configuration status ] : Warning
    +A12.[ Time consistency status ] : Warning
    +A13.[ Firewall service status ] : Normal
    +A14.[ THP service status ] : Normal
    +Total numbers:14. Abnormal numbers:0. Warning numbers:2.
+The results show that the system parameter check now passes: the status of A6. [ System control parameters status ] has changed from Abnormal to Normal.
+This concludes the operating system parameter check lab.

    +

2 openGauss Health Check
+2.1 Lab Introduction
+2.1.1 About This Lab
+gs_check performs a full check of the openGauss runtime environment, operating system environment, network environment, and database execution environment while openGauss is running. It is also useful for a comprehensive check of these environments before major operations on openGauss, helping ensure they succeed.
+This lab mainly uses gs_check to check the running state of the openGauss database: a scenario is set up first, and the database is then adjusted according to the check results.
+The syntax is as follows:
+Single-item check:
+gs_check -i ITEM [...] [-U USER] [-L] [-l LOGFILE] [-o OUTPUTDIR] [--skip-root-items][--set][--routing]
+Scenario check:
+gs_check -e SCENE_NAME [-U USER] [-L] [-l LOGFILE] [-o OUTPUTDIR] [--hosts] [--skip-root-items] [--time-out=SECS][--set][--routing][--skip-items]
+Scenario check items: the default scenarios include inspect (routine inspection), upgrade (pre-upgrade inspection), binary_upgrade (pre-in-place-upgrade inspection), health (health inspection), and install (installation); users can also write their own scenarios.
+Display help information:
+gs_check -? | --help
+2.1.2 Lab Objectives
+Master the basic usage of the gs_check tool.
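For example, based on the syntax above, a routine inspection covering all default check items could be run as follows (a sketch; the report path printed at the end will differ per environment):

```bash
# Run the default "inspect" scenario as the cluster user omm.
gs_check -e inspect -U omm
```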

    +

2.2 Scenario Setup and Procedure
+Step 1: Log in as root to the operating system running the openGauss database service, then switch to the omm user environment with su - omm. The post-login information is as follows:
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 21 09:21:11 CST 2020
    +System load: 0.01
    +Processes: 109
    +Memory used: 6.7%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[root@ecs-e1b3 ~]# su - omm
    +Last login: Fri Jul 10 19:05:39 CST 2020 on pts/0
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 21 09:21:25 CST 2020
    +System load: 0.01
    +Processes: 111
    +Memory used: 7.0%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[omm@ecs-e1b3 ~]$

    +

Step 2: Check whether the openGauss database service is started.
    +[omm@ecs-e1b3 ~]$ gs_om -t status;

    +

    cluster_state : Normal
    +redistributing : No

    +

cluster_state : Normal means the service is started and usable; any state other than Normal means it is unavailable.
+For the lab scenario setup, if the database service is already started, run step 3 to stop it first.

    +

Step 3: Stop the openGauss database service.
    +[omm@ecs-e1b3 ~]$ gs_om -t stop;
    +Stopping cluster.

    +

    Successfully stopped cluster.

    +

    End stop cluster.

    +

Step 4: Check the openGauss instance connection.
    +[omm@ecs-e1b3 ~]$ gs_check -i CheckDBConnection
    +Parsing the check items config file successfully
    +Distribute the context file to remote hosts successfully
    +Start to health check for the cluster. Total Items:1 Nodes:1

    +

    Checking… [=========================] 1/1
    +Start to analysis the check result
    +CheckDBConnection…NG
    +The item run on 1 nodes. ng: 1
    +The ng[ecs-e1b3] value:
    +The database can not be connected.

    +

    Analysis the check result successfully
    +Failed. All check items run completed. Total:1 NG:1
    +For more information please refer to /opt/huawei/wisequery/script/gspylib/inspection/output/CheckReport_2020072139449163171.tar.gz
+Notes:
+CheckDBConnection...NG means the connection check item failed;
+The database can not be connected. means the instance cannot be connected to;
+Failed. All check items run completed. Total:1 NG:1 means one item was checked in total and it did not pass.

    +

Step 5: Start the openGauss database service.
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    +

    =========================================
    +Successfully started.
    +[omm@ecs-e1b3 ~]$

    +

Step 6: Confirm that the openGauss database service is started.
    +[omm@ecs-e1b3 ~]$ gs_om -t status;

    +

    cluster_state : Normal
    +redistributing : No

    +

    [omm@ecs-e1b3 ~]$

    +

Step 7: Check the openGauss instance connection again.
    +[omm@ecs-e1b3 ~]$ gs_check -i CheckDBConnection
    +Parsing the check items config file successfully
    +Distribute the context file to remote hosts successfully
    +Start to health check for the cluster. Total Items:1 Nodes:1

    +

    Checking… [=========================] 1/1
    +Start to analysis the check result
    +CheckDBConnection…OK
    +The item run on 1 nodes. success: 1

    +

    Analysis the check result successfully
    +Success. All check items run completed. Total:1 Success:1
    +For more information please refer to /opt/huawei/wisequery/script/gspylib/inspection/output/CheckReport_2020072140672174672.tar.gz

    +

Notes:
+CheckDBConnection...OK means the connection check item is normal;
+Success. All check items run completed. Total:1 Success:1 means one item was checked in total and it passed.
+This concludes the openGauss health check lab.
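Checks like this can also be scheduled rather than run by hand; a minimal sketch of a weekly cron entry for the omm user, assuming gs_check is reachable at the path configured for cron (the absolute path below is a placeholder):

```bash
# crontab -e (as omm): run the health scenario every Monday at 02:00.
0 2 * * 1 /path/to/gs_check -e health -U omm >> /home/omm/gs_check_cron.log 2>&1
```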

    +

3 Database Performance Check
+3.1 Lab Introduction
+3.1.1 About This Lab
+openGauss provides the gs_checkperf tool to help users understand the load on openGauss.
+This lab mainly checks openGauss database performance with gs_checkperf and optimizes SQL statements with EXPLAIN.
+3.1.2 Lab Objectives
+Master the basic usage of the gs_checkperf tool.
+3.2 Checking Database Performance with gs_checkperf
+Notes:
+gs_checkperf can run checks at the following levels:
+openGauss level (host CPU usage, Gauss CPU usage, I/O usage, etc.),
+node level (CPU, memory, and I/O usage),
+session/process level (CPU, memory, and I/O usage),
+SSD performance (write and read performance).
+The SSD performance check must be run as root, while the openGauss performance check must be run as the openGauss installation user.
+This lab checks openGauss performance.

    +

Step 1: Log in as root to the operating system running the openGauss database service, then switch to the omm user environment with su - omm. The post-login information is as follows:
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 21 09:21:11 CST 2020
    +System load: 0.01
    +Processes: 109
    +Memory used: 6.7%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[root@ecs-e1b3 ~]# su - omm
    +Last login: Fri Jul 10 19:05:39 CST 2020 on pts/0
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 21 09:21:25 CST 2020
    +System load: 0.01
    +Processes: 111
    +Memory used: 7.0%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[omm@ecs-e1b3 ~]$

    +

Step 2: Start the database service, run a check with gs_checkperf, then connect to the postgres database as the administrator with the gsql client, assuming port 26000.
+First start the database service.
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    + +

    =========================================
    +Successfully started.
+Run a check with gs_checkperf.
    +[omm@ecs-e1b3 ~]$ gs_checkperf
    +Cluster statistics information:
    +Host CPU busy time ratio : .72 %
    +MPPDB CPU time % in busy time : .33 %
    +Shared Buffer Hit ratio : 97.33 %
    +In-memory sort ratio : 0
    +Physical Reads : 466
    +Physical Writes : 175
    +DB size : 47 MB
    +Total Physical writes : 175
    +Active SQL count : 3
    +Session count : 4
+Check whether the openGauss database service is normal.
    +[omm@ecs-e1b3 ~]$ gs_om -t status;

    +

    cluster_state : Unavailable
    +redistributing : No

    +

cluster_state : Normal means the service is started and usable; Unavailable means it is not.
+To continue the lab, start the database service first.
+Start the database service (skip this step if the service is already normal).
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    +

    =========================================
    +Successfully started.
+Then connect to the postgres database.
    +[omm@ecs-e1b3 ~]$ gsql -d postgres -p 26000 -r
    +gsql ((openGauss 1.0.0 build 38a9312a) compiled at 2020-05-27 14:57:08 commit 472 last mr 549 )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
    +postgres=#

    +

Step 3: Collect statistics on the tables in the PMK schema.
    +postgres=# analyze pmk.pmk_configuration;
    +ANALYZE
    +postgres=# analyze pmk.pmk_meta_data;
    +ANALYZE
    +postgres=# analyze pmk.pmk_snapshot;
    +ANALYZE
    +postgres=# analyze pmk.pmk_snapshot_datanode_stat;
    +ANALYZE
    +postgres=#
+Note:
+gs_checkperf's monitoring information depends on the data in the tables of the pmk schema; if those tables have not been analyzed, gs_checkperf may fail.
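The four ANALYZE statements above can also be issued in one batch from the shell; a minimal sketch, assuming port 26000 as elsewhere in this lab:

```bash
# Analyze the pmk tables that gs_checkperf depends on.
for t in pmk.pmk_configuration pmk.pmk_meta_data pmk.pmk_snapshot pmk.pmk_snapshot_datanode_stat; do
  gsql -d postgres -p 26000 -c "ANALYZE $t;"
done
```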

    +

Step 4: Run a brief performance check.
+First exit the postgres database with \q, then run the gs_checkperf tool as the OS user omm, as follows:
    +postgres=#
    +postgres=# \q
    +[omm@ecs-e1b3 ~]$ gs_checkperf
    +Cluster statistics information:
+Host CPU busy time ratio : 1.66 % -----host CPU usage
+MPPDB CPU time % in busy time : 2.51 % ----Gauss CPU usage
+Shared Buffer Hit ratio : 99.14 % ----shared buffer hit ratio
+In-memory sort ratio : 0 ----in-memory sort ratio
+Physical Reads : 504 ----physical reads
+Physical Writes : 162 ----physical writes
+DB size : 57 MB ----database size
+Total Physical writes : 162 ----total physical writes
+Active SQL count : 4 ----number of currently running SQL statements
+Session count : 5 ----number of sessions

    +

Step 5: Run a detailed performance check.
    +[omm@ecs-e1b3 ~]$ gs_checkperf --detail
    +Cluster statistics information:
    +Host CPU usage rate:
    +Host total CPU time : 45719980.000 Jiffies
    +Host CPU busy time : 761060.000 Jiffies
    +Host CPU iowait time : 6640.000 Jiffies
    +Host CPU busy time ratio : 1.66 %
    +Host CPU iowait time ratio : .01 %
    +MPPDB CPU usage rate:
    +MPPDB CPU time % in busy time : 5.12 %
    +MPPDB CPU time % in total time : .09 %
    +Shared buffer hit rate:
    +Shared Buffer Reads : 1057
    +Shared Buffer Hits : 139798
    +Shared Buffer Hit ratio : 99.25 %
    +In memory sort rate:
    +In-memory sort count : 0
    +In-disk sort count : 0
    +In-memory sort ratio : 0
    +I/O usage:
    +Number of files : 106
    +Physical Reads : 584
    +Physical Writes : 362
    +Read Time : 5794 ms
    +Write Time : 4046 ms
    +Disk usage:
    +DB size : 57 MB
    +Total Physical writes : 362
    +Average Physical write : 89471.08
    +Maximum Physical write : 362
    +Activity statistics:
    +Active SQL count : 4
    +Session count : 5
    +Node statistics information:
    +dn_6001:
    +MPPDB CPU Time : 38960 Jiffies
    +Host CPU Busy Time : 761060 Jiffies
    +Host CPU Total Time : 45719980 Jiffies
    +MPPDB CPU Time % in Busy Time : 5.12 %
    +MPPDB CPU Time % in Total Time : .09 %
    +Physical memory : 7144341504 Bytes
    +DB Memory usage : 14922285056 Bytes
    +Shared buffer size : 1073741824 Bytes
    +Shared buffer hit ratio : 99.25 %
    +Sorts in memory : 0
    +Sorts in disk : 0
    +In-memory sort ratio : 0
    +Number of files : 106
    +Physical Reads : 584
    +Physical Writes : 362
    +Read Time : 5794
    +Write Time : 4046
    +Session statistics information(Top 10):
    +Session CPU statistics:
    +1 dn_6001-postgres-omm:
    +Session CPU time : 2
    +Database CPU time : 39020
    +Session CPU time % : .01 %
    +……………
    +Session Memory statistics:
    +1 dn_6001-postgres-omm:
    +Buffer Reads : 1309
    +Shared Buffer Hit ratio : 93.03
    +In Memory sorts : 0
    +In Disk sorts : 0
    +In Memory sorts ratio : 0
    +Total Memory Size : 7433136
    +Used Memory Size : 6443268
    +…………………
    +Session IO statistics:
    +1 dn_6001-postgres-omm:
    +Physical Reads : 98
    +Read Time : 1069
    +2 dn_6001-postgres-omm:
    +Physical Reads : 13
    +Read Time : 173
    +…………
    +[omm@ecs-e1b3 ~]$
+This concludes the gs_checkperf check lab.
+3.3 Optimizing SQL Statements with EXPLAIN
+Notes:
+EXPLAIN displays the execution plan of an SQL statement.
+The execution plan shows what scan method will be used for each table the statement references, e.g., a plain sequential scan or an index scan. If multiple tables are referenced, it also shows the JOIN algorithms used.
+The most critical part of the plan is the estimated execution cost, which is the planner's estimate of how long the statement will take to run.
+With the ANALYZE option, the statement is actually executed, and statistics based on the actual run are then displayed, including the total time spent in each plan node (in milliseconds) and the total number of rows actually returned. This is very useful for judging whether the planner's estimates are close to reality.
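For example, estimated and actual figures can be compared in one step (note that this really executes the statement, so use it with care on statements that modify data); a sketch, assuming the student table created in the steps below:

```bash
# EXPLAIN ANALYZE runs the statement and reports actual times and row counts.
gsql -d postgres -p 26000 -c "EXPLAIN ANALYZE SELECT * FROM student WHERE std_id = 30;"
```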

    +

Step 1: Log in as root to the operating system running the openGauss database service, then switch to the omm user environment with su - omm. The post-login information is as follows:
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 21 09:21:11 CST 2020
    +System load: 0.01
    +Processes: 109
    +Memory used: 6.7%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[root@ecs-e1b3 ~]# su - omm
    +Last login: Fri Jul 10 19:05:39 CST 2020 on pts/0
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 21 09:21:25 CST 2020
    +System load: 0.01
    +Processes: 111
    +Memory used: 7.0%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[omm@ecs-e1b3 ~]$

    +

Step 2: Start the database service, then connect to the postgres database as the administrator with the gsql client, assuming port 26000.
+Start the database service.
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    +

    =========================================
    +Successfully started.
+Then connect to the postgres database.
    +[omm@ecs-e1b3 ~]$ gsql -d postgres -p 26000 -r
    +gsql ((openGauss 1.0.0 build 38a9312a) compiled at 2020-05-27 14:56:08 commit 472 last mr 549 )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.

    +

    postgres=#

    +

Step 3: Create the student table.
    +postgres=# CREATE TABLE student
    +( std_id INT NOT NULL,
    +std_name VARCHAR(20) NOT NULL,
    +std_sex VARCHAR(6),
    +std_birth DATE,
    +std_in DATE NOT NULL,
    +std_address VARCHAR(100)
    +);

    +

    CREATE TABLE

    +

Step 4: Insert data into the table.
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (1,'张一','男','1993-01-01','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (2,'张二','男','1993-01-02','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (3,'张三','男','1993-01-03','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (4,'张四','男','1993-01-04','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (5,'张五','男','1993-01-05','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (6,'张六','男','1993-01-06','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (7,'张七','男','1993-01-07','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (8,'张八','男','1993-01-08','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (9,'张九','男','1993-01-09','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (10,'李一','男','1993-01-10','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (11,'李二','男','1993-01-11','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (12,'李三','男','1993-01-12','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (13,'李四','男','1993-01-13','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (14,'李五','男','1993-01-14','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (15,'李六','男','1993-01-15','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (16,'李七','男','1993-01-16','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (17,'李八','男','1993-01-17','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (18,'李九','男','1993-01-18','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (19,'王一','男','1993-01-19','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (20,'王二','男','1993-01-20','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (21,'王三','男','1993-01-21','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (22,'王四','男','1993-01-22','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (23,'王五','男','1993-01-23','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (24,'王六','男','1993-01-24','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (25,'王七','男','1993-01-25','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (26,'王八','男','1993-01-26','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (27,'王九','男','1993-01-27','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (28,'钱一','男','1993-01-28','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (29,'钱二','男','1993-01-29','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (30,'钱三','男','1993-01-30','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (31,'钱四','男','1993-02-01','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (32,'钱五','男','1993-02-02','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (33,'钱六','男','1993-02-03','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (34,'钱七','男','1993-02-04','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (35,'钱八','男','1993-02-05','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (36,'钱九','男','1993-02-06','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (37,'吴一','男','1993-02-07','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (38,'吴二','男','1993-02-08','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (39,'吴三','男','1993-02-09','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (40,'吴四','男','1993-02-10','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (41,'吴五','男','1993-02-11','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (42,'吴六','男','1993-02-12','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (43,'吴七','男','1993-02-13','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (44,'吴八','男','1993-02-14','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (45,'吴九','男','1993-02-15','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (46,'柳一','男','1993-02-16','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (47,'柳二','男','1993-02-17','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (48,'柳三','男','1993-02-18','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (49,'柳四','男','1993-02-19','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (50,'柳五','男','1993-02-20','2011-09-01','江苏省南京市雨花台区');

    +

Step 5: Query and count the data.
+postgres=# select count(*) from student;
+count
+-------
+50
+(1 row)

    +

    postgres=# select * from student order by std_id;
    +std_id | std_name | std_sex | std_birth | std_in | std_address
+--------+----------+---------+---------------------+---------------------+----------------------
    +1 | 张一 | 男 | 1993-01-01 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +2 | 张二 | 男 | 1993-01-02 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +3 | 张三 | 男 | 1993-01-03 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +4 | 张四 | 男 | 1993-01-04 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +………………

    +

Step 6: View the table information.
    +postgres=# \d student
+Table "public.student"
+Column | Type | Modifiers
+-------------+--------------------------------+-----------
    +std_id | integer | not null
    +std_name | character varying(20) | not null
    +std_sex | character varying(6) |
    +std_birth | timestamp(0) without time zone |
    +std_in | timestamp(0) without time zone | not null
    +std_address | character varying(100) |

    +

Step 7: Collect statistics on the table.
+postgres=# ANALYZE VERBOSE student;
+INFO: analyzing "public.student"(dn_6001 pid=48036)
+INFO: ANALYZE INFO : "student": scanned 1 of 1 pages, containing 50 live rows and 0 dead rows; 50 rows in sample, 50 estimated total rows(dn_6001 pid=48036)
+ANALYZE
+The ANALYZE VERBOSE statement updates the statistics and also prints information about the table.
+Step 8: View the statement's execution plan.
    +postgres=# explain select * from student where std_id=30;
    +QUERY PLAN

    +

Seq Scan on student (cost=0.00..1.62 rows=1 width=62)
    +Filter: (std_id = 30)
    +(2 rows)
+Seq Scan on student means a full-table (sequential) scan is used.

    +

Step 9: Add a primary key to the table.
    +postgres=# alter table student add primary key (std_id);
+NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "student_pkey" for table "student"
    +ALTER TABLE

    +

Step 10: View the table information again.
+Confirm that the primary key has been created.
+postgres=# \d student
+Table "public.student"
+Column | Type | Modifiers
+-------------+--------------------------------+-----------
    +std_id | integer | not null
    +std_name | character varying(20) | not null
    +std_sex | character varying(6) |
    +std_birth | timestamp(0) without time zone |
    +std_in | timestamp(0) without time zone | not null
    +std_address | character varying(100) |
    +Indexes:
+"student_pkey" PRIMARY KEY, btree (std_id) TABLESPACE pg_default
+student_pkey is the name of the primary key.
+Step 11: Optimize the statement's scan method with a hint.
+Add a hint to make the query use an index scan.
+postgres=# explain select /*+ indexscan(student student_pkey) */ * from student where std_id=30;
    +QUERY PLAN

    + +

    [Bypass]
+Index Scan using student_pkey on student (cost=0.00..8.27 rows=1 width=62)
    +Index Cond: (std_id = 30)
    +(3 rows)

    +

    postgres=#
+Index Scan using student_pkey on student means the statement performed an index scan using the primary key index student_pkey on the student table.
+Step 12: Exit the database.
+postgres=# \q
+This concludes the EXPLAIN SQL optimization lab.

    +

4 Log Check
+4.1 Lab Introduction
+4.1.1 About This Lab
+While the database is running, some operations may hit errors during execution while the database itself keeps running, but the data in the database may already have become inconsistent. It is recommended to check the openGauss run logs to discover hidden problems in time.
+When openGauss fails, use the gs_collector tool to collect OS information, log information, configuration files, and other information to locate the problem.
+This lab first sets up the collection configuration by hand, and then uses gs_collector with that configuration to collect the relevant log information.
+4.1.2 Lab Objectives
+Master the basic usage of the gs_collector tool.
+4.2 Collecting Log Information with gs_collector
+Step 1: Create the collection configuration file.
    +[omm@ecs-e1b3 ~]$ pwd
    +/home/omm
    +[omm@ecs-e1b3 ~]$ vi collector.json
+After creating the configuration file with vi collector.json, press "i" to enter INSERT mode and add the following content:
+{
+"Collect":
+[
+{"TypeName": "System", "Content":"RunTimeInfo, HardWareInfo","Interval":"0", "Count":"1"},
+{"TypeName": "Log", "Content" : "Coordinator,DataNode,Gtm,ClusterManager", "Interval":"0", "Count":"1"},
+{"TypeName": "Database", "Content": "pg_locks,pg_stat_activity,pg_thread_wait_status","Interval":"0", "Count":"1"},
+{"TypeName": "Config", "Content": "Coordinator,DataNode,Gtm", "Interval":"0", "Count":"1"}
+]
+}
+After adding the content, press Esc, then type :wq and press Enter to save and exit.
+In the configuration file:
+TypeName specifies the type of information to collect;
+Content specifies the concrete content of each type;
+Count specifies how many times to collect this type of information;
+Interval specifies the collection interval, in seconds;
+TypeName and Content must not be missing or empty;
+Interval and Count may be omitted; if Count is not specified, the information is collected once by default;
+If Interval is not specified, the interval is 0 seconds; the values of Interval and Count cannot be less than 0;
+If no configuration file is specified, the default one is used;
+The configuration can be customized according to the gs_collector content collection reference table;
+The configuration file uses JSON format.
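Because a malformed configuration file will make gs_collector fail at the parsing step, it can be worth validating the JSON before running the tool; a sketch, assuming python3 is available on the host:

```bash
# Prints an error with a line number if collector.json is not valid JSON.
python3 -m json.tool /home/omm/collector.json > /dev/null && echo "collector.json is valid JSON"
```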

    +

Step 2: Check whether the database service is started.
    +[omm@ecs-e1b3 ~]$ gs_om -t status;

    +

    cluster_state : Unavailable
    +redistributing : No

    +

cluster_state : Normal means the service is started and usable; Unavailable means it is not.
+To continue the lab, start the database service first.
+Start the database service (skip this step if the service is already normal).
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    +

    =========================================
    +Successfully started.
+Step 3: Collect OS information and logs.
+Set begin-time and end-time to the time range you actually want to collect.
+[omm@ecs-e1b3 ~]$ gs_collector --begin-time="20200720 23:00" --end-time="20200729 20:00" -C /home/omm/collector.json
    +Successfully parsed the configuration file.
    +create Dir.
    +Successfully create dir.
    +do system check interval 0 : count 1
    +Collecting OS information.
    +Failed to collect OS information.
    +do database check interval 0 : count 1
    +Collecting catalog statistics.
    +Successfully collected catalog statistics.
    +do log check interval 0 : count 1
    +Collecting Log files.
    +Successfully collected Log files.
    +do Config check 0:1
    +Collecting Config files.
    +Successfully collected Config files.
    +Collecting files.
    +Successfully collected files.
    +All results are stored in /opt/huawei/wisequery/omm_mppdb/collector_20200727_094932.tar.gz.
+After collection, all results are stored in the /opt/huawei/wisequery/omm_mppdb/collector_20200727_094932.tar.gz package; note the name of your own package, since it differs on every run.
+Step 4: View the logs.
+First go to the directory containing the log package, then extract it.
    +[omm@ecs-e1b3 omm_mppdb]$ cd /opt/huawei/wisequery/omm_mppdb/
    +[omm@ecs-e1b3 omm_mppdb]$ ll
    +total 48K
    +-rw------- 1 omm dbgrp 46K Jul 27 09:49 collector_20200727_094932.tar.gz
    +[omm@ecs-e1b3 omm_mppdb]$ tar -zxvf collector_20200727_094932.tar.gz
    +collector_20200727_094932/
    +collector_20200727_094932/ecs-e1b3.tar.gz
    +collector_20200727_094932/Summary.log
    +collector_20200727_094932/Detail.log
+Next, enter the extracted folder collector_20200727_094932 and further extract the ecs-e1b3.tar.gz package.
    +[omm@ecs-e1b3 omm_mppdb]$ cd collector_20200727_094932
    +[omm@ecs-e1b3 collector_20200727_094932]$ ll
    +total 24K
    +-rw-------. 1 omm dbgrp 16K Feb 7 15:16 db1.tar.gz
    +-rw-------. 1 omm dbgrp 2.7K Feb 7 15:16 Detail.log
    +-rw-------. 1 omm dbgrp 1.1K Feb 7 15:16 Summary.log
    +[omm@ecs-e1b3 collector_20200727_094932]$ tar -zxvf db1.tar.gz
    +ecs-e1b3/
    +ecs-e1b3/logfiles/
    +ecs-e1b3/logfiles/log_20200727_094935975042.tar.gz
    +ecs-e1b3/planSimulatorfiles/
    +ecs-e1b3/catalogfiles/
    +ecs-e1b3/catalogfiles/dn_6001_pg_thread_wait_status_20200727_094935303146.csv
    +ecs-e1b3/catalogfiles/gs_clean_20200727_094935470508.txt
    +…………………………
    +ecs-e1b3/systemfiles/
    +ecs-e1b3/systemfiles/OS_information_20200727_094933424734.txt
    +ecs-e1b3/systemfiles/database_system_info_20200727_094933446671.txt
    +[omm@ecs-e1b3 collector_20200727_094932]$
+Under the extracted db1 directory (this is the server name and differs per machine, so check yours) are the directories for each customized log collection type, as follows:
    +[omm@ecs-e1b3 collector_20200727_094932]$ cd db1
    +[omm@ecs-e1b3 ecs-e1b3]$ ll
    +total 32K
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 catalogfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 configfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 coreDumpfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 gstackfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 logfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 planSimulatorfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 systemfiles
    +drwx------ 2 omm dbgrp 4.0K Jul 27 09:49 xlogfiles
    +[omm@ecs-e1b3 ecs-e1b3]$ cd catalogfiles/
    +[omm@ecs-e1b3 catalogfiles]$ ll
    +total 16K
    +-rw------- 1 omm dbgrp 389 Jul 27 09:49 dn_6001_pg_locks_20200727_094934961507.csv
    +-rw------- 1 omm dbgrp 1.4K Jul 27 09:49 dn_6001_pg_stat_activity_20200727_094935134988.csv
    +-rw------- 1 omm dbgrp 878 Jul 27 09:49 dn_6001_pg_thread_wait_status_20200727_094935303146.csv
    +-rw------- 1 omm dbgrp 281 Jul 27 09:49 gs_clean_20200727_094935470508.txt
+Step 5: Download the collected log files.
+As needed, download the log files to your local PC with an SSH tool such as WinSCP or Xftp.
+Log in to the database server (host: the elastic public IP of the ECS) with the root user and password:

    +

Click "Open directory/bookmark", enter the directory path /opt/huawei/wisequery/omm_mppdb/, and click OK to enter the directory:

    +

Navigate down to the catalogfiles folder, select it, then click Download to download it to the corresponding Windows folder:

    +

View the contents of the downloaded folder:

    +

Figure 4-1 Downloading the log files
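If no graphical SSH tool is at hand, the same download can be done with scp; a sketch, assuming the server's elastic public IP is reachable from the local machine and your own package name is substituted in:

```bash
# Copy the collected package to the current local directory.
scp root@<ecs-public-ip>:/opt/huawei/wisequery/omm_mppdb/collector_20200727_094932.tar.gz .
```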

    +

5 Setting the Maximum Number of Connections
+5.1 Lab Introduction
+5.1.1 About This Lab
+When the number of connections between applications and the database exceeds the maximum, new connections cannot be established. It is recommended to monitor the connection count, release idle connections in time, or increase the maximum number of connections.
+This lab explains how to set the database's maximum number of connections.
+5.1.2 Lab Objectives
+Master how to set the database's maximum number of connections.
+5.2 Scenario Setup and Procedure
+Step 1: Log in as root to the operating system running the openGauss database service, then switch to the omm user environment with su - omm. The post-login information is as follows:
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Mon Jul 27 11:22:46 CST 2020
    +System load: 0.03
    +Processes: 154
    +Memory used: 2.3%
    +Swap used: 0.0%
    +Usage On: 14%
    +IP address: 192.168.0.12
    +Users online: 3
    +[root@ecs-e1b3 ~]# su - omm
    +Last login: Mon Jul 27 09:23:44 CST 2020 on pts/0
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Mon Jul 27 11:23:37 CST 2020
    +System load: 0.01
    +Processes: 156
    +Memory used: 2.4%
    +Swap used: 0.0%
    +Usage On: 14%
    +IP address: 192.168.0.12
    +Users online: 3
+Step 2: Check whether the openGauss database service is started.
    +[omm@ecs-e1b3 ~]$ gs_om -t status;

    +

    cluster_name : dbCluster
    +cluster_state : Normal
    +redistributing : No

    +

cluster_state : Normal means the service is started and usable; any state other than Normal means it is unavailable.
+For the lab scenario setup, if the database service is not started, run gs_om -t start to start it.
+Step 3: Log in to the database.
+Connect to the postgres database as the administrator with the gsql client, assuming port 26000.
    +[omm@ecs-e1b3 ~]$ gsql -d postgres -p 26000 -r
    +gsql ((openGauss 1.0.0 build 38a9312a) compiled at 2020-05-27 14:57:08 commit 472 last mr 549 )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.

    +

    postgres=#
+Step 4: View the number of connections currently in use.
+postgres=# select count(1) from pg_stat_activity;
+count
+-------
+10
+(1 row)
+10 means 10 applications are currently connected to the database.
+Step 5: View the configured maximum number of connections.
+postgres=# SHOW max_connections;
+max_connections
+-----------------
+5000
+(1 row)
+5000 means the database's configured maximum is 5000 connections. If the number of connections in use is approaching the maximum, the operator should promptly increase the maximum so that new connections can still be established.
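To keep an eye on the headroom, the used and configured values can be combined in a single query; a minimal sketch, assuming port 26000 as elsewhere in this lab:

```bash
# Show connections in use next to the configured maximum.
gsql -d postgres -p 26000 -c \
  "SELECT count(*) AS used, current_setting('max_connections') AS max_connections FROM pg_stat_activity;"
```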
+Step 6: Adjust the maximum-connections parameter.
+Method 1:
+First exit the database with \q, then increase the value with the gs_guc tool in the omm user environment, as follows:
+[omm@ecs-e1b3 ~]$ gs_guc reload -I all -c "max_connections= 6000";
    +expected instance path: [/gaussdb/data/db1/postgresql.conf]
    +gs_guc reload: max_connections=6000: [/gaussdb/data/db1/postgresql.conf]
    +server signaled
    +Total instances: 1. Failed instances: 0.
    +Success to perform gs_guc!
+Method 2:
+You can also set this parameter with an ALTER SYSTEM SET statement, as follows:
    +[omm@ecs-e1b3 ~]$ gsql -d postgres -p 26000 -r
    +gsql ((openGauss 1.0.0 build 38a9312a) compiled at 2020-05-27 14:57:08 commit 472 last mr 549 )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.

    +

    postgres=# alter system set max_connections=6000;
    +NOTICE: please restart the database for the POSTMASTER level parameter to take effect.
    +ALTER SYSTEM SET
    +postgres=#\q
+Step 7: Restart the database.
+First stop the database with gs_om -t stop, then start it again with gs_om -t start.
    +[omm@ecs-e1b3 ~]$ gs_om -t stop;
    +Stopping cluster.

    +

    Successfully stopped cluster.

    +

    End stop cluster.
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    +

    =========================================
    +Successfully started.
+Step 8: Verify that the parameter change took effect.
+Connect to the postgres database as the administrator with the gsql client, then check the parameter value.
    +[omm@ecs-e1b3 ~]$ gsql -d postgres -p 26000 -r
    +gsql ((openGauss 1.0.0 build 38a9312a) compiled at 2020-05-27 14:57:08 commit 472 last mr 549 )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.

    +

postgres=# SHOW max_connections;
+max_connections
+-----------------
+6000
+(1 row)
+max_connections is now shown as 6000, which means the earlier parameter change has taken effect.
+Step 9: Exit the database.
+postgres=#\q
+This concludes the maximum-connections lab.

    +

6 Routine Table and Index Maintenance
+6.1 Lab Introduction
+6.1.1 About This Lab
+To keep the database running effectively, you must, after insert/delete operations and based on the customer scenario, periodically run VACUUM FULL and ANALYZE to update the statistics and obtain better performance.
+VACUUM FULL reclaims the disk space occupied by updated or deleted data and merges small data files.
+VACUUM maintains a visibility map for each table to track pages containing only tuples visible to all active transactions. An index-only scan first consults the visibility map for the tuple's page to check whether it is visible to the current transaction; only when that cannot be determined does it fetch the heap tuple to check. Updating a table's visibility map therefore speeds up index-only scans.
+ANALYZE collects statistics about the contents of tables in the database; the results are stored in the system catalog PG_STATISTIC. The query optimizer uses these statistics to generate the most efficient execution plans.
+After the database has gone through many delete operations, index keys are deleted from index pages, causing index bloat. Routinely rebuilding indexes effectively improves query efficiency.
+This lab mainly shrinks tables with VACUUM and VACUUM FULL, collects table statistics with ANALYZE, and rebuilds the indexes on a table.
+6.1.2 Lab Objectives
+Master the basic usage of VACUUM, VACUUM FULL, and ANALYZE, and how to rebuild an index.
+6.2 Scenario Setup and Procedure
+Step 1: Log in as root to the operating system running the openGauss database service, then switch to the omm user environment with su - omm. The post-login information is as follows:
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 27 16:21:11 CST 2020
    +System load: 0.01
    +Processes: 109
    +Memory used: 6.7%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[root@ecs-e1b3 ~]# su - omm
    +Last login: Fri Jul 27 16:22:11 CST 2020 on pts/0
    +Welcome to 4.19.90-2003.4.0.0036.oe1.aarch64
    +System information as of time: Tue Jul 27 16:21:11 CST 2020
    +System load: 0.01
    +Processes: 111
    +Memory used: 7.0%
    +Swap used: 0.0%
    +Usage On: 15%
    +IP address: 192.168.0.96
    +Users online: 1
    +[omm@ecs-e1b3 ~]$
+Step 2: After starting the server, connect to the postgres database as the administrator with the gsql client, assuming port 26000.
+Start the database service.
    +[omm@ecs-e1b3 ~]$ gs_om -t start;
    +Starting cluster.

    +

    =========================================
    +Successfully started.
+Connect to the postgres database.
    +[omm@ecs-e1b3 ~]$ gsql -d postgres -p 26000 -r
    +gsql ((openGauss 1.0.0 build 38a9312a) compiled at 2020-05-27 14:56:08 commit 472 last mr 549 )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.

    +

    postgres=#
+Step 3: Create the student table.
    +postgres=# drop table student;
    +postgres=# CREATE TABLE student
    +( std_id INT NOT NULL,
    +std_name VARCHAR(20) NOT NULL,
    +std_sex VARCHAR(6),
    +std_birth DATE,
    +std_in DATE NOT NULL,
    +std_address VARCHAR(100)
    +);

    +

    CREATE TABLE
+Step 4: Insert data into the table.
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (1,'张一','男','1993-01-01','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (2,'张二','男','1993-01-02','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (3,'张三','男','1993-01-03','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (4,'张四','男','1993-01-04','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (5,'张五','男','1993-01-05','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (6,'张六','男','1993-01-06','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (7,'张七','男','1993-01-07','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (8,'张八','男','1993-01-08','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (9,'张九','男','1993-01-09','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (10,'李一','男','1993-01-10','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (11,'李二','男','1993-01-11','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (12,'李三','男','1993-01-12','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (13,'李四','男','1993-01-13','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (14,'李五','男','1993-01-14','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (15,'李六','男','1993-01-15','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (16,'李七','男','1993-01-16','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (17,'李八','男','1993-01-17','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (18,'李九','男','1993-01-18','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (19,'王一','男','1993-01-19','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (20,'王二','男','1993-01-20','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (21,'王三','男','1993-01-21','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (22,'王四','男','1993-01-22','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (23,'王五','男','1993-01-23','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (24,'王六','男','1993-01-24','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (25,'王七','男','1993-01-25','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (26,'王八','男','1993-01-26','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (27,'王九','男','1993-01-27','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (28,'钱一','男','1993-01-28','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (29,'钱二','男','1993-01-29','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (30,'钱三','男','1993-01-30','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (31,'钱四','男','1993-02-01','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (32,'钱五','男','1993-02-02','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (33,'钱六','男','1993-02-03','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (34,'钱七','男','1993-02-04','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (35,'钱八','男','1993-02-05','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (36,'钱九','男','1993-02-06','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (37,'吴一','男','1993-02-07','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (38,'吴二','男','1993-02-08','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (39,'吴三','男','1993-02-09','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (40,'吴四','男','1993-02-10','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (41,'吴五','男','1993-02-11','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (42,'吴六','男','1993-02-12','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (43,'吴七','男','1993-02-13','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (44,'吴八','男','1993-02-14','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (45,'吴九','男','1993-02-15','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (46,'柳一','男','1993-02-16','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (47,'柳二','男','1993-02-17','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (48,'柳三','男','1993-02-18','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (49,'柳四','男','1993-02-19','2011-09-01','江苏省南京市雨花台区');
+INSERT INTO student(std_id,std_name,std_sex,std_birth,std_in,std_address) VALUES (50,'柳五','男','1993-02-20','2011-09-01','江苏省南京市雨花台区');
+Step 5: Query and count the data.
+postgres=# select count(*) from student;
+count
+-------
+50
+(1 row)

    +

    postgres=# select * from student order by std_id;
    +std_id | std_name | std_sex | std_birth | std_in | std_address
+--------+----------+---------+---------------------+---------------------+----------------------
    +1 | 张一 | 男 | 1993-01-01 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +2 | 张二 | 男 | 1993-01-02 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +3 | 张三 | 男 | 1993-01-03 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +4 | 张四 | 男 | 1993-01-04 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +5 | 张五 | 男 | 1993-01-05 00:00:00 | 2011-09-01 00:00:00 | 江苏省南京市雨花台区
    +………………
+Step 6: View the table information.
+postgres=# \d student
+Table "public.student"
+Column | Type | Modifiers
+-------------+--------------------------------+-----------
    +std_id | integer | not null
    +std_name | character varying(20) | not null
    +std_sex | character varying(6) |
    +std_birth | timestamp(0) without time zone |
    +std_in | timestamp(0) without time zone | not null
    +std_address | character varying(100) |
+Step 7: Reclaim disk space with the VACUUM command.
    +postgres=# vacuum student;
    +VACUUM
+Step 8: Delete data from the table.
    +postgres=# delete from student where std_id>30;
    +DELETE 20
+Step 9: Reclaim disk space with the VACUUM FULL command.
    +postgres=# vacuum full student;
    +VACUUM
+Step 10: Update the statistics with the ANALYZE statement.
    +postgres=# analyze student;
    +ANALYZE
+Step 11: Update the statistics with ANALYZE VERBOSE, which also prints information about the table.
+postgres=# analyze verbose student;
+INFO: analyzing "public.student"(dn_6001 pid=37195)
+INFO: ANALYZE INFO : "student": scanned 1 of 1 pages, containing 30 live rows and 20 dead rows; 30 rows in sample, 30 estimated total rows(dn_6001 pid=37195)
    +ANALYZE
+Step 12: Run the VACUUM ANALYZE command for query optimization.
    +postgres=# vacuum analyze student;
    +VACUUM
+Step 13: View the statistics of a specific table.
+postgres=# select relname,n_tup_ins,n_tup_upd,n_tup_del,last_analyze,vacuum_count from PG_STAT_ALL_TABLES where relname='student';
+relname | n_tup_ins | n_tup_upd | n_tup_del | last_analyze | vacuum_count
+---------+-----------+-----------+-----------+------------------------------+--------------
    +student | 50 | 0 | 20 | 2020-07-27 17:07:19.17167+08 | 3
    +(1 row)
    +postgres=#
+The PG_STAT_ALL_TABLES view contains one row of statistics for each table in the current database. In the query result above, the columns mean:
+relname: table name
+n_tup_ins: number of rows inserted
+n_tup_upd: number of rows updated
+n_tup_del: number of rows deleted
+last_analyze: the last time the table was manually analyzed
+vacuum_count: the number of times the table has been manually vacuumed
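The same view can help decide which tables need maintenance next; a sketch that lists the tables with the most dead tuples, assuming the n_dead_tup column behaves as in PostgreSQL:

```bash
# Tables with many dead tuples are the best VACUUM candidates.
gsql -d postgres -p 26000 -c \
  "SELECT relname, n_dead_tup FROM pg_stat_all_tables ORDER BY n_dead_tup DESC LIMIT 10;"
```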
+Step 14: Index maintenance.
+Notes:
+After a large amount of data is deleted, index keys are deleted from index pages, leading to index bloat; rebuilding the index reclaims the wasted space.
+In a newly built index, pages that are adjacent in the logical structure are usually also physically adjacent, so a new index is faster to access than one that has been updated many times.
+There are two ways to rebuild an index:
+1. Use the REINDEX statement;
+2. Drop the index (DROP INDEX), then recreate it (CREATE INDEX).
+First create an index on the std_name column of the student table, as follows:
    +postgres=# create index inx_stu01 on student(std_name);
    +CREATE INDEX
    +postgres=#
+Method 1: Rebuild the index with the REINDEX statement, as follows:
    +postgres=# reindex table student;
    +REINDEX
    +postgres=#
+Method 2: Drop the index (DROP INDEX), then recreate it (CREATE INDEX), as follows:
    +postgres=# drop index inx_stu01;
    +DROP INDEX
    +postgres=# create index inx_stu01 on student(std_name);
    +CREATE INDEX
    +postgres=#
+View the table structure, as follows:
+postgres=# \d student;
+Table "public.student"
+Column | Type | Modifiers
+-------------+--------------------------------+-----------
    +std_id | integer | not null
    +std_name | character varying(20) | not null
    +std_sex | character varying(6) |
    +std_birth | timestamp(0) without time zone |
    +std_in | timestamp(0) without time zone | not null
    +std_address | character varying(100) |
    +Indexes:
+"inx_stu01" btree (std_name) TABLESPACE pg_default
+Step 15: Exit the database.
+postgres=#\q
+This concludes the routine table and index maintenance lab.

    +

Lab results:
+Screenshot 1: operating system parameter check
+图片1.png 图片2.png
+Screenshot 2: setting the maximum number of connections
+图片3.png 图片4.png
+Screenshot 3:
+图片5.png
+Analysis and summary:
+In this lab I practiced system parameter checks, openGauss health checks, database performance checks, and log checking and cleanup. The lab was fairly straightforward and I did not run into any problems.

diff --git "a/content/zh/post/zhengwen2/openGauss\347\232\204 Helm Chart\345\214\205\347\274\226\345\206\231\346\216\242\347\264\242.md" "b/content/zh/post/zhengwen2/openGauss\347\232\204 Helm Chart\345\214\205\347\274\226\345\206\231\346\216\242\347\264\242.md"
new file mode 100644
index 0000000000000000000000000000000000000000..e07987e05762e117e7402d9d1167a56ec0a9b07a
--- /dev/null
+++ "b/content/zh/post/zhengwen2/openGauss\347\232\204 Helm Chart\345\214\205\347\274\226\345\206\231\346\216\242\347\264\242.md"
@@ -0,0 +1,430 @@
++++

+title = "Exploring a Helm Chart Package for openGauss"

+date = "2021-07-10"

+tags = ["Exploring a Helm Chart Package for openGauss"]

+archives = "2021-07"

+author = "华军"

+summary = "Exploring a Helm Chart Package for openGauss"

+img = "/zh/post/zhengwen2/img/img29.png"

+times = "12:30"

++++

# Exploring a Helm Chart Package for openGauss

Helm is the package manager for Kubernetes. Like apt on Ubuntu or yum on CentOS, a package manager lets you quickly find, download, and install software packages. This article explores writing a Helm Chart package for openGauss, making it quick and easy to deploy openGauss on Kubernetes.

# 1. Environment checklist

## Check the k8s runtime environment

```powershell
[root@n-k8s-m ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
n-k8s-m Ready master 349d v1.18.0
```

## Check the Helm runtime environment

```powershell
[root@n-k8s-m ~]# helm version
version.BuildInfo{Version:"v3.6.0", GitCommit:"7f2df6467771a75f5646b7f12afb408590ed1755", GitTreeState:"clean", GoVersion:"go1.16.3"}
```

## View the storage class

```powershell
[root@n-k8s-m ~]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
managed-nfs-storage fuseim.pri/ifs Delete Immediate false 157m
```

# 2. Create the openGauss chart template

```powershell
[root@n-k8s-m helm]# helm create opengauss
Creating opengauss
```

# 3. Inspect the chart template

```powershell
[root@n-k8s-m helm]# tree opengauss
opengauss
├── charts
├── Chart.yaml
├── templates
│   ├── deployment.yaml
│   ├── _helpers.tpl
│   ├── hpa.yaml
│   ├── ingress.yaml
│   ├── NOTES.txt
│   ├── pvc.yaml
│   ├── serviceaccount.yaml
│   ├── service.yaml
│   └── tests
│       └── test-connection.yaml
└── values.yaml
```

# 4. Write the values file values.yaml

```powershell
[root@n-k8s-m helm]# cat values.yaml

# Default values for opengauss.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: opengauss
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "2.0.0"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

## Specify password for root user
## Default: random 10 character string
RootPassword: Gauss@123

## Persist data to a persistent volume
persistence:
  enabled: true
  ## database data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  storageClass: "managed-nfs-storage"
  accessMode: ReadWriteOnce
  size: 4Gi
  annotations: {}

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: NodePort
  port: 5432

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}
```

# 5. Write deployment.yaml

```powershell
[root@n-k8s-m templates]# cat deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "opengauss.fullname" . }}
  labels:
    {{- include "opengauss.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "opengauss.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "opengauss.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "opengauss.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          volumeMounts:
            - name: opengauss-persistent-storage
              mountPath: /var/lib/opengauss
          env:
            - name: GS_PASSWORD
              value: {{ .Values.RootPassword }}
          ports:
            - containerPort: 5432
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: opengauss-persistent-storage
          persistentVolumeClaim:
            claimName: {{ include "opengauss.fullname" . }}
```

# 6. Write Service.yaml

```powershell
[root@n-k8s-m helm]# vim Service.yaml

apiVersion: v1
kind: Service
metadata:
  name: {{ include "opengauss.fullname" . }}
  labels:
    {{- include "opengauss.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: 5432
      protocol: TCP
      name: http
  selector:
    {{- include "opengauss.selectorLabels" . | nindent 4 }}
```

# 7. Write pvc.yaml

```powershell
[root@n-k8s-m templates]# cat pvc.yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "opengauss.fullname" . }}
  namespace: {{ .Release.Namespace }}
{{- with .Values.persistence.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    app: {{ template "opengauss.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
```

# 8. Install the openGauss database with helm

```powershell
[root@n-k8s-m helm]# helm install opengauss2 opengauss/

NAME: opengauss2
LAST DEPLOYED: Fri Jul 9 06:36:50 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services opengauss2)
  export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT

# View the list of installed releases
[root@n-k8s-m helm]# helm list
NAME       NAMESPACE  REVISION  UPDATED                                  STATUS    CHART            APP VERSION
opengauss2 default    1         2021-07-09 06:36:50.181491555 -0400 EDT  deployed  opengauss-0.1.0  1.16.0
```

# 9. Check the related k8s resources

```powershell
[root@n-k8s-m helm]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
opengauss2 Bound pvc-5d7ae6f4-4b73-4bcc-a6ea-db1a9c232ba8 4Gi RWO managed-nfs-storage 8s
[root@n-k8s-m helm]# kubectl get pod
NAME READY STATUS RESTARTS AGE
ingress-86f59dc97d-d7nrw 1/1 Running 5 3d
ingress-86f59dc97d-qjsjg 1/1 Running 5 3d
nfs-client-provisioner-6b9dc8c7cb-nsr5l 1/1 Running 5 3d
opengauss2-6f5747d6dc-7ps4b 0/1 Running 0 11s
[root@n-k8s-m templates]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress ClusterIP 10.98.109.186 <none> 80/TCP 336d
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 351d
opengauss2 NodePort 10.101.6.139 <none> 5432:30118/TCP 63m
```

# 10. Connect to the opengauss database

```powershell
[root@n-k8s-m helm]# kubectl exec -it opengauss2-6f5747d6dc-7ps4b sh
sh-4.2# ls
anaconda-post.log dev entrypoint.sh home lib64 mnt proc run srv tmp var
bin docker-entrypoint-initdb.d etc lib media opt root sbin sys usr
sh-4.2# id omm
uid=70(omm) gid=70(omm) groups=70(omm)
sh-4.2# su - omm
[omm@opengauss2-6f5747d6dc-7ps4b ~]$ gsql
gsql ((openGauss 2.0.0 build 78689da9) compiled at 2021-03-31 21:04:03 commit 0 last mr )
Non-SSL connection (SSL connection is recommended when requiring high-security)
Type "help" for help.
+ +omm=# \l + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+-------+----------+------------+------------+------------------- + omm | omm | UTF8 | en_US.utf8 | en_US.utf8 | + postgres | omm | UTF8 | en_US.utf8 | en_US.utf8 | + template0 | omm | UTF8 | en_US.utf8 | en_US.utf8 | =c/omm + + | | | | | omm=CTc/omm + template1 | omm | UTF8 | en_US.utf8 | en_US.utf8 | =c/omm + + | | | | | omm=CTc/omm +(4 rows) + +omm=# \c +Non-SSL connection (SSL connection is recommended when requiring high-security) +You are now connected to database "omm" as user "omm". +``` + +# 11.打包openGauss +```powershell +[root@n-k8s-m helm]# helm package opengauss +Successfully packaged chart and saved it to: /root/helm/opengauss-0.1.0.tgz +``` + +# 12.通过helm卸载openGauss数据库 + +```powershell +[root@n-k8s-m helm]# helm uninstall opengauss2 +release "opengauss2" uninstalled +``` + +# 13.通过openGauss包安装数据库 + +```powershell +[root@n-k8s-m helm]# helm install opengauss3 opengauss-0.1.0.tgz +NAME: opengauss3 +LAST DEPLOYED: Fri Jul 9 08:03:06 2021 +NAMESPACE: default +STATUS: deployed +REVISION: 1 +NOTES: +1. Get the application URL by running these commands: + export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services opengauss3) + export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + +[root@n-k8s-m helm]# helm list +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +opengauss3 default 1 2021-07-09 08:03:06.589888037 -0400 EDT deployed opengauss-0.1.0 1.16.0 +``` + +# 14.最后就可以把opengauss-0.1.0.tgz上传的Helm仓库,方便openGauss的在Kubernetes的快速部署. + + + diff --git "a/content/zh/post/zhengwen2/openGauss\347\264\242\345\274\225\346\237\245\350\257\242\345\222\214\347\264\242\345\274\225\350\247\204\345\210\231\345\256\236\351\252\214.md" "b/content/zh/post/zhengwen2/openGauss\347\264\242\345\274\225\346\237\245\350\257\242\345\222\214\347\264\242\345\274\225\350\247\204\345\210\231\345\256\236\351\252\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..7f1f7a661fab1eef5d94f21e47a56f3536a94a54 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGauss\347\264\242\345\274\225\346\237\245\350\257\242\345\222\214\347\264\242\345\274\225\350\247\204\345\210\231\345\256\236\351\252\214.md" @@ -0,0 +1,292 @@ ++++ + +title = "openGauss索引查询和索引规则实验" + +date = "2021-07-09" + +tags = ["openGauss索引查询和索引规则实验"] + +archives = "2021-07" + +author = "滋味" + +summary = "openGauss索引查询和索引规则实验" + +img = "/zh/post/zhengwen2/img/img28.png" + +times = "12:30" + ++++ + +# openGauss索引查询和索引规则实验 + + + + +

            建立索引是提高数据库访问速度的重要手段之一。本文将对openGauss2.0.0的4个主要索引方式进行实验,验证建立索引前后查询性能的差异和部分索引规则。

    +

    实验环境

    +

    软件:openGauss2.0.0, openEuler20.03, VirtualBox6.1.16,虚拟机配置2处理器,4G内存
    +硬件:CPU: Intel i5-8265U

    +

    openGauss索引介绍

    +

    根据openGauss2.0.0手册,openGauss有四种索引和根据一个索引对表做一次性聚集操作的CLUSTER语句。

    +

    4种索引.PNG

    +
    +

    表1 openGauss的4种索引方式(截图自openGauss2.0.0手册《创建和管理索引》章节)

    +
    +

    CLUSTER语句.PNG

    +
    +

    图1 openGauss的CLUSTER语句描述(截图自openGauss2.0.0手册《CLUSTER》章节)

    +
    +

            据此可以推测,前3类索引(唯一索引、多字段索引、部分索引)都是在对应的属性(集合)上创建B树的辅助索引,不改变表中条目的物理存储顺序;且这些索引都是稠密的,因为辅助索引均为稠密索引。而CLUSTER就指定一个索引,根据索引排序表的条目,被指定的索引成为聚集索引,其他索引仍为辅助索引。

    +

    实验1:索引查询实验

    +

    建表和插入数据

    +

    建立phi表,有pno, pname, location, healthstatus4个属性,具体建表代码如下。

    + +```sql +create table phi( +pno varchar(18) primary key, +pname varchar(20), +location varchar(20), +healthstatus varchar(20)); +``` + +

            随后建立函数插入数据。通过随机数函数生成大整数并cast到varchar作为pno主码插入数据。但是如果插入主码相同的数据,会产生错误和回滚,导致之前插入的数据也丢失。因此可以采用以下两种方法。

    +

    方案1:查重法。每生成一个新pno,就在已插入的表中的pno中查找有没有重复的。一开始对pgSQL的变量作用域不太熟悉,在微信群中的华为工程师帮忙调试了部分代码,函数可以运行(代码见附录1)。但是复杂度O(n^2),插入10万条数据要超过1小时,插入100万条的时间是无法接受的。

    +

    方案2:双随机数法。生成一个18位整型随机数(ran)和另一个12位整型随机数(ran2),令ran-ran2作为pno主码,不进行查重检验,直接插入(代码见附录2)。实际操作中主码重合的概率极低。插入效率大约是每秒钟1万条数据,比较高效。

    +

    分别建立了有5000、10000、100000、1000000个数据条目的表进行索引实验。

    +

    建立索引

    +

    每个数量级的表均会建立5个索引和2次CLUSTER操作。

    +索引1:建立pno上的普通索引 + +```sql +create index index_uni_pno on phi(pno); +``` + +索引2:建立pname上的普通索引 +```sql +create index index_uni_pna on phi(pname); +``` + +索引3:建立(pname,pno)的多值索引 +```sql +create index index_mul_pna_pno on phi(pname,pno); +``` + +索引4:建立部分索引 +```sql +create index index_par_loc on phi(location) +where location='Shanghai' and healthstatus='Health'; +``` + +索引5:建立表达式索引 +```sql +create index index_exp_pno on phi(substr(pno,1,4)); +``` + +聚集操作1:对pno聚集操作 +```sql +cluster verbose phi using index_uni_pno; +``` + +聚集操作2:对pno,location做聚集操作 +```sql +create index index_mul_pno_loc on phi(pno,location); +cluster verbose phi using index_mul_pno_loc; +``` + +查询执行 +每个数量级的表均会执行15条查询语句,查询语句和执行的条件如下。 + +无索引时,进行如下查询,编号为1-6。 +```sql +explain analyze select * from phi where pno>'500000000000000000'; +explain analyze select * from phi where pname>'p678900000000'; +explain analyze select * from phi where pno>'500000000000000000' and pname>'p678900000000'; +explain analyze select * from phi where location='Shanghai' and healthstatus='Health'; +explain analyze select * from phi where pno like '5678%'; +explain analyze select healthstatus,count(*) from phi where location='Shanghai' group by healthstatus; +``` + +建立索引1-5后,进行如下查询,编号为7-11。 +```sql +explain analyze select * from phi where pno>'500000000000000000'; +explain analyze select * from phi where pname>'p678900000000'; +explain analyze select * from phi where pno>'500000000000000000' and pname>'p678900000000'; +explain analyze select * from phi where location='Shanghai' and healthstatus='Health'; +explain analyze select * from phi where pno like '5678%'; +``` + +对pno聚集操作后,进行如下查询,编号为12、13。 +```sql +explain analyze select * from phi where pno>'500000000000000000'; +explain analyze select healthstatus,count(*) from phi where location='Shanghai' group by healthstatus; +``` + +对location,pno聚集操作后,进行如下查询,编号14。 +```sql +explain analyze select healthstatus,count(*) from phi where location='Shanghai' group by healthstatus; +``` + +

    实验结果

    +

    114查询时间.png

    +
    +

    表2 各查询语句的执行用时(单位:毫秒)

    +
    +

    结论1:对比运行时间1、7、12,查询pno上特定范围的数据。建立pno上的索引后、或者对该索引聚集后,访问pno>’500000000000000000’的速度略微加快;explain analyze显示添加索引后仍是遍历访问。
    +pno顺序_100w.png

    +
    +

    图2 100万条数据时查询12的结果

    +
    +

    结论2:对比运行时间2、8,查询pname上特定范围的数据。建立pname上的索引后,访问pname>’p678900000000’的速度略微加快。同样,explain analyze显示添加索引后对pname仍是遍历访问。

    +

    结论3:对比运行时间3、9,查询pno和pname都在特定范围内的数据。建立索引1-5后,查询pname>’p678900000000’ and pno>’500000000000000000’的速度明显加快,基本节省一半时间。但是看explain analyze的信息,发现查询过程是在满足pno条件后用pname的普通索引找的。
    +8_100w.png

    +
    +

    图3 100万条数据时查询9的结果

    +
    +

    结论4:对比运行时间4、10,查找位于上海的健康人。可以发现使用对应的部分索引可以明显加快访问,因为已经把要的数据建成树了。

    +

    结论5:对比运行时间5、11,查找pno开头是5678的人。发现表达式索引作用尚不显著,可能本身用时就比较快。

    +

    结论6:对比运行时间6、13、14,该查询要求显示在上海的各healthsatus的人数。显然6是遍历的时间(因为6的用时和1、3接近);13根据pno聚集后,用时小幅缩短;14按(location,pno)聚集后,用时减少超过一半。据此可以从查询策略上猜想,6、13都是遍历;14根据(location,pno)索引准确找到了location=’Shanghai’的位置并只遍历上海的数据,因此最快。

    +

    实验2:索引规则实验

    +

    表达式索引

    +

    表2中编号5和11的查询没有收到预期效果(11应远远快于5)。查询手册发现openGauss要求表达式索引只有在查询时使用与创建时相同的表达式才有效,下面进行验证。

    +

    查询指令

    + +```sql +select * from phi where substr(pno,1,4)='2345'; +``` + +

    分别在无和有索引5的情况下运行(图4、图5),时间分别为0.544ms和848.343ms,可以看到差别巨大。也表明表达式索引在大数据量时非常有用,但使用条件非常苛刻,要求表达式相同。

    +

    如果表达式不同(图4、图6),那么在查询执行时就不会用到表达式索引。不过有趣的是不用索引5的运行时间比用索引还短那么一点点。

    +

    pic4.png

    +
    +

    图4 有索引5时的查询select * from phi where substr(pno,1,4)=‘2345’;

    +
    +

    pic5.png

    +
    +

    图5 无索引5时的查询select * from phi where substr(pno,1,4)=‘2345’;

    +
    +

    pic6.png

    +
    +

    图6 有索引5时的查询select * from phi where pno like ‘2345%’;

    +
    +

    主键索引

    +

    可以看到建表时openGauss默认创建的索引phi_pkey和我创建的在pno上的普通索引index_uni_pno大小一致,猜测他们都是关于pno的普通索引。分别对两个索引进行cluster操作并查看数据,发现两者都是按pno按字典序排列。因此认为openGauss的表的主键索引{tablename}_pkey是建立在表主码上的B树索引。

    +

    pic7.png

    +
    +

    图7 表的所有索引

    +
    +

    附录

    +

    附录1:插入数据的函数(查重方法)

    + +```sql +create or replace function insert_data(numb integer) returns void +as $$ +begin + declare counter integer :=1; + declare ran integer := random()*1000000000 as integer; + declare pn varchar(18) := cast( ran as varchar(18)); + declare pna varchar(20) :=concat('p',pn); + declare loc varchar(20) := 'China'; + declare hs varchar(20) := 'Health'; + TYPE var20_array IS VARRAY(5) OF varchar(20); + loc_arr var20_array := var20_array(); + hs_arr var20_array := var20_array(); + begin + loc_arr[1] :='Shanghai'; + loc_arr[2] :='Beijing'; + loc_arr[3] :='Guangzhou'; + loc_arr[4] :='Wuhan'; + hs_arr[1] :='Health'; + hs_arr[2] :='Uncertain'; + hs_arr[3] :='Diagnosis'; + hs_arr[4] :='Cure'; + begin raise notice 'start at %',statement_timestamp(); end; + while counter<=numb + loop + ran := random()*1000000000 as integer; + pn := cast( ran as varchar(18)); + begin + while pn in (select pno from phi) + loop + ran := random()*1000000000 as integer; + pn := cast( ran as varchar(18)); + end loop; + end; + pna :=concat('p',pn); + ran :=floor(1 + (random() * 4)); + loc := loc_arr[ran]; + ran :=floor(1 + (random() * 4)); + hs := hs_arr[ran]; + begin + insert into phi(pno,pname,location,healthstatus) values(pn,pna,loc,hs); + end; + begin + if counter % 1000=0 + then + begin raise notice 'counter: % at %',counter, statement_timestamp(); end; + end if; + end; + counter :=counter+1; + end loop; + end; +end; +$$ language plpgsql; +``` + +

    附录2:插入数据的函数(双随机数方法)

    + +```sql +create or replace function insert_data2(numb integer) returns void +as $$ +begin + declare counter integer :=1; + declare ran bigint := random()*1000000000000000000 as bigint; + declare ran2 bigint := random()*1000000000000 as bigint; + declare pn varchar(18) := cast( ran as varchar(18)); + declare pna varchar(20) :=concat('p',pn); + declare loc varchar(20) := 'China'; + declare hs varchar(20) := 'Health'; + TYPE var20_array IS VARRAY(5) OF varchar(20); + loc_arr var20_array := var20_array(); + hs_arr var20_array := var20_array(); + begin + loc_arr[1] :='Shanghai'; + loc_arr[2] :='Beijing'; + loc_arr[3] :='Guangzhou'; + loc_arr[4] :='Wuhan'; + hs_arr[1] :='Health'; + hs_arr[2] :='Uncertain'; + hs_arr[3] :='Diagnosis'; + hs_arr[4] :='Cure'; + begin raise notice 'start at %',statement_timestamp(); end; + while counter<=numb + loop + ran := random()*1000000000000000000 as bigint; + ran2 := random()*1000000000000 as bigint; + ran := ran-ran2; + pna :=concat('p',pn); + pn := cast( ran as varchar(18)); + ran :=floor(1 + (random() * 4)); + loc := loc_arr[ran]; + ran :=floor(1 + (random() * 4)); + hs := hs_arr[ran]; + begin + insert into phi(pno,pname,location,healthstatus) values(pn,pna,loc,hs); + end; + begin + if counter % 10000=0 + then + begin raise notice 'counter: % at %',counter, statement_timestamp(); end; + end if; + end; + counter :=counter+1; + end loop; + end; +end; +$$ language plpgsql; +``` + + + + + diff --git "a/content/zh/post/zhengwen2/openGuass\345\256\236\351\252\214\345\277\203\345\276\227\344\271\213gs_dump\351\200\273\350\276\221\345\244\207\344\273\275\344\270\216\346\201\242\345\244\215.md" "b/content/zh/post/zhengwen2/openGuass\345\256\236\351\252\214\345\277\203\345\276\227\344\271\213gs_dump\351\200\273\350\276\221\345\244\207\344\273\275\344\270\216\346\201\242\345\244\215.md" new file mode 100644 index 0000000000000000000000000000000000000000..17b05df42fee9868e0580ea30561b2d02a8998d6 --- /dev/null +++ "b/content/zh/post/zhengwen2/openGuass\345\256\236\351\252\214\345\277\203\345\276\227\344\271\213gs_dump\351\200\273\350\276\221\345\244\207\344\273\275\344\270\216\346\201\242\345\244\215.md" @@ -0,0 +1,208 @@ ++++ + +title = "openGauss实验心得之gs_dump逻辑备份与恢复" + +date = "2021-07-10" + +tags = ["openGauss实验心得之gs_dump逻辑备份与恢复"] + +archives = "2021-07" + +author = "Mia" + +summary = "openGauss实验心得之gs_dump逻辑备份与恢复" + +img = "/zh/post/zhengwen2/img/img24.png" + +times = "12:30" + ++++ + +# openGauss实验心得之gs_dump逻辑备份与恢复 + + 2021年4月份开始接触openGauss并做openGauss的有关实验,今天记下gs_dump逻辑备份的实验经历,以免未来忘记。(部分内容可能有疏漏,望包容和指出) +注:实验的设计思路参考于华为openGauss的指导手册。 + +

    1,数据库逻辑备份介绍

    +

            数据库逻辑备份指将数据库对象和文件导出到文件的格式。那么物理备份和逻辑备份的区别在哪呢?做过物理备份实验gs_basebackup(在小编的上一篇文章也有哦)不难发现物理备份是将数据库关键文件转储,在恢复数据库时利用转储文件和cp命令进行恢复。而此次的逻辑备份指的是对数据库对象进行文件导出。逻辑备份是对象级备份,可移植性会更高,而且在逻辑备份中导出的文件格式可以自己指定哦。
    +        本实验的关键在于一些逻辑命令参数的指定,参数表在华为官方文章有哦(网址:华为gs_dump文章),以下主要以实例进行说明~

    +

    2,gs_dump逻辑备份实验

    +

    2.1 导出数据库全量信息,导出文件为纯文本格式

    +

    (1)以操作系统用户omm登录数据库主节点。

    +
    Ssh root@弹性公网ip //并输入密码
    +cd /opt/software/openGauss/script
    +su - omm
    +
    +

    成功登入的截图如下:
    +21.png
    +创建存储备份文件的文件夹。

    +
    mkdir -p /home/omm/logical/backup
    +
    +

    执行gs_dump,导出的MPPDB_backup.sql文件格式为纯文本格式。

    +
    gs_dump -U omm -W Bigdata@123 -f /home/omm/logical/backup/MPPDB_backup.sql -p 26000 postgres -F p
    +
    +

    其中-U表示用户,-W用于指定用户连接的密码,-f表示指定输出文件,-p表示指定端口,-F表示表示输出格式,p表示纯文本格式
    +执行后结果为:

    +
    gsql ((openGauss 1.1.0 build 392c0438) compiled at 2020-12-31 20:08:06 commit 0 last mr  )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
    +Type "help" for help.
    +postgres=# \q
    +[omm@ecs-a560 ~]$ gs_dump -U omm -W Bigdata@123 -f /home/omm/logical/backup/MPPDB_backup.sql -p 26000 postgres -F p
    +gs_dump[port='26000'][postgres][2021-07-06 09:38:53]: The total objects number is 443.
    +gs_dump[port='26000'][postgres][2021-07-06 09:38:53]: [100.00%] 443 objects have been dumped.
    +gs_dump[port='26000'][postgres][2021-07-06 09:38:53]: dump database postgres successfully
    +gs_dump[port='26000'][postgres][2021-07-06 09:38:53]: total time: 378  ms
    +切换到backup文件夹,查看MPPDB_backup.sql文件。
    +ll /home/omm/logical/backup/
    +total 112K
    +-rw------- 1 omm dbgrp 109K Jul  6 09:38 MPPDB_backup.sql
    +末尾部分内容显示如下:
    +CREATE INDEX ix_pmk_snapshot_time ON pmk_snapshot USING btree (current_snapshot_time DESC) TABLESPACE pg_default;
    +SET search_path = public;
    +--
    +-- Name: inx_stu01; Type: INDEX; Schema: public; Owner: omm; Tablespace: 
    +--
    +CREATE INDEX inx_stu01 ON student USING btree (std_name) TABLESPACE pg_default;
    +--
    +-- Name: public; Type: ACL; Schema: -; Owner: omm
    +--
    +REVOKE ALL ON SCHEMA public FROM PUBLIC;
    +REVOKE ALL ON SCHEMA public FROM omm;
    +GRANT CREATE,USAGE ON SCHEMA public TO omm;
    +GRANT USAGE ON SCHEMA public TO PUBLIC;
    +--
    +-- PostgreSQL database dump complete
    +--
    +
    +

    2.2 导出数据库全量信息,导出文件格式为tar格式。

    +

    首先以操作系统用户omm登录数据库主节点。(操作步骤如上哦)
    +执行gs_dump,导出的MPPDB_backup.tar文件格式为tar格式。

    +
    gs_dump -U omm -W Bigdata@123 -f  /home/omm/logical/backup/MPPDB_backup.tar -p 26000 postgres -F t
    +
    +

    其中-t表示输出格式为tar
    +结果显示如下:

    +
    gs_dump[port='26000'][postgres][2021-07-06 09:45:05]: The total objects number is 443.
    +gs_dump[port='26000'][postgres][2021-07-06 09:45:05]: [100.00%] 443 objects have been dumped.
    +gs_dump[port='26000'][postgres][2021-07-06 09:45:05]: dump database postgres successfully
    +gs_dump[port='26000'][postgres][2021-07-06 09:45:05]: total time: 311  ms
    +
    +

    查看生成的文件信息。

    +
    ll /home/omm/logical/backup/
    +
    +

    结果显示如下:

    +
    total 356K
    +-rw------- 1 omm dbgrp 109K Jul  6 09:38 MPPDB_backup.sql
    +-rw------- 1 omm dbgrp 241K Jul  6 09:45 MPPDB_backup.tar
    +
    +

    2.3 导出数据库全量信息,导出文件格式为自定义归档格式。

    +

    首先以操作系统用户omm登录数据库主节点。(操作步骤同上哦)
    +执行gs_dump,导出的MPPDB_backup.dmp文件格式为自定义归档格式。

    +
    gs_dump -U omm -W Bigdata@123 -f  /home/omm/logical/backup/MPPDB_backup.dmp -p 26000 postgres -F c
    +
    +

    其中c表示文件格式为自定义格式。
    +结果显示如下:

    +
    gs_dump[port='26000'][postgres][2021-07-06 09:47:44]: The total objects number is 443.
    +gs_dump[port='26000'][postgres][2021-07-06 09:47:44]: [100.00%] 443 objects have been dumped.
    +gs_dump[port='26000'][postgres][2021-07-06 09:47:44]: dump database postgres successfully
    +gs_dump[port='26000'][postgres][2021-07-06 09:47:44]: total time: 312  ms
    +
    +

    查看生成的文件信息。

    +
    ll /home/omm/logical/backup/
    +//以下为显示结果
    +total 468K
    +-rw------- 1 omm dbgrp 110K Jul  6 09:47 MPPDB_backup.dmp
    +-rw------- 1 omm dbgrp 109K Jul  6 09:38 MPPDB_backup.sql
    +-rw------- 1 omm dbgrp 241K Jul  6 09:45 MPPDB_backup.tar
    +
    +

    2.4 导出数据库全量信息,导出文件格式为目录格式。

    +

    首先以操作系统用户omm登录数据库主节点。(操作步骤同上哦)
    +执行gs_dump,导出的MPPDB_backup文件格式为目录格式。

    +
    gs_dump -U omm -W Bigdata@123 -f /home/omm/logical/backup/MPPDB_backup -p 26000  postgres -F d
    +
    +

    其中d指定为目录格式
    +显示结果如下:

    +
    gs_dump[port='26000'][postgres][2021-07-06 09:52:12]: The total objects number is 443.
    +gs_dump[port='26000'][postgres][2021-07-06 09:52:12]: [100.00%] 443 objects have been dumped.
    +gs_dump[port='26000'][postgres][2021-07-06 09:52:12]: dump database postgres successfully
    +gs_dump[port='26000'][postgres][2021-07-06 09:52:12]: total time: 312  ms
    +
    +

    (3)查看生成的文件信息。

    +
    ll /home/omm/logical/backup/
    +
    +

    显示结果如下:

    +
    total 472K
    +drwx------ 2 omm dbgrp 4.0K Jul  6 09:52 MPPDB_backup
    +-rw------- 1 omm dbgrp 110K Jul  6 09:47 MPPDB_backup.dmp
    +-rw------- 1 omm dbgrp 109K Jul  6 09:38 MPPDB_backup.sql
    +-rw------- 1 omm dbgrp 241K Jul  6 09:45 MPPDB_backup.tar
    +
    +

    进一步查看目录内部内容

    +
    cd /home/omm/logical/backup/MPPDB_backup
    +ls
    +
    +

    结果显示如下:

    +
    4522.dat.gz  4524.dat.gz  4526.dat.gz  4528.dat.gz  dir.lock
    +4523.dat.gz  4525.dat.gz  4527.dat.gz  4529.dat.gz  toc.dat
    +
    +

    2.5 导出数据库的表(或视图、或序列、或外表)对象。

    +

    以操作系统用户omm登录数据库主节点。(步骤如上面哦)
    +执行gs_dump,导出的表customer_t1

    +
    gs_dump -U omm -W Bigdata@123 -f /home/omm/logical/backup/bkp_shl2.sql -t public.customer_t1 -p 26000 postgres
    +
    +

    其中customer_t1为事先建立好的表,bkp_shl2,sql为导出的文件
    +运行结果如下:

    +
    gs_dump[port='26000'][postgres][2021-07-06 09:57:45]: The total objects number is 379.
    +gs_dump[port='26000'][postgres][2021-07-06 09:57:45]: [100.00%] 379 objects have been dumped.
    +gs_dump[port='26000'][postgres][2021-07-06 09:57:45]: dump database postgres successfully
    +gs_dump[port='26000'][postgres][2021-07-06 09:57:45]: total time: 178  ms
    +
    +

    查看生成的文件信息

    +
    ll /home/omm/logical/backup/
    +cat /home/omm/logical/backup/bkp_shl2.sql 
    +
    +

    显示的结果部分如下:

    +
    --
    +-- PostgreSQL database dump
    +--
    +SET statement_timeout = 0;
    +SET xmloption = content;
    +SET client_encoding = 'UTF8';
    +SET standard_conforming_strings = on;
    +SET check_function_bodies = false;
    +SET client_min_messages = warning;
    +SET search_path = public;
    +SET default_tablespace = '';
    +SET default_with_oids = false;
    +--
    +-- Name: customer_t1; Type: TABLE; Schema: public; Owner: omm; Tablespace: 
    +--
    +CREATE TABLE customer_t1 (
    +    c_customer_sk integer,
    +    c_customer_id character(5),
    +    c_first_name character(6),
    +    c_last_name character(8)
    +)
    +WITH (orientation=row, compression=no);
    +ALTER TABLE public.customer_t1 OWNER TO omm;
    +--
    +-- Data for Name: customer_t1; Type: TABLE DATA; Schema: public; Owner: omm
    +--
    +COPY customer_t1 (c_customer_sk, c_customer_id, c_first_name, c_last_name) FROM stdin;
    +3769	hello	\N	\N
    +6885	maps 	Joes  	\N
    +4321	tpcds	Lily  	\N
    +9527	world	James 	\N
    +\.
    +;
    +--
    +-- PostgreSQL database dump complete
    +--
    +
    +

    3, 实验小结

    +

    逻辑备份实验的步骤大体上为登录数据库主节点—进行逻辑备份—查看逻辑文件,总体上比较简单。逻辑备份对于数据库的恢复非常重要,是数据库安全机制重要的一环。openGauss逻辑备份可以指定文件格式的机制也非常灵活。

    +

    注:本篇文章为原创文章,转载请注明出处哦~

    +
    + + + + diff --git "a/content/zh/post/zhengwen2/opengauss 2.0.0 \345\215\207\345\210\2602.0.1.md" "b/content/zh/post/zhengwen2/opengauss 2.0.0 \345\215\207\345\210\2602.0.1.md" new file mode 100644 index 0000000000000000000000000000000000000000..737f226c6ad213ec6bc61e8d0f0490be544f3686 --- /dev/null +++ "b/content/zh/post/zhengwen2/opengauss 2.0.0 \345\215\207\345\210\2602.0.1.md" @@ -0,0 +1,183 @@ ++++ + +title = "opengauss 2.0.0 升到2.0.1" + +date = "2021-06-29" + +tags = ["opengauss 2.0.0 升到2.0.1"] + +archives = "2021-07" + +author = "张兴龙" + +summary = "opengauss 2.0.0 升到2.0.1" + +img = "/zh/post/zhengwen2/img/img28.png" + +times = "19:30" + ++++ + +# opengauss 2.0.0 升到2.0.1 + +操作系统是openeuler 20.03 LTS sp1,4c4g,opengauss 2.0 是用贾军锋老师的一键安装脚本安装的。 + +```sql +[omm@openeuler ~]$ gs_om -V +gs_om (openGauss OM 2.0.0 build 7ef5c80a) compiled at 2021-03-31 21:16:05 commit 0 last mr +``` + +将2.0.1的包放在原安装目录/soft/openGauss下 + +```sql +ls +bin openGauss-2.0.1-openEuler-64bit-om.sha256 +clusterconfig.xml openGauss-2.0.1-openEuler-64bit-om.tar.gz +etc openGauss-2.0.1-openEuler-64bit.sha256 +include openGauss-2.0.1-openEuler-64bit.tar.bz2 +jre openGauss-Package-bak_78689da9.tar.gz +lib openGauss-Package-bak_d97c0e8a.tar.gz +openGauss-2.0.0-openEuler-64bit-all.tar.gz script +openGauss-2.0.0-openEuler-64bit-om.sha256 share +openGauss-2.0.0-openEuler-64bit-om.tar.gz simpleInstall +openGauss-2.0.0-openEuler-64bit.sha256 upgrade_sql.sha256 +openGauss-2.0.0-openEuler-64bit.tar.bz2 upgrade_sql.tar.gz +openGauss-2.0.1-openEuler-64bit-all.tar.gz version.cfg +``` + +进行安装前预检查 + +```sql +[root@openeuler openGauss]# script/gs_preinstall -X clusterconfig.xml -U omm -G dbgrp +Parsing the configuration file. +Successfully parsed the configuration file. +Installing the tools on the local node. +Successfully installed the tools on the local node. +Setting pssh path +Successfully set core path. +Are you sure you want to create the user[omm] and create trust for it (yes/no)? no +Preparing SSH service. +Successfully prepared SSH service. +Checking OS software. +Successfully check os software. +Checking OS version. +Successfully checked OS version. +Creating cluster's path. +Successfully created cluster's path. +Setting SCTP service. +Successfully set SCTP service. +Set and check OS parameter. +Setting OS parameters. +Successfully set OS parameters. +Warning: Installation environment contains some warning messages. +Please get more details by "/soft/openGauss/script/gs_checkos -i A -h openeuler --detail". +Set and check OS parameter completed. +Preparing CRON service. +Successfully prepared CRON service. +Setting user environmental variables. +Successfully set user environmental variables. +Setting the dynamic link library. +Successfully set the dynamic link library. +Setting Core file +Successfully set core path. +Setting pssh path +Successfully set pssh path. +Set ARM Optimization. +No need to set ARM Optimization. +Fixing server package owner. +Setting finish flag. +Successfully set finish flag. +Preinstallation succeeded. +``` +检查完后环境变量变化了,GAUSS_VERSION=2.0.1 + +```sql +cat .bashrc +# Source default setting +[ -f /etc/bashrc ] && . 
/etc/bashrc +``` + +```sql +# User environment PATH +PATH="$HOME/.local/bin:$HOME/bin:$PATH" +export GPHOME=/gaussdb/om +export PATH=$GPHOME/script/gspylib/pssh/bin:$GPHOME/script:$PATH +export LD_LIBRARY_PATH=$GPHOME/lib:$LD_LIBRARY_PATH +export PYTHONPATH=$GPHOME/lib +export GAUSSHOME=/gaussdb/app +export PATH=$GAUSSHOME/bin:$PATH +export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH +export S3_CLIENT_CRT_FILE=$GAUSSHOME/lib/client.crt +export GAUSS_VERSION=2.0.1 +export PGHOST=/gaussdb/om/omm_mppdb +export GAUSSLOG=/gaussdb/log/omm +umask 077 +export GAUSS_ENV=2 +export GS_CLUSTER_NAME=dbCluster +``` + + +执行升级 + +```sql +[omm@openeuler openGauss]$ script/gs_upgradectl -t auto-upgrade --grey -X clusterconfig.xml +Static configuration matched with old static configuration files. +Successfully set upgrade_mode to 0. +omm@openeuler's password: +omm@openeuler's password: +Checking upgrade environment. +Successfully checked upgrade environment. +Start to do health check. +Successfully checked cluster status. +Upgrade all nodes. +Performing grey rollback. +omm@openeuler's password: +No need to rollback. +The directory /gaussdb/app_78689da9 will be deleted after commit-upgrade, please make sure there is no personal data. +Installing new binary. +Successfully backup hotpatch config file. +Sync cluster configuration. +Successfully synced cluster configuration. +Switch symbolic link to new binary directory. +Successfully switch symbolic link to new binary directory. +Switching all db processes. +Wait for the cluster status normal or degrade. +Successfully switch all process version +The nodes ['openeuler'] have been successfully upgraded to new version. Then do health check. +Start to do health check. +Successfully checked cluster status. +Waiting for the cluster status to become normal. +. +The cluster status is normal. +Upgrade main process has been finished, user can do some check now. +Once the check done, please execute following command to commit upgrade: +gs_upgradectl -t commit-upgrade -X /soft/openGauss/clusterconfig.xml +Successfully upgrade nodes. +``` + +有关软件版本 + +```sql +[omm@openeuler gaussdb]$ gs_om -V +gs_om (openGauss OM 2.0.1 build da8e0828) compiled at 2021-06-02 19:48:48 commit 0 last mr +[omm@openeuler gaussdb]$ gsql -V +gsql (openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:16 commit 0 last mr +``` +提交升级 + +```sql +gs_upgradectl -t commit-upgrade -X /soft/openGauss/clusterconfig.xml +Start to do health check. +Successfully checked cluster status. +Successfully cleaned old install path. +Commit upgrade succeeded. +``` + +连接测试 +```sql +gsql -r -d postgres -p 26000 +gsql ((openGauss 2.0.1 build d97c0e8a) compiled at 2021-06-02 19:37:16 commit 0 last mr ) +Non-SSL connection (SSL connection is recommended when requiring high-security) +Type "help" for help. +``` +升级完成。 diff --git "a/content/zh/post/zhengwen2/opengauss gs_basebackup\345\256\236\350\267\265.md" "b/content/zh/post/zhengwen2/opengauss gs_basebackup\345\256\236\350\267\265.md" new file mode 100644 index 0000000000000000000000000000000000000000..a867c1208781fd86aba77c4642325cdb8610263e --- /dev/null +++ "b/content/zh/post/zhengwen2/opengauss gs_basebackup\345\256\236\350\267\265.md" @@ -0,0 +1,108 @@ ++++ + +title = "opengauss gs_basebackup实践" + +date = "2021-07-09" + +tags = ["opengauss gs_basebackup实践"] + +archives = "2021-07" + +author = "邹阳" + +summary = "opengauss gs_basebackup实践" + +img = "/zh/post/zhengwen2/img/img38.png" + +times = "12:30" + ++++ + +# opengauss gs_basebackup实践 + + + + +

    详细参考 https://gitee.com/opengauss/docs 中的备份与恢复篇

    +

    https://gitee.com/opengauss/docs/blob/master/content/zh/docs/Administratorguide/%E5%A4%87%E4%BB%BD%E4%B8%8E%E6%81%A2%E5%A4%8D.md

    +

    以下文字摘至官方文档。

    +

    openGauss部署成功后,在数据库的运行过程中,往往会遇到各种问题及异常状态。

    +

    openGauss提供了gs_basebackup工具用作基础的物理备份。它可以实现对数据文件的二进制拷贝备份,其实现原理使用了复制协议。

    +

    远程执行gs_basebackup时,需要使用系统管理员账户。

    +

    Ø备份的前提条件

    +

    1.备份客户端可以正常连接openGauss数据库;

    +

    2.pg_hba.conf中需要配置允许复制链接,且该连接必须由一个系统管理员建立;

    +

    3.如果xlog传输模式为stream模式,则需要配置max_wal_senders的数量, 至少有一个可用;

    +

    4.如果xlog传输模式为fetch模式,则需要把wal_keep_segments参数设置得足够高,确保在备份完毕之前日志不会被移除;

    +

    Tips

    +

    1.gs_basebackup 支持全量备份,不支持增量;

    +

    2.gs_basebackup 支持简单备份格式和压缩备份格式;

    +

    3.gs_basebackup 在备份包含绝对路径的表空间时,不能在同一台机器上进行备份,会产生冲突;

    +

    4.若打开增量检测点功能且打开双写, gs_basebackup也会备份双写文件;

    +

    5.若pg_xlog目录为软链接,备份时将不会建立软链接,会直接将数据备份到目的路径的pg_xlog目录下;

    +

    6.备份过程中收回用户的备份权限,可能导致备份失败,或者备份数据不可用。

    + +##### 一、环境简介 +

    两台主机分别为 node01 node02 分别安装opengauss 2.0 opengauss2.0.1 数据库

    + +##### 二、主库中创建新数据库,并备份恢复至备库 +

    CREATE DATABASE mydb WITH ENCODING ‘GBK’ template = template0;

    +

    image20210707182235155.png

    +

    使用该数据库并创建表空间、表

    +

    查看当前使用的数据库

    +

    select current_catalog,current_database();

    +

    列出所有的数据库

    +

    \l

    +

    image20210708163732041.png

    +

    \c <要使用的数据库名称> 连接mydb

    +

    image20210708163915384.png

    +

    \db 查询对应的数据库下的表空间

    +

    image20210708164101815.png

    +

    创建表空间

    +

    create tablespace mytbs RELATIVE LOCATION ‘tablespace/mytbs’;

    +

    创建测试表

    +

    create table table_in_mytbs_ts (col1 char(10)) tablespace mytbs;

    +

    image20210708170539527.png

    +

    gs_basebackup备份参数介绍

    +

    image20210710140932539.png

    +

    在主库修改pg_hba.conf 配置添加配置

    +

    host replication rep1 172.16.100.0/24 sha256

    +

    image20210710141553860.png

    +

    修改参数后重启数据库

    +

    image20210710142554436.png

    +

    创建复制用户

    +

    –创建备份用户并放开权限(远程执行gs_basebackup时,需要使用系统管理员账户)

    +

    postgres=# create user rep1 with sysadmin identified by ‘huawei@1234’;

    +

    在备库创建备份

    +

    su - omm

    +

    gs_basebackup -D /home/omm/gs_bak -h 172.16.100.26 -p 26000 -U rep1 -W

    +

    image20210710143331004.png

    +

    可以看到备份其实是将目录做了拷贝

    +

    image20210710143413887.png

    +

    本机恢复

    +

    首先删除mydb

    +

    drop database mydb

    +

    image20210710152158022.png

    +

    将备份从备机传至主机

    +

    image20210710152658230.png

    +

    将数据库原目录改名,并将备份目录改成原数据库目录的名字

    +

    image20210710152800332.png

    +

    启动数据库

    +

    image20210710152910645.png

    +

    验证原数据库已经找回

    +

    image20210710152953781.png

    +

    备机恢复

    +

    修改备份文件中的postgresql.conf IP地址

    +

    image20210710153241580.png
    +备库停止数据库,并将主库的备份文件挪到数据库目录下

    +

    image20210710153418909.png

    +

    修改目录,将数据库原目录改名,将备份目录改名为数据库目录

    +

    image20210710153455102.png

    +

    启动数据库,验证成功

    +

    image20210710153627215.png

    +

    小贴士,实践验证,opengauss 2.0 中创建的库,可以在opengauss 2.0.1 中正常打开。文中的node01 为2.0 node02 为 2.0.1

    +
    + + + + diff --git "a/content/zh/post/zhengwen2/opengauss\345\256\236\350\267\265\346\200\273\347\273\223\345\255\246\344\271\240\345\277\203\345\276\227.md" "b/content/zh/post/zhengwen2/opengauss\345\256\236\350\267\265\346\200\273\347\273\223\345\255\246\344\271\240\345\277\203\345\276\227.md" new file mode 100644 index 0000000000000000000000000000000000000000..caded8fb8127e8d9836973e4480a59d787d4037b --- /dev/null +++ "b/content/zh/post/zhengwen2/opengauss\345\256\236\350\267\265\346\200\273\347\273\223\345\255\246\344\271\240\345\277\203\345\276\227.md" @@ -0,0 +1,255 @@ ++++ + +title = "opengauss实践总结学习心得" + +date = "2021-07-09" + +tags = ["opengauss实践总结学习心"] + +archives = "2021-07" + +author = "poohanyuzuru" + +summary = "opengauss实践总结学习心" + +img = "/zh/post/zhengwen2/img/img22.jpg" + +times = "12:30" + ++++ + +# opengauss实践总结学习心 + +

    实验一 在ECS上安装部署openGauss数据库

    +

    一、实验内容
    +1、实验内容:本实验主要内容为弹性云服务器(openEuler)上安装部署openGauss数据库,并进行简单的数据库相关操作。

    +

    2、实验概览:
    +5C4402A4232F43A2BCE5E70BDC6C3D72.png
    +实验概览图

    +

    二、实验过程
    +1.进入华为官网,购买弹性云服务器ECS(openEuler ARM 操作系统)。购买时需自定义购买进行基础配置、网路配置、高级配置等。
    +2.修改操作系统配置。使用SSH工具(比如:PuTTY等)从本地电脑通过配置弹性云服务器的弹性公网IP地址(如:124.70.36.251)来连接ECS,并使用ROOT用户来登录。
    +3.设置字符集参数。
    +[root@ecs-c9bf ~]# cat >>/etc/profile<<EOF
    +export LANG=en_US.UTF‐8
    +EOF
    +[root@ecs-c9bf ~]# source /etc/profile
    +4.修改python版本
    +[root@ecs-c9bf ~]# cd /usr/bin
    +[root@ecs-c9bf bin] # mv python python.bak
    +[root@ecs-c9bf bin] # ln -s python3 /usr/bin/python
    +[root@ecs-c9bf bin] # python -V
    +[root@ecs-c9bf ~]# yum install libaio* -y
    +5.下载数据库安装包
    +[root@ecs-c9bf bin]# mkdir -p /opt/software/openGauss[root@ecs-c9bf bin]# chmod 755 -R /opt/software
    +[root@ecs-c9bf bin]# cd /opt/software/openGauss
    +[root@ecs-c9bf openGauss]# wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.1.0/arm/openGauss-1.1.0-openEuler-64bit-all.tar.gz
    +6.创建XML配置文件
    +[root@ecs-c9bf bin]# cd /opt/software/openGauss
    +[root@ecs-c9bf openGauss]# vi clusterconfig.xml
    +输入”i”进入INSERT模式,添加文本如下

    + +​ + + +``` + + + + + + + + + + + + + + + +``` + + + +点击“ESC”退出INSERT模式,然后输入“:wq”后回车退出编辑并保存文本。 +7.修改performance.sh文件 +[root@ecs-c9bf openGauss]# vi /etc/profile.d/performance.sh +输入”i”,进入INSERT模式。 +CPUNO=`cat /proc/cpuinfo|grep processor|wc -l` +export GOMP_CPU_AFFINITY=0-$[CPUNO - 1] +

    #sysctl -w vm.min_free_kbytes=112640 &> /dev/null
    +sysctl -w vm.dirty_ratio=60 &> /dev/null
    +sysctl -w kernel.sched_autogroup_enabled=0 &> /dev/null
    +点击“ESC”退出INSERT模式。输入“:wq”后回车,保存退出。
    +8.执行预安装前加载安装包中lib库
    +[root@ecs-c9bf openGauss]# vi /etc/profile
    +输入i,进入INSERT模式
    +export packagePath=/opt/software/openGauss
    +export LD_LIBRARY_PATH=packagePath/script/gspylib/clib:packagePath/script/gspylib/clib:LD_LIBRARY_PATH
    +[root@ecs-c9bf openGauss]# source /etc/profile
    +9.解压安装包
    +[root@ecs-c9bf openGauss]# cd /opt/software/openGauss
    +[root@ecs-c9bf openGauss]# tar -zxvf openGauss-1.1.0-openEuler-64bit-all.tar.gz
    +[root@ecs-c9bf openGauss]# tar -zxvf openGauss-1.1.0-openEuler-64bit-om.tar.gz
    +用ls命令查看
    +[root@ecs-c9bf openGauss]# ls
    +clusterconfig.xml openGauss-Package-bak_392c0438.tar.gz
    +lib script
    +openGauss-1.1.0-openEuler-64bit-all.tar.gz simpleInstall
    +openGauss-1.1.0-openEuler-64bit-om.sha256 upgrade_sql.sha256
    +openGauss-1.1.0-openEuler-64bit-om.tar.gz upgrade_sql.tar.gz
    +openGauss-1.1.0-openEuler-64bit.sha256 version.cfg
    +openGauss-1.1.0-openEuler-64bit.tar.bz2
    +使用gs_preinstall准备好安装环境,切换到gs_preinstall命令所在目录。
    +[root@ecs-c9bf openGauss]# cd /opt/software/openGauss/script/
    +[root@ecs-c9bf script]# python gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml
    +Are you sure you want to create trust for root (yes/no)? yes
    +Please enter password for root.
    +Password: --说明:此处输入密码时,屏幕上不会有任何反馈,不用担心,这是LINUX操作系统对密码的保护.
    +Creating SSH trust for the root permission user.
    +创建操作系统omm用户,并对omm创建trust,并设置密码
    +Are you sure you want to create the user[omm] and create trust for it (yes/no)? yes
    +Please enter password for cluster user.
    +Password:
    +Please enter password for cluster user again.
    +Password:
    +Successfully created [omm] user on all nodes.
    +10、执行安装
    +[root@ecs-c9bf script]# chmod -R 755 /opt/software/openGauss/script
    +[root@ecs-c9bf script]# su - omm
    +[omm@ecs-c9bf ~]$ gs_install -X /opt/software/openGauss/clusterconfig.xml --gsinit-parameter="–encoding=UTF8" --dn-guc=“max_process_memory=4GB” --dn-guc=“shared_buffers=256MB” --dn-guc=“bulk_write_ring_size=256MB” --dn-guc=“cstore_buffers=16MB”
    +数据库使用
    +[root@ecs-c9bf script]# su - omm
    +[omm@ecs-c9bf ~]$ gs_om -t start
    +Starting cluster.

    +

    =========================================
    +Successfully started.
    +[omm@ecs-c9bf ~]$ gsql -d postgres -p 26000 -r
    +postgres=# alter role omm identified by ‘Bigdata@123’ replace ‘GaussDB@123’;
    +postgres=# CREATE USER joe WITH PASSWORD “Bigdata@123”;
    +postgres=# CREATE DATABASE db_tpcc OWNER joe;
    +postgres=# \q
    +[omm@ecs-c9bf ~]$ gsql -d db_tpcc -p 26000 -U joe -W Bigdata@123 -r
    +db_tpcc=> CREATE SCHEMA joe AUTHORIZATION joe;
    +db_tpcc=> CREATE TABLE mytable (firstcol int);
    +CREATE TABLE
    +db_tpcc=> INSERT INTO mytable values (100);
    +db_tpcc=> SELECT * from mytable; firstcol ---------- 100 (1 row)

    +

    三、实验结果
    +1.启动服务。
    +1B09116E7BBB4DEB85E8BC30F9A71F89.png

    +

    2.使用新用户连接到db_tpcc数据库。
    +77662B2147D448EAA5E67D4FFE24D8DE.png

    +

    3.查看表中数据。
    +3C0F246CE2B544D19BEC15701B67B3A5.png

    +

    四、分析总结
    +通过这个实验,我学习了弹性云服务器(openEuler)上安装部署openGauss数据库,并进行简单的数据库相关操作。
    +这个实验做的过程中必须严格按照实验指导书上的步骤完成。当操作过程中遇到问题时可以认真查找错误,如果检查不出来就要重头开始做或者重新购买服务器。

    +

    参考文献:《数据库指导手册》华为技术有限公司

    +

    实验二 openGauss金融场景化实验

    +

    一、实验内容
    +1、内容描述:本实验以金融行业为场景,设计数据库模型,并使用华为openGauss构建金融场景下的数据库。通过对数据库中的对象(表、约束、视图、索引等)创建,掌握基础SQL语法,并通过对表中数据的增删改查,模拟金融场景下的业务实现。
    +2、实验概览:
    +062A12550AB346879407798FA0F7ED77.png
    +实验概览图

    +

    F6D430E6CDA840CFB100CCC548CEB832.jpeg
    +金融数据模型ER图

    +

    二、实验过程及结果

    +

    1.创建完所有表后,截图查询插入结果,例如select count(*) from bank_card;(挑选2个表)

    +

    ①对client表进行数据初始化。执行insert操作。查询插入结果。
    +8ECFC9F225514F0B825AA68402959D85.jpeg

    +

    ②对bank_card表进行数据初始化。执行insert操作。查询插入结果。
    +3C2BEB41D16A49098F45535C1C3B6DC0.jpeg

    +

    2.截图重新命名索引的过程(重命名语句和成反馈的结果)。
    +BC54E29640264A7887AF40C140F7F989.jpeg

    +

    3.使用JDBC连接数据库的执行结果(查询到websites表中的数据)。
    +6969C8B19F314849AA51E7B4BFB9AD96.png

    +

    三、分析总结
    +这个实验的前两步比较简单,我在做第三步的时候遇到了很多困难。
    +1.当我做到要使用gs_ctl将策略生效时,输入gs_ctl reload -D /gaussdb/data/db1/,此时服务器告诉我数据库里名为“postmaster.pid”的文件不存在,因此我只好购买了一个弹性公网IP地址为“124.70.111.125”的服务器重新安装后,再开始做实验二,顺利解决了问题。
    +7900AF029BC1477EA8FB2E24FB866807.png
    +2.接着往下做,我根据实验报告里提供的连接去下载安装了JDK,但等我安好并且配置完环境变量后,我发现本实验要求的是261版本,和我从官网下载的291版本不符合。因此我又花了一番功夫卸载掉JDK291,删掉注册表,并且修改环境变量,最终才把261版本安装好。

    +

    3.终于安好了JDK,当我要在cmd里对Java程序编译时,总是提醒我“错误:编码utf-8的不可映射字符”,我查询到这是因为我的程序里有中文才会这样,于是我将程序修改为“ANSI”编码,但依然提示错误。最后迫不得已,我将程序中的中文替换成英文,并将不必要的中文注释删除,最后编译成功。
    +93EBB86EF59F412C9A640E26BE152CC1.png
    +通过这次实验,我掌握了创建数据表、插入表数据、如何手工插入一条数据、添加约束、查询数据、数据的修改和删除、视图的使用、创建和重命名索引、创建新用户和授权、删除schema、用JDBC连接数据库等等这些操作方法。这都是这个实验本身带给我的一些非常实用的操作方法。
    +同时,在做第三步时,我还额外学到了怎样把java卸载干净;在重新配置和安装服务器时,我体会到了做实验总是会出现很多意外,一定要有耐心;在编译java时,出现了问题也要尽量去解决,总会有办法的。

    +

    实验三 openGauss数据库维护管理

    +

    一、实验内容
    +1、实验内容:本实验适用于 openGauss数据库,通过该实验可以顺利完成对数据库各项日常基本维护管理。本实验主要包括操作系统参数检查、openGauss健康状态检查、数据库性能检查、日志检查和清理、时间一致性检查、应用连接数检查、例行维护表等。
    +2、实验概览:
    +274FC44D3F604DEAA933F57C8783BA26.png
    +实验概览图

    +

    二、实验过程及结果

    +

    1.操作系统参数检查截图,在参数配置文件(/etc/sysctl.conf)中将参数 vm.min_free_kbytes(表示:内核内存分配保留的内存量) 的值调整为3488后,通过执行gs_checkos -i A --detail 查看更详细的信息。

    +

    ①首先执行gs_checkos 对系统参数进行检查,可以看到A6为warning。
    +163A29E3665F4F50913A6B2BF35E1915.png

    +

    ②修改vm.min_free_kbytes(表示:内核内存分配保留的内存量) 的值调整为3488后,通过执行gs_checkos -i A --detail 查看更详细的信息。
    +162461686DAB45D0A50A586D693C960D.png

    +

    ③按详细信息中的修改说明对系统参数进行修改。
    +vm.min_free_kbytes的值由3488调整为152444
    +net.ipv4.tcp_retries1的值由3调整为5.
    +net.ipv4.tcp_syn_retries的值由6调整为5.
    +net.sctp.path_max_retrans的值由5调整为10
    +net.sctp.max_init_retransmits的值由8调整为10
    +执行sysctl -p 命令使刚才修改的参数生效后,再次通过执行gs_checkos -i A 查看系统参数检查是否能通过。可以看到此时A6为Normal。
    +D282D0B702394126A371AC9077F5D289.jpeg

    +

    2.设置最大连接数,在omm 用户环境下通过gs_guc工具来增大参数值的过程(语句和反馈结果)。
    +①语句和反馈结果
    +F0B67253D71448C08380B8DD7FF84CE4.jpeg

    +

    ②验证是否为设置后的最大连接数
    +BB4B64E1BA4E4DEFB3D2347F2B9E1316.jpeg

    +

    3.例行表、索引的维护,截图查看特定表的统计信息。(查询pg_stat_all_tables这张表的信息)
    +B7AB12DA600B4005A4A6216493FCE40C.jpeg

    +

    三、分析总结
    +通过这个实验,我掌握了操作系统参数检查、openGauss健康状态检查、数据库性能检查、日志检查、最大连接数的设置、例行表和索引的维护等等操作方法。
    +我在这个实验中遇到的问题和收获有:
    +1.在调整系统参数值时,直接复制了实验指导书中的“vm.min_free_kbytes = 348844”,因此在检查系统参数的调整能否通过时一直显示Abnormal,后来我通过执行“gs_checkos -i A --detail”查看更详细的信息,发现应当将“vm.min_free_kbytes”的值调整为152444,经过修改并且使修改的参数生效后,A6终于显示为Normal。 + +2.在进行openGauss健康状态检查、数据库性能检查实验时,要注意数据库服务什么时候该启动和关闭。分别使用“gs_om -t stop;”和“gs_om -t start;”来控制数据库的normal和unavailable状态。有的时候会提示你需要restart,此时需要用“gs_om -t restart;”来重启数据库。 + +3.在设置最大连接数时,我掌握了两种方法来设置。一种是在omm 用户环境下通过gs_guc工具来增大参数值,一种是用alter system set 语句来设置此参数。 diff --git "a/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221Virtualbox+openEuler\351\203\250\345\261\236openGauss\346\225\260\346\215\256\345\272\223.md" "b/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221Virtualbox+openEuler\351\203\250\345\261\236openGauss\346\225\260\346\215\256\345\272\223.md" new file mode 100644 index 0000000000000000000000000000000000000000..75673e97af8473b1ec2cde6face3ecc24fee3d65 --- /dev/null +++ "b/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221Virtualbox+openEuler\351\203\250\345\261\236openGauss\346\225\260\346\215\256\345\272\223.md" @@ -0,0 +1,145 @@ ++++ + +title = "【openGauss】Virtualbox+openEuler部属openGauss数据库" + +date = "2021-07-09" + +tags = ["【openGauss】Virtualbox+openEuler部属openGauss数据库"] + +archives = "2021-07" + +author = "SonK1997" + +summary = "【openGauss】Virtualbox+openEuler部属openGauss数据库接" + +img = "/zh/post/zhengwen2/img/img20.png" + +times = "12:30" + ++++ + +# 【openGauss】Virtualbox+openEuler部属openGauss数据库 +

    +

    +

    前 言

    +

    简介

    +
    +

    openGauss是关系型数据库,采用客户端/服务器,单进程多线程架构,支持单机和一主多备部署方式,备机可读,支持双机高可用和读扩展。
    本实验主要描述openGauss数据库在openEuler 20.03-LTS上的单机安装部署。

    +
    +

    内容描述

    +
    +

    本实验主要内容为在openEuler 20.03-LTS上安装部署openGauss数据库,并进行简单的数据库相关操作。

    +
    +

    前置条件

    +
    +

    由于本实验主要是在openEuler操作系统上进行openGauss数据库的部署,需要掌握Linux系统的基本操作和系统命令,详细请参见附录一。

    +
    +

    实验环境说明

    +
    +

    组网说明
    本实验环境为虚拟机VirtualBox 6.1.14 + openEuler 20.03-LTS + openGauss 1.1.0。
    设备介绍
    为了满足openGauss安装部署实验需要,建议每套实验环境采用以下配置:
    Linux操作系统 openEuler 20.03-LTS
    windows操作系统 win10 x86 64位
    虚拟机 VirtualBox 6.1.14
    Python Python 3.7.X

    +
    +

    单机安装概览

    +

    在这里插入图片描述

    +

    openGauss数据库安装

    +

    1.1 实验介绍

    +

    关于本实验

    +
    +

    本实验主要描述openGauss数据库在虚拟机VirtualBox+openEuler上的安装配置。

    +
    +

    实验目的

    +
    +

    掌握虚拟机VirtualBox的安装配置方法;
    掌握openGauss数据库安装部署方法。

    +
    +

    1.2 虚拟机VirtualBox下载及安装

    +
    +

    步骤 1 进入官方网站下载页面。
    网址:https://www.virtualbox.org/wiki/Downloads
    点击” window主机” 下载windows版本的VirtualBox。
    步骤 2下载完成后,双击执行文件进行安装。
    下载后,文件名为:VirtualBox-6.1.14-140239-Win.exe,双击此执行文件进行安装,安装过程中存放地址可以根据自己想法去设置下,其他所有选项都可以默认,直接按下一步就行,最后安装成功。
    在这里插入图片描述在这里插入图片描述

    +
    +

    1.3 openEuler-20.03-LTS镜像文件下载

    +
    +

    步骤 1进入华为开源镜像站的下载页面。
    网址:https://mirrors.huaweicloud.com/openeuler/openEuler-20.03-LTS/ISO/x86_64/,具体如下:在这里插入图片描述

    +
    +

    1.4 VirtualBox下安装openEuler-20.03-LTS操作系统

    +

    步骤 1新建虚拟电脑。
    打开VirtualBox软件。在这里插入图片描述在这里插入图片描述在这里插入图片描述

    +
    +

    遇到问题:VirtualBox中重建Host-Only网卡后无法启动虚拟机(VERR_INTNET_FLT_IF_NOT_FOUND)
    在这里插入图片描述

    +
    +

    在这里插入图片描述
    在这里插入图片描述

    +

    在这里插入图片描述

    +

    1.5 操作系统环境准备

    +
    +

    为了操作方便,可以使用SSH工具(比如:PuTTY等)从本地电脑通过配置enp0s3网卡的IP地址(如:192.168.56.123)来连接虚拟机,并使用ROOT用户来登录。
    在这里插入图片描述

    +
    +
    +

    关闭防火墙注意命令中是firewalld而不是firewall
    步骤 1关闭防火墙。
    执行以下二个命令将防火墙关闭,

    +
    +
    systemctl stop firewalld.service
    +systemctl disable firewalld.service,具体如下:
    +[root@db1 ~]# systemctl stop firewalld.service
    +[root@db1 ~]# systemctl disable firewalld.service
    +Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
    +Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
    +[root@db1 ~]#
    +
    +

    在这里插入图片描述
    因为测试数据,导致了追加性写入。参考资料:shell实战(二):cat EOF 追加与覆盖文件

    +
    +

    cat >/etc/profile<<EOF
    注意这里输入错误之后,采用了一次覆盖读写。
    在这里插入图片描述在这里插入图片描述

    +
    +

    在这里插入图片描述在这里插入图片描述

    +

    步骤 6清理软件安装包。

    +
    +

    这里并不支持ll命令,查询使用ls -l命令可以看到详细信息。

    +
    +
    [omm@db1 openGauss]$ exit
    +logout
    +[root@db1 /]# cd /root
    +[root@db1 script]# cd /opt/software/openGauss/
    +[root@db1 openGauss]# ll
    +
    +

    成功删除安装包
    在这里插入图片描述

    +

    1.6 安装openGauss数据库

    +

    数据库使用

    +

    1.7 前提条件

    +
    +

    openGauss正常运行。由于本实验是对openGauss数据库的基本使用,需要掌握openGauss数据库的基本操作和SQL语法,openGauss数据库支持SQL2003标准语法,数据库基本操作参见附录二。

    +
    +

    1.8 操作步骤

    +

    步骤 1以操作系统用户omm登录数据库主节点。
    [root@ecs-c9bf script]# su - omm

    +
    +

    进入数据库成功
    在这里插入图片描述

    +
    +
    +

    psql在退出时并不是使用exit,而是使用\q
    alter role omm identified by ‘bigdata@1997’ replace ‘openguass@1997’;
    注意后面这个密码是初始设置的数据库database的密码。

    +

    在这里插入图片描述在这里插入图片描述
    如上创建了一个用户名为goku,密码为bigdata@1997的用户。
    在这里插入图片描述
    创建完db_test数据库后,就\q方法退出postgres数据库,使用新用户连接到此数据库执行接下来的创建表等操作。当然,也可以选择继续在默认的postgres数据库下做后续的体验。
    在这里插入图片描述

    +
    +
    +

    测试用goku用户连接db_test,并且创建SCHEMA,这里对SCHEMA的理解可以参考数据库中的Schema是什么?,关于openGauss对SCHEMA的定义可以参考:华为openGauss 创建和管理schema

    +
    +
    +

    那么CREATE SCHEMA goku AUTHORIZATION goku;实际上就是创建了一个名为goku的SCHEMA,而其访问权限仅限于goku。

    +

    创建一个名称为mytable,只有一列的表。字段名为firstcol,字段类型为integer。CREATE TABLE mytable (firstcol int);
    向表中插入数据:100 INSERT INTO mytable values (100);

    +
    +

    在这里插入图片描述

    +
    [omm@db1997 ~]$ gsql -d db_test -p 26000 -U goku -W bigdata@1997  -r
    +gsql ((openGauss 1.1.0 build 392c0438) compiled at 2020-12-31 20:08:21 commit 0 last mr  )
    +Non-SSL connection (SSL connection is recommended when requiring high-security)
    +Type "help" for help.
    +db_test=> CREATE TABLE mytable (firstcol int);
    +CREATE TABLE
    +db_test=>  INSERT INTO mytable values (100);
    +INSERT 0 1
    +db_test=>  SELECT * from mytable;
    + firstcol
    +----------
    +      100
    +(1 row)
    +————————————————
    +
    +
    diff --git "a/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\357\274\210\344\270\200\357\274\211\345\256\236\351\252\214\344\273\213\347\273\215&&gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267.md" "b/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\357\274\210\344\270\200\357\274\211\345\256\236\351\252\214\344\273\213\347\273\215&&gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267.md"
    new file mode 100644
    index 0000000000000000000000000000000000000000..01f198588e014857a9c9bb637b202129056a5386
    --- /dev/null
    +++ "b/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\357\274\210\344\270\200\357\274\211\345\256\236\351\252\214\344\273\213\347\273\215&&gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267.md"
    @@ -0,0 +1,536 @@
    ++++
    +
    +title = "【openGauss】gsql客户端工具(一)实验介绍&&gsql客户端工具.md" 
    +
    +date = "2021-07-09" 
    +
    +tags = ["【openGauss】gsql客户端工具(一)实验介绍&&gsql客户端工具.md"] 
    +
    +archives = "2021-07" 
    +
    +author = "SogK1997" 
    +
    +summary = "【openGauss】gsql客户端工具(一)实验介绍&&gsql客户端工具.md"
    +
    +img = "/zh/post/zhengwen2/img/img4.png" 
    +
    +times = "12:30"
    +
    ++++
    +
    +# 【openGauss】gsql客户端工具(一)实验介绍&&gsql客户端工具.md
    +

    gsql客户端工具

    +

    +

    前 言

    +

    gsql命令参考官方文档

    +

    简介

    +

    本指导书适用于对数据库开发调试工具的使用,通过该指导书可以使用gsql数据库开发调试工具连接openGauss数据库。

    +

    内容描述

    +

    本实验指导书主要内容为使用gsql数据库开发调试工具连接openGauss数据库。

    +

    前置条件

    +
      +
    • 由于本实验主要是在openEuler操作系统上进行gsql开发调试工具连接数据库,需要掌握Linux系统的基本操作和系统命令,详细请参见附录一。
    • +
    • 连接数据库后可以使用gsql元命令管理和使用数据库,需要掌握openGauss数据库的基本操作,数据库基本操作参见附录二。
      +实验环境说明
    • +
    • 组网说明
      +本实验环境为openGauss数据库管理系统,安装在本机virtualbox的openEuler服务器上。
    • +
    +

    image.png

    +

    客户端工具

    +

    1.1 实验介绍

    +

    1.1.1 关于本实验

    +

    本实验主要描述openGauss数据库的客户端工具的使用和连接数据库的方法。

    +

    1.1.2 实验目的

    +
      +
    • 掌握gsql客户端工具本地连接数据库的方法;
    • +
    • 掌握gsql客户端工具远程连接数据库的方法;
    • +
    • 掌握gsql客户端工具使用方法;
    • +
    • 掌握图形化界面客户端工具Data Studio的安装及使用方法。
    • +
    +

    1.2 gsql客户端工具

    +

    gsql是openGauss提供在命令行下运行的数据库连接工具,可以通过此工具连接服务器并对其进行操作和维护,除了具备操作数据库的基本功能,gsql还提供了若干高级特性,便于用户使用。

    +

    1.2.1 gsql连接数据库

    +

    gsql是openGauss自带的客户端工具。使用gsql连接数据库,可以交互式地输入、编辑、执行SQL语句。

    +

    1.2.1.1 确认连接信息

    +

    客户端工具通过数据库主节点连接数据库。因此连接前,需获取数据库主节点所在服务器的IP地址及数据库主节点的端口号信息。

    +
      +
    • 步骤 1切换到omm用户,以操作系统用户omm登录数据库主节点。
    • +
    +
    [root@db1 script]# su - omm +
    +
      +
    • 步骤 2使用gs_om -t status --detail命令查询openGauss各实例情况。
    • +
    +
    [omm@db1 ~]$ gs_om -t status --detail +
    +

    情况显示如下:
    +image.png
    +如上部署了数据库主节点实例的服务器IP地址为192.168.56.101。数据库主节点数据路径为“/gaussdb/data/db1997

    +
      +
    • 步骤 3确认数据库主节点的端口号。
      +在步骤2查到的数据库主节点数据路径下的postgresql.conf文件中查看端口号信息。示例如下:
      +要根据自己的主节点修改路径
    • +
    +
    [omm@db1 ~]$ cat /gaussdb/data/db1997/postgresql.conf | grep port +
    +

    image.png
    +26000为数据库主节点的端口号。
    +请在实际操作中记录数据库主节点实例的服务器IP地址数据路径和端口号,并在之后操作中按照实际情况进行替换

    +

    1.2.1.2 本地连接数据库

    +
      +
    • 步骤 1切换到omm用户,以操作系统用户omm登录数据库主节点。
    • +
    +
    su - omm +
    +
      +
    • 步骤 2启动数据库服务
    • +
    +
    gs_om -t start +
    +
      +
    • 步骤 3连接数据库。
      +执行如下命令连接数据库。
    • +
    +
    [omm@db1 ~]$ gsql -d postgres -p 26000 -r +
    +

    其中postgres为需要连接的数据库名称,26000为数据库主节点的端口号。请根据实际情况替换。
    +连接成功后,系统显示类似如下信息:
    +在这里插入图片描述
    +omm用户是管理员用户,因此系统显示DBNAME=#。若使用普通用户身份登录和连接数据库,系统显示DBNAME=>
    +Non-SSL connection表示未使用SSL方式连接数据库。如果需要高安全性时,请用SSL进行安全的TCP/IP连接

    +
      +
    • 步骤 4退出数据库。
    • +
    +
    postgres=# \q +
    +

    1.2.2 gsql获取帮助

    +

    1.2.2.2 连接数据库时,可以使用如下命令获取帮助信息

    +

    切换到omm用户。

    +
    su - omm +
    +

    使用如下命令获取帮助信息

    +
    gsql --help +
    +

    image.png

    +

    1.2.2.3 连接到数据库后,可以使用如下命令获取帮助信息

    +
      +
    • 步骤 1使用如下命令连接数据库。
    • +
    +
    gsql -d postgres -p 26000 -r +
    +
      +
    • 步骤 2输入help指令。
    • +
    +
    postgres=# help +
    +

    在这里插入图片描述

    +
      +
    • 步骤 3查看版权信息。
    • +
    +
    postgres=# \copyright +
    +

    在这里插入图片描述

    +
      +
    • 步骤 4查看openGauss支持的所有SQL语句。
    • +
    +
    postgres=# \h +
    +

    image.png

    +
      +
    • 步骤 5查看CREATE DATABASE命令的参数可使用下面的命令。
    • +
    +
    postgres=# \help CREATE DATABASE +
    +

    image.png

    +
      +
    • 步骤 6查看gsql支持的命令。
    • +
    +
    postgres=# \? +
    +

    image.png

    +
      +
    • 步骤 7退出数据库
    • +
    +
    postgres=# \q +
    +

    1.2.3 gsql命令使用

    +

    1.2.3.1 前提条件

    +

    以下操作在openGauss的数据库主节点所在主机上执行(本地连接数据库),切换到omm用户。

    +
    su - omm +
    +

    1.2.3.2 执行一条字符串命令

    +

    gsql命令直接执行一条显示版权信息的字符串命令

    +
    gsql -d postgres -p 26000 -c "\copyright" +
    +

    1.2.3.3 使用文件作为命令源而不是交互式输入

    +
      +
    • 步骤 1创建文件夹存放相关文档。
    • +
    +
    mkdir /home/omm/openGauss +
    +
      +
    • 步骤 2创建文件,例如文件名为“mysql.sql”,并写入可执行sql语句“select * from pg_user;”。
    • +
    +
    vi /home/omm/openGauss/mysql.sql +
    +

    文件打开输入i,进入INSERT模式,输入 select * from pg_user;

    +
    select * from pg_user; +
    +
      +
    • 步骤 3执行如下命令使用文件作为命令源。
    • +
    +
    gsql -d postgres -p 26000 -f /home/omm/openGauss/mysql.sql +
    +

    image.png

    +
      +
    • 步骤 4如果FILENAME是-(连字符),则从标准输入读取。
    • +
    +
    gsql -d postgres -p 26000 -f - +postgres=# select * from pg_user; +
    +
      +
    • 步骤 5退出数据库连接。
    • +
    +
    postgres=# \q +
    +

    1.2.3.4 列出所有可用的数据库(\l的l表示list)

    +
    gsql -d postgres -p 26000 -l +
    +

    image.png

    +

    1.2.3.5 设置gsql变量NAME为VALUE 关键字:-v

    +
      +
    • 步骤 1设置foo的值为bar。
    • +
    +
    gsql -d postgres -p 26000 -v foo=bar +
    +
      +
    • 步骤 2在数据库能够显示foo的值。
    • +
    +
    postgres=# \echo :foo
    bar
    +

    在这里插入图片描述

    +
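    补充:变量除了用\echo查看,也可以直接写在SQL语句中,gsql会把:foo替换成它的值(以下为示意用法,假设foo的值是某个已存在的表名):

    postgres=# select * from :foo;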
      +
    • 步骤 3退出数据库连接。
    • +
    +
    postgres=> \q +
    +

    1.2.3.6 打印gsql版本信息。

    +
    gsql -V +
    +

    在这里插入图片描述

    +

    1.2.3.7 使用文件作为输出源 关键字:-L

    +
      +
    • 步骤 1创建文件,例如文件名为“output.txt”。
    • +
    +
    touch /home/omm/openGauss/output.txt +
    +

    在这里插入图片描述

    +
      +
    • 步骤 2执行如下命令,除了正常的输出源之外,把所有查询输出记录到文件中。
    • +
    +
    gsql -d postgres -p 26000 -L /home/omm/openGauss/output.txt +
    +

    在这里插入图片描述

    +

    进入gsql环境,输入以下语句:

    +
    postgres=# create table mytable (firstcol int); +
    +

    CREATE TABLE
    +在这里插入图片描述

    +
    postgres=# insert into mytable values(100); +
    +

    INSERT 0 1
    +在这里插入图片描述

    +
    postgres=# select * from mytable ; +
    +

    在这里插入图片描述
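    由于上面只插入了一行值100,此处查询结果应类似:

     firstcol
    ----------
          100
    (1 row)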
    +退出数据库

    +
    postgres=# \q +
    +
      +
    • 步骤 3查看“output.txt”文档中的内容如下:
    • +
    +
    cat /home/omm/openGauss/output.txt +
    +

    显示如下:
    +image.png

    +

    1.2.3.8 将所有查询输出重定向到文件FILENAME 关键字:-o

    +
      +
    • 步骤 1创建文件,例如文件名为“outputOnly.txt”。
    • +
    +
    touch /home/omm/openGauss/outputOnly.txt +
    +

    在这里插入图片描述

    +
      +
    • 步骤 2执行如下命令。
    • +
    +
    gsql -d postgres -p 26000 -o /home/omm/openGauss/outputOnly.txt +
    +

    在这里插入图片描述

    +
      +
    • 步骤 3进入gsql环境,输入以下语句:
    • +
    +
    postgres=# drop table mytable;
    postgres=# create table mytable (firstcol int);
    postgres=# insert into mytable values(100);
    postgres=# select * from mytable;
    postgres=# \q
    +

    在这里插入图片描述

    +

    所有操作都没有回显。

    +
      +
    • 步骤 4查看“outputOnly.txt”文档中的内容如下:
    • +
    +
    cat /home/omm/openGauss/outputOnly.txt +
    +

    在这里插入图片描述

    +

    1.2.3.9 安静模式 关键字:-q

    +

    安静模式:执行时不会打印出额外信息

    +
    gsql -d postgres -p 26000 -q +
    +

    进入gsql环境,输入以下语句:

    +
    postgres=# create table t_test (firstcol int);
    postgres=# insert into t_test values(200);
    postgres=# select * from t_test;
     firstcol
    ----------
          200
    (1 row)

    postgres=# \q
    + +

    image.png

    +

    连接上数据库,创建数据库和插入数据等都没有回显信息

    +

    1.2.3.10 单行运行模式 关键字:-S

    +

    单行运行模式:这时每个命令都将由换行符结束,像分号那样

    +
    gsql -d postgres -p 26000 -S +
    +

    进入gsql环境,输入以下语句:

    +
    postgres^# select * from t_test;
     firstcol
    ----------
          200
    (1 row)

    postgres^# select * from t_test
     firstcol
    ----------
          200
    (1 row)

    postgres=# \q
    +

    image.png

    +

    语句最后结尾有;号和没有;号,效果都一样

    +

    1.2.3.11 编辑模式 关键字:-r

    +
      +
    • 步骤 1如下命令连接数据库,开启在客户端操作中可以进行编辑的模式。
    • +
    +
    gsql -d postgres -p 26000 -r +
    +
      +
    • 步骤 2进入gsql环境,输入以下语句:
    • +
    +
    select * from t_test; +
    +

    在这里插入图片描述

    +
      +
    • +

      步骤 3写完后不要按回车,光标在最后闪烁。
      +在这里插入图片描述

      +
    • +
    • +

      步骤 4按“向左”键将光标移动到“”,将“”修改为“firstcol”。
      +image.png
      +image.png

      +
    • +
    +

    编辑模式“上下左右键”,“删除键”和“退格键”都可以使用,并且按下“向上”、“向下”键可以切换输入过的命令。

    +
      +
    • 步骤 5退出数据库连接
    • +
    +
    postgres=# \q +
    +

    1.2.3.12 远程使用用户名和密码连接数据库

    +

    远程使用jack用户连接ip地址为192.168.0.58端口号为26000的数据库

    +
      +
    • 步骤 1登录客户端主机(192.168.0.58),使用以下命令远程登录数据库。
    • +
    +
    gsql -d postgres -h 192.168.0.58 -U jack -p 26000 -W Bigdata@123; +
    +

    -d参数指定目标数据库名、-U参数指定数据库用户名、-h参数指定主机名、-p参数指定端口号信息,-W参数指定数据库用户密码。
    +进入gsql环境,显示如下:

    +
    gsql ((openGauss 1.0 build ec0e781b) compiled at 2020-04-27 17:25:57 commit 2144 last mr 131 )
    SSL connection (cipher: DHE-RSA-AES256-GCM-SHA384, bits: 256)
    Type "help" for help.

    postgres=>
    +
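    说明:上例假设远程用户jack已存在,且远程访问已配置好(pg_hba.conf与listen_addresses的配置方法见后文Data Studio一节)。若jack尚未创建,可先以管理员身份连接数据库,执行类似如下的SQL(密码与上例保持一致,仅为示例):

    postgres=# CREATE USER jack PASSWORD 'Bigdata@123';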

    1.2.4 gsql元命令使用

    +

    1.2.4.1 前提条件

    +

    以下操作在openGauss的数据库主节点所在主机上执行(本地连接数据库),使用gsql连接到openGauss数据库。

    +
      +
    • 步骤 1切换到omm用户,以操作系统用户omm登录数据库主节点。
    • +
    +
    su - omm +
    +
      +
    • 步骤 2gsql连接数据库。
    • +
    +
    gsql -d postgres -p 26000 -r +
    +

    1.2.4.2 打印当前查询缓冲区到标准输出

    +
      +
    • 步骤 1创建“outputSQL.txt”文件。
    • +
    +
    touch /home/omm/openGauss/outputSQL.txt

    下图经历了一段切换目录的过程(若使用相对路径,请注意当前所在目录,保证文件最终位于/home/omm/openGauss/下)
    image.png

    +
      +
    • 步骤 2连接数据库。
    • +
    +
    gsql -d postgres -p 26000 -r +
    +
      +
    • 步骤 3输入以下语句。
    • +
    +
    postgres=# select * from pg_roles; +
    +

    image.png

    +
    postgres=# \w /home/omm/openGauss/outputSQL.txt
    postgres=# \q
    +

    在这里插入图片描述

    +
      +
    • 步骤 4打开文件“outputSQL.txt”文件,查看其中内容。
    • +
    +
    cat /home/omm/openGauss/outputSQL.txt +
    +

    显示如下:
    +在这里插入图片描述

    +

    1.2.4.3 导入数据

    +
      +
    • 步骤 1连接数据库。
    • +
    +
    gsql -d postgres -p 26000 -r +
    +
      +
    • 步骤 2创建目标表a。
    • +
    +
    postgres=# CREATE TABLE a(a int); +
    +

    在这里插入图片描述

    +
      +
    • 步骤 3导入数据,从stdin拷贝数据到目标表a。
    • +
    +
    postgres=# \copy a from stdin; +
    +

    在这里插入图片描述

    +

    出现>>符号提示时,输入数据,输入\.时结束。
    +在这里插入图片描述

    +
    >> 1
    >> 2
    >> \.
    +
      +
    • 步骤 4查询导入目标表a的数据。
    • +
    +
    postgres=# SELECT * FROM a; +
    +

    在这里插入图片描述
    +退出数据库:

    +
    postgres=# \q +
    +
      +
    • 步骤 5从本地文件拷贝数据到目标表a,创建文件/home/omm/openGauss/2.csv。
    • +
    +
    vi /home/omm/openGauss/2.csv +
    +
      +
    • 步骤 6输入i,切换到INSERT模式,插入数据如下:
      3
      4
      5
      如果一行中有多个数据,分隔符为英文逗号“,”(与后文\copy命令中的delimiter','对应)。
      在导入过程中,若数据源文件比表定义的列数多,则忽略行尾多出来的列。
    • 步骤 7按下Esc键,输入:wq后回车,保存并退出。
    • +
    • 步骤 8连接数据库。
    • +
    +
    gsql -d postgres -p 26000 -r +
    +
      +
    • 步骤 9如下命令拷贝数据到目标表。
    • +
    +
    postgres=# \copy a FROM '/home/omm/openGauss/2.csv' WITH (delimiter',',IGNORE_EXTRA_DATA 'on');
    +
      +
    • 步骤 10查询导入目标表a的数据。
    • +
    +
    postgres=# SELECT * FROM a; +
    +

    image.png

    +
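    与导入相对应,\copy也可以把表中的数据导出到本地文件(示意用法,导出路径为假设):

    postgres=# \copy a TO '/home/omm/openGauss/a_out.csv' WITH (delimiter',');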

    1.2.4.4 查询表空间

    +
    postgres=# \db +
    +

    显示如下:

    +

    在这里插入图片描述

    +

    1.2.4.5 查询表的属性 关键字:\d+

    +
      +
    • 步骤 1创建表customer_t1。
    • +
    +
    postgres=# DROP TABLE IF EXISTS customer_t1;
    postgres=# CREATE TABLE customer_t1 (c_customer_id VARCHAR(16));(建表语句此处从简,仅包含后文创建索引要用到的c_customer_id列)
    +

    image.png

    +
      +
    • 步骤 2查询表的属性。
    • +
    +
    postgres=# \d+; +
    +

    显示如下:
    +在这里插入图片描述

    +
      +
    • 步骤 3查询表customer_t1的属性。
    • +
    +
    postgres=# \d+ customer_t1; +
    +

    image.png

    +

    1.2.4.6 查询索引信息 关键字:\di+

    +
      +
    • 步骤 1在表customer_t1上创建索引。
    • +
    +
    create index customer_t1_index1 on customer_t1(c_customer_id); +
    +
      +
    • 步骤 2查询索引信息。
    • +
    +
    postgres=# \di+; +
    +

    在这里插入图片描述

    +
      +
    • 步骤 3查询customer_t1_index1索引的信息。
    • +
    +
    postgres=# \di+ customer_t1_index1 +
    +

    在这里插入图片描述

    +

    1.2.4.7 切换数据库 关键字:\c db

    +
      +
    • 步骤 1创建数据库。
    • +
    +
    DROP DATABASE IF EXISTS db_tpcc02;
    CREATE DATABASE db_tpcc02;
    +
      +
    • 步骤 2切换数据库。
    • +
    +
    postgres=# \c db_tpcc02; +
    +

    显示如下:

    +

    在这里插入图片描述

    +
      +
    • 步骤 3退出数据库:
    • +
    +
    postgres=# \q + diff --git "a/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\357\274\210\344\272\214\357\274\211gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\344\271\213Data Studio\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267.md" "b/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\357\274\210\344\272\214\357\274\211gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\344\271\213Data Studio\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267.md" new file mode 100644 index 0000000000000000000000000000000000000000..a669375a461d7bfead0ec0cb909e4ae29a999d67 --- /dev/null +++ "b/content/zh/post/zhengwen2/\343\200\220openGauss\343\200\221gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\357\274\210\344\272\214\357\274\211gsql\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267\344\271\213Data Studio\345\256\242\346\210\267\347\253\257\345\267\245\345\205\267.md" @@ -0,0 +1,98 @@ ++++ + +title = "【openGauss】gsql客户端工具(二)gsql客户端工具之Data Studio客户端工具" + +date = "2021-07-09" + +tags = [ "【openGauss】gsql客户端工具(二)gsql客户端工具之Data Studio客户端工具"] + +archives = "2021-07" + +author = "SogK1997" + +summary = "【openGauss】gsql客户端工具(二)gsql客户端工具之Data Studio客户端工具" + +img = "/zh/post/zhengwen2/img/img5.png" + +times = "12:30" + ++++ + +# 【openGauss】gsql客户端工具(二)gsql客户端工具之Data Studio客户端工具 + +

    +

    写在前面

    +
    +

    因为博主并没有购买使用华为云的openGauss及openEuler,使用的是在VirtualBox上用镜像搭建起来的openGauss,因此1.3.2节中在华为云上配置安全策略、开放端口等操作可以忽略,直接跳转到1.3.3节软件包下载进行后续操作。

    +
    +

    1.3 Data Studio客户端工具

    +

    Data Studio是一个集成开发环境(IDE),帮助数据库开发人员便捷地构建应用程序,以图形化界面形式提供数据库关键特性。
    数据库开发人员仅需掌握少量的编程知识,即可使用该工具进行数据库对象操作。
    Data Studio提供丰富多样的特性,例如:

    +
    • 创建和管理数据库对象
    • 执行SQL语句/脚本
    • 编辑和执行PL/SQL语句
    • 图形化查看执行计划和开销
    • 导出表数据等
    +

    创建和管理数据库对象包括:

    +
    • 数据库
    • 模式
    • 函数
    • 过程
    • 序列
    • 索引
    • 视图
    • 表空间
    • 同义词
    +

    Data Studio还提供SQL助手,用于在SQL终端和PL/SQL Viewer中执行各种查询/过程/函数。

    +

    1.3.1 准备连接环境

    +
    • 步骤 1修改数据库的pg_hba.conf文件。
    +

    在GS_HOME中查找pg_hba.conf文件,本实验中数据库GS_HOME设置的为/gaussdb/data/db1(db1修改为自己的数据库名字,例如博主的db1997),实际操作中GS_HOME地址可以查看安装时的配置文件:<PARAM name="dataNode1" value="/gaussdb/data/db1"/>

    +
    [root@db1 ~]# cd /gaussdb/data/db1
    +[root@ecs-b5cb db1]# vi pg_hba.conf
    +
    +

    在这里插入图片描述

    +

    在这里插入图片描述

    +

    将以下内容添加进pg_hba.conf文件。

    +
    host all all 0.0.0.0/0 sha256
    +
    +

    具体如下:

    +

    在这里插入图片描述
    切换至omm用户环境,使用gs_ctl将策略生效。

    +
    [root@db1 db1]#su - omm
    +[omm@db1 ~]$gs_ctl reload -D /gaussdb/data/db1997/
    +
    +

    返回结果为:
    在这里插入图片描述

    +
    • 步骤 2登录数据库并创建“dboper”用户,密码为“dboper@123”(密码可自定义),同时进行授权,并退出数据库。
    +
    [omm@db1 ~]$gsql -d postgres -p 26000 -r
    +postgres=#CREATE USER dboper IDENTIFIED BY 'dboper@123';
    +CREATE ROLE
    +postgres=#alter user dboper sysadmin;
    +ALTER ROLE
    +postgres=# \q
    +
    +

    在这里插入图片描述

    +

    退出OMM用户环境

    +
    [omm@ecs-b5cb ~]$ exit
    +
    +

    在这里插入图片描述

    +
    • 步骤 3修改数据库监听地址。
      在GS_HOME中,本实验中数据库GS_HOME设置的为/gaussdb/data/db1997
    +
    [root@ecs-b5cb ecs-b5cb]# cd /gaussdb/data/db1997
    +[root@db1 ~]# vi postgresql.conf
    +
    +

    将listen_addresses的值修改成为 *

    +
    listen_addresses = '*'
    +
    +

    在这里插入图片描述

    +

    修改完成后切换至OMM用户环境重启数据库生效(-D后面的数据库默认路径,需要根据自己的数据库名字的实际情况进行修改)。

    +
    [root@db1 db1]#su - omm
    +[omm@db1 ~]$gs_ctl restart -D /gaussdb/data/db1997/
    +
    +

    在这里插入图片描述

    +

    1.3.2 确定26000端口是否放开

    +
    • 步骤 1打开华为云首页,登录后进入“控制台”,点击“弹性云服务器ECS”进入ECS列表。
    • 步骤 2在云服务器控制台找到安装数据库主机的ECS,点击查看基本信息,找到安全组。
    • 步骤 3点击进入安全组,选择“入方向规则”并点“添加规则”,进行26000端口设置。
    +

    1.3.3 软件包下载及安装

    + +
    +

    名字可以任意取;
    主机一定是openGauss数据库安装所在的主机IP;
    端口号是26000;
    数据库、用户名和密码三者要互相对应。

    +
    +

    注意不启用SSL

    +
    • 步骤 3连接数据库。
      在Data Studio工具界面上,点击“文件”下的“新建连接”,进入如下设置界面:
      在这里插入图片描述
      连接成功后我们就可以看到了
      在这里插入图片描述
      下面是粗暴的截取了一个短的GIF。
      在这里插入图片描述
    +

    1.3.4 Data Studio用户界面

    +

    Data Studio主界面包括:

    +
    1. 主菜单:提供使用Data Studio的基本操作;
    2. 工具栏:提供常用操作入口;
    3. “SQL终端”页签:在该窗口,可以执行SQL语句和函数/过程;
    4. “PL/SQL Viewer”页签:显示函数/过程信息;
    5. 编辑区域用于进行编辑操作;
    6. “调用堆栈”窗格:显示执行栈;
      7.“断点“窗格:显示断点信息;
    7. “变量”窗格:显示变量及其变量值;
    8. “SQL助手”页签:显示“SQL终端”和“PL/SQL Viewer”页签中输入信息的建议或参考;
    9. “结果”页签:显示所执行的函数/过程或SQL语句的结果;
    10. “消息”页签:显示进程输出。显示标准输入、标准输出和标准错误;
    11. “对象浏览器”窗格:显示数据库连接的层级树形结构和用户有权访问的相关数据库对象;除公共模式外,所有默认创建的模式均分组在“系统模式”下,用户模式分组在相应数据库的“用户模式”下;
    12. “最小化窗口窗格”:用于打开“调用堆栈”和“变量”窗格。该窗格仅在“调用堆栈”、“变量”窗格中的一个或多个窗格最小化时显示。
    13. 搜索工具栏:用于在“对象浏览器”窗格中搜索对象。
      有些项不可见,除非触发特定功能。下图以openGauss界面为例说明:
      在这里插入图片描述
    +

    1.3.5 获取工具使用手册

    +

    在Data Studio主界面的主菜单上点击帮助下的用户手册,具体如下:
    在这里插入图片描述

    +

    点击后即可得到使用手册,如下:
    在这里插入图片描述

    +

    本实验结束。

    + diff --git "a/content/zh/post/zhengwen2/\345\237\272\344\272\216openGauss\346\225\260\346\215\256\345\272\223\350\256\276\350\256\241\344\272\272\345\212\233\350\265\204\346\272\220\347\256\241\347\220\206\347\263\273\347\273\237\345\256\236\351\252\214.md" "b/content/zh/post/zhengwen2/\345\237\272\344\272\216openGauss\346\225\260\346\215\256\345\272\223\350\256\276\350\256\241\344\272\272\345\212\233\350\265\204\346\272\220\347\256\241\347\220\206\347\263\273\347\273\237\345\256\236\351\252\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..8c3d267876b435f2d86eb2d63fbb07feca9b5003 --- /dev/null +++ "b/content/zh/post/zhengwen2/\345\237\272\344\272\216openGauss\346\225\260\346\215\256\345\272\223\350\256\276\350\256\241\344\272\272\345\212\233\350\265\204\346\272\220\347\256\241\347\220\206\347\263\273\347\273\237\345\256\236\351\252\214.md" @@ -0,0 +1,787 @@ ++++ + +title = "基于openGauss数据库设计人力资源管理系统实验" + +date = "2021-07-10" + +tags = ["基于openGauss数据库设计人力资源管理系统实验"] + +archives = "2021-07" + +author = "瓜西西" + +summary = "基于openGauss数据库设计人力资源管理系统实验" + +img = "/zh/post/zhengwen2/img/img33.png" + +times = "12:30" + ++++ + +# 基于openGauss数据库设计人力资源管理系统实验 + +

            本文主要面向openGauss数据库初学者,帮助初学者完成一些简单的数据库管理以及GUI,设计一个简单的人力资源管理系统。本文只包含部分代码,读者需要结合自己的数据库弹性公网、数据库用户及其密码等自身信息做出相应的修改。

    一、实验环境

    使用程序:putty.exe; +

    IntelliJ IDEA 2021.1.1; +

    apache-tomcat-9.0.46 +

    服务器名称:ecs-d8b3 +

    弹性公网:121.36.79.196 +

    端口号:26000 +

    表空间名:human_resource_space +

    数据库名称:human_resource +

    员工、部门经理登录账号:其员工ID +

    员工、部门经理登录密码:123456 +

    人事经理登录账号:hr001 +

    人事经理登录密码:hr001 +

    登录入口(需在tomcat启动之后才能运行):http://localhost:8080/gaussdb2_war/login.jsp +

    二、创建和管理openGauss数据库 +

    进行以下步骤前,需预先购买弹性云服务器 ECS ,并把需要的软件以及需要调用的包预先下载好。 +

    2.1 数据库存储管理 +

    2.1.1 连接弹性云服务器 +

    我们使用 SSH 工具PuTTY,从本地电脑通过配置弹性云服务器的弹性公网 IP地址来连接 ECS,并使用 ROOT 用户来登录。 +

    (1)点击putty.exe,打开putty +


    (2)输入弹性公网IP,点击open,连接弹性云服务器 +


    2.1.2 启动、停止和连接数据库

    2.1.2.1 启动数据库 +

    (1)使用root登录 +


    (2)切换至omm操作系统用户环境 +

    使用语句切换至omm操作系统用户环境 +

    su - omm 


    (3)启动数据库 +

    使用语句启动数据库 +

    gs_om -t start 


    +

    2.1.2.2 停止数据库 +

    如有需要,可以使用语句停止数据库 +

     gs_om -t stop 


    2.1.2.3 连接数据库 +

    使用 语句连接数据库。 +

    gsql -d dbname -p port -U username -W password -r 


    其中,-d 指定数据库名,-p 指定端口号,-U 指定用户名,-W 指定密码,-r 开启客户端操作历史记录功能。 +


    图中使用 gsql -d postgres -p 26000 -r 连接postgres数据库。postgres 为 openGauss 安装完成后默认生成的数据库,初始可以连接到此数据库进行新数据库的创建。26000 为数据库主节点的端口号。 +

    2.1.3 创建和管理用户、表空间、数据库和模式 +

    2.1.3.1 创建用户 +

    使用以下语句创建用户。请牢记设置的用户名以及密码,之后需要多次使用。建议将密码都设置为相同的简单密码,方便之后的操作。 +

    CREATE USER user_name PASSWORD 'pass_word';
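    例如,创建本文后续使用的用户a(密码仅为示例,需满足数据库的密码复杂度要求):

    CREATE USER a PASSWORD 'Gauss@123';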


    2.1.3.2 管理用户 +

    可以使用以下语句对用户进行操作: +

    修改密码:

    ALTER USER a IDENTIFIED BY 'Abcd@123' REPLACE 'Guass@123'; 


    删除用户:

    DROP USER a CASCADE;


    +

    2.1.3.3 创建表空间 +

    使用以下语句创建表空间。(路径需使用单引号) +

     CREATE TABLESPACE human_resource_space RELATIVE LOCATION 'tablespace/tablespace_2'; 

    创建表空间 human_resource_space,表空间路径为:tablespace/tablespace_2 +


    +

    2.1.3.4 管理表空间 +

    (1)赋予用户表空间访问权限 +

    使用以下语句,数据库系统管理员将human_resource_space表空间的访问权限赋予数据用户 a +

     GRANT CREATE ON TABLESPACE human_resource_space TO a; 
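    如需收回该权限,可使用对应的REVOKE语句(示例):

     REVOKE CREATE ON TABLESPACE human_resource_space FROM a; 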


    +

    (2)管理表空间 +

    如有需要,可以使用如下语句 或 \db 语句查询表空间。 +

    SELECT spcname FROM pg_tablespace;


    可使用以下语句删除表空间 +

    DROP TABLESPACE tablespace_1;


    +

    2.1.3.5 创建数据库 +

    为用户a在表空间human_resource_space上创建数据库human_resource +

     CREATE DATABASE human_resource WITH TABLESPACE = human_resource_space OWNER a; 
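    创建后可以通过系统表确认数据库与表空间的对应关系(示例查询):

    SELECT d.datname, t.spcname
    FROM pg_database d, pg_tablespace t
    WHERE d.dattablespace = t.oid AND d.datname = 'human_resource';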


    +

    2.1.3.6 管理数据库 +

    可以使用以下语句管理数据库: +

    SELECT datname FROM pg_database; 

    或 \l 查看数据库

    DROP DATABASE testdb;

    删除数据库

    2.1.3.7 创建模式 +

    输入 \q 退出postgres数据库。 +


     gsql -d human_resource -p 26000 -U a -W aaa. -r


    连接数据库human_resource。出现如下信息则连接成功: +


    +

    使用语句

     CREATE SCHEMA a AUTHORIZATION a; 

     为用户创建同名模式a +
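    创建后可用如下查询确认模式已存在(示例):

    SELECT nspname FROM pg_namespace WHERE nspname = 'a';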

    +

    2.1.3.8 管理模式

     SET SEARCH_PATH TO a,public; 

    设置模式a为默认查询模式(设置中第一个为默认模式) +

    如有需要,可以使用语句 \dn 查看模式 ,SHOW SEARCH_PATH; 查看模式搜索路径 +

    2.2 数据库对象管理实验 +

    2.2.1 创建表 +

    使用以下语句,在数据库human_resource_space,创建人力资源库的8个基本表。 +

    CREATE TABLE table_name +

    ( col1 datatype constraint, +

    col2 datatype constraint, +

    … +

    coln datatype constraint ); +


    我们为了完成人力资源管理系统,创建雇佣历史表 employment_history 、部门表 sections、创建工作地点表 places、创建区域表 areas 、大学表 college、雇佣表 employments 、国家及地区表 states 、员工表 staffs这8个基本表。 +

    以员工表为例: +
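    员工表的建表语句大致如下(字段名依据后文程序代码推断,字段类型为笔者假设的示意,请以实验给定的脚本为准):

    CREATE TABLE staffs
    ( staff_id        INTEGER PRIMARY KEY,
      first_name      VARCHAR(20),
      last_name       VARCHAR(25),
      email           VARCHAR(25),
      phone_number    VARCHAR(20),
      hire_date       DATE,
      employment_id   VARCHAR(10),
      salary          INTEGER,
      commission_pct  INTEGER,
      manager_id      INTEGER,
      section_id      INTEGER,
      graduated_name  VARCHAR(60)
    );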


    2.2.2 删除表 +

    如有需要,可以使用

    DROP TABLE sections;

    或 

     DROP TABLE sections CASCADE ;

    语句删除表。 +

    2.3 数据初始化 +

    2.3.1 初始化数据表 +

    我们这里为了方便操作,根据给定的脚本文件初始化数据表;如果不嫌麻烦,也可以使用INSERT语句一条一条地插入,这两种方法本质上是一样的。

    使用

     \i /a.sql

     元命令初始化数据表(其中/a.sql为给定SQL脚本的路径,\i会执行脚本中的INSERT等语句,把数据插入对应的表)

    使用 

    SELECT * from table_name; 

     语句查看数据表信息。 +

    以雇佣表 employments为例: +



    +

    三、数据库应用程序开发 +

    常见的数据库应用程序开发步骤为: +

    (1) 加载驱动 +

    (2) 连接数据库 +

    (3) 执行SQL语句 +

    (4) 处理结果集 +

    (5) 关闭连接 +

    我们根据这5个步骤,实现人力资源管理系统。 +

    3.1 项目框架 +


    3.1.1 BLL +

    业务逻辑层,实现各项操作模块与servlet的接口,对传送数据进行逻辑判断分析,并传送正确的值。 +


    3.1.2 Model +

    存放数据库表字段。在程序中,使用到的表有员工历史雇佣信息表、工作地点表、工作部门表、员工表。 +

    这些java文件主要作用是定义各表的set和get函数 +


    3.1.3 cn.UI +

    实现用户界面 +


    3.1.4 Dao +

    实现具体的对数据库的操作,其中包含具体操作的函数以及SQL语句 +


    3.1.5 Util +

    实现获得参数并传递参数以及连接数据库的功能 +


    3.1.6 webapp +

    存放.jsp代码,生成具体页面。其中WEB-INF中存放web.xml文件 +


    登录页面: +


    +

    HRmanager页面: +


    +

    manager页面: +


    +

    staff页面: +


    +

    3.2 修改表staffs +

    为了实现登录功能,我们需要在员工表staffs中增加一列password,为了方便起见,我们设置密码都为123456,当然也可以自己设置差异化的密码。 +

    ALTER TABLE staffs ADD password varchar2(20); +



    UPDATE staffs SET password = '123456';
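    可用如下查询确认新增的password列已生效(示例):

    SELECT column_name, data_type FROM information_schema.columns WHERE table_name = 'staffs' AND column_name = 'password';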



    设置hr登录账号为hr001,密码为hr001(hr001为程序中硬编码的特殊账号,并不存放在staffs表中,见后文login函数) +

    +

    3.3 加载驱动&连接数据库 +

    JDBC为JAVA中用来访问数据库的程序接口,我们使用JDBC连接。 +

    文件路径为: +


    源码: +

```java
package Util;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class connect { //根据用户名与密码,进行数据库的连接以及关闭连接
    private static String DBDriver = "org.postgresql.Driver";
    private static String url = "jdbc:postgresql://121.36.79.196:26000/human_resource";
    private static String user = "a";
    private static String password = "aaa";
    static Connection con = null;
    static Statement sta = null;
    static PreparedStatement pst = null;

    //创建数据库的连接
    public static Connection getConnection() {
        try {
            Class.forName(DBDriver);
            try {
                con = DriverManager.getConnection(url, user, password);
                return con;
            } catch (SQLException e) {
                e.printStackTrace();
            }
        } catch (ClassNotFoundException e1) {
            e1.printStackTrace();
        }
        return null;
    }

    public static Statement createStatement() {
        try {
            sta = getConnection().createStatement();
            return sta;
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return null;
    }

    //创建预处理对象
    public static PreparedStatement createPreparedStatement(String sql) {
        try {
            pst = getConnection().prepareStatement(sql);
            return pst;
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return pst;
    }

    //关闭所有打开的资源(仅当对象不为null时才关闭,避免空指针异常)
    public static void closeOperation() {
        if (pst != null) {
            try {
                pst.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
        if (sta != null) {
            try {
                sta.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
        if (con != null) {
            try {
                con.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
```


    +

    3.4 实现具体功能 +

    文件路径: +


    +

    完整源码: +

```java
package Dao;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import Model.*;
import Util.getInformation;

public class operate {

    //将结果集当前行封装为staffs对象,供下面各查询方法共用,避免重复代码
    private staffs mapStaff(ResultSet rs) throws SQLException {
        staffs staff = new staffs();
        staff.setStaff_id(rs.getInt("staff_id"));
        staff.setFirst_name(rs.getString("first_name"));
        staff.setLast_name(rs.getString("last_name"));
        staff.setEmail(rs.getString("email"));
        staff.setPhone_number(rs.getString("phone_number"));
        staff.setHire_date(rs.getDate("hire_date"));
        staff.setEmployment_id(rs.getString("employment_id"));
        staff.setSalary(rs.getInt("salary"));
        staff.setCommission_pct(rs.getInt("commission_pct"));
        staff.setManager_id(rs.getInt("manager_id"));
        staff.setSection_id(rs.getInt("section_id"));
        staff.setGraduated_name(rs.getString("graduated_name"));
        staff.setPassword(rs.getString("password"));
        return staff;
    }

    //********************************登录**************************************
    //实现登录操作,登录成功返回staff_id,失败返回null
    public String login(String staff_id, String password) {
        if (staff_id.equals("hr001")) {
            //人事经理为硬编码的特殊账号,不存放在staffs表中
            return password.equals("hr001") ? staff_id : null;
        }
        String sql = "select staff_id,password from staffs ";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) { //用户输入的账号密码和数据库中的信息做比较,判断输入是否正确
                Integer id = rs.getInt("staff_id");
                String pwd = rs.getString("password");
                if (id.equals(Integer.valueOf(staff_id)) && pwd.equals(password)) {
                    return staff_id;
                }
            }
            rs.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    //判断该员工是否为部门经理,是则返回其管理的部门编号,否则返回字符串"null"
    public String isManager(String staff_id) {
        String sql = "select section_id,manager_id from sections";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) {
                Integer id = rs.getInt("manager_id");
                String section_id = rs.getString("section_id");
                if (id.equals(Integer.valueOf(staff_id))) {
                    return section_id;
                }
            }
            rs.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return "null";
    }

    //**********************************员工操作***********************************
    //修改电话号码
    public void updatePhoneNumber(String phone_number, String staff_id) {
        String sql = "update staffs set phone_number=? where staff_id=? ";
        getInformation.executeUpdate(sql, phone_number, Integer.valueOf(staff_id));
    }

    //**********************************部门经理**********************************
    //查询部门所有员工信息(按员工编号升序排列)
    public List QuerySectionStaffsOrderByStaffId(Integer section_id) {
        List list = new ArrayList(); //最终返回整个list集合
        String sql = "select * from staffs where section_id=? order by staff_id asc";
        ResultSet rs = getInformation.executeQuery(sql, section_id);
        try {
            while (rs.next()) {
                list.add(mapStaff(rs)); //保存取出来的每一条记录
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //查询部门所有员工信息(按工资降序排列)
    public List QuerySectionStaffsOrderBySalary(Integer section_id) {
        List list = new ArrayList();
        String sql = "select * from staffs where section_id=? order by salary desc";
        ResultSet rs = getInformation.executeQuery(sql, section_id);
        try {
            while (rs.next()) {
                list.add(mapStaff(rs));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //根据员工号查询部门内员工,返回该员工信息
    public staffs QuerySectionStaffByStaff_id(Integer staff_id, Integer section_id) {
        staffs staff = new staffs();
        String sql = "select * from staffs where staff_id=? and section_id=?";
        ResultSet rs = getInformation.executeQuery(sql, staff_id, section_id);
        try {
            if (rs.next()) {
                staff = mapStaff(rs);
            }
        } catch (NumberFormatException | SQLException e) {
            e.printStackTrace();
        }
        return staff;
    }

    //根据员工姓名查询部门内员工,返回该员工信息
    public staffs QuerySectionStaffByFirstName(String first_name, Integer section_id) {
        staffs staff = new staffs();
        String sql = "select * from staffs where first_name=? and section_id=?";
        ResultSet rs = getInformation.executeQuery(sql, first_name, section_id);
        try {
            if (rs.next()) {
                staff = mapStaff(rs);
            }
        } catch (NumberFormatException | SQLException e) {
            e.printStackTrace();
        }
        return staff;
    }

    //统计本部门员工的平均工资、最高工资和最低工资
    public List SectionStatistics(String section_id) {
        ArrayList list = new ArrayList(); // 初始化
        String sql = "select avg(salary),min(salary),max(salary) from staffs where section_id = ?;";
        ResultSet rs = getInformation.executeQuery(sql, section_id);
        try {
            while (rs.next()) {
                list.add(rs.getInt("avg"));
                list.add(rs.getInt("max"));
                list.add(rs.getInt("min"));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //******************************人事经理操作*****************************************
    //根据员工号查询员工,返回该员工信息
    public staffs QueryStaffByStaff_id(Integer staff_id) {
        staffs staff = new staffs();
        String sql = "select * from staffs where staff_id=?";
        ResultSet rs = getInformation.executeQuery(sql, staff_id);
        try {
            if (rs.next()) {
                staff = mapStaff(rs);
            }
        } catch (NumberFormatException | SQLException e) {
            e.printStackTrace();
        }
        return staff;
    }

    //根据员工姓名查询员工,返回该员工信息
    public staffs QueryStaffByFirstName(String first_name) {
        staffs staff = new staffs();
        String sql = "select * from staffs where first_name=?";
        ResultSet rs = getInformation.executeQuery(sql, first_name);
        try {
            if (rs.next()) {
                staff = mapStaff(rs);
            }
        } catch (NumberFormatException | SQLException e) {
            e.printStackTrace();
        }
        return staff;
    }

    //查询所有员工信息(按员工编号升序排列)
    public List QueryAllStaffsOrderByStaffId() {
        List list = new ArrayList();
        String sql = "select * from staffs order by staff_id asc";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) {
                list.add(mapStaff(rs));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //查询所有员工信息(按工资降序排列)
    public List QueryAllStaffsOrderBySalary() {
        List list = new ArrayList();
        String sql = "select * from staffs order by salary desc";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) {
                list.add(mapStaff(rs));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //统计各部门员工的平均工资、最高工资和最低工资
    public List statistics() {
        ArrayList list = new ArrayList(); // 初始化
        String sql = "select avg(salary),min(salary),max(salary),section_id from staffs group by section_id;";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) {
                list.add(rs.getInt("section_id"));
                list.add(rs.getInt("avg"));
                list.add(rs.getInt("max"));
                list.add(rs.getInt("min"));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //查询所有部门信息
    public List QuerySectionOrderBySectionId() {
        List list = new ArrayList();
        String sql = "select * from sections order by section_id asc";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) {
                sections section = new sections();
                section.setSection_id(rs.getInt("section_id"));
                section.setSection_name(rs.getString("section_name"));
                section.setManager_id(rs.getInt("manager_id"));
                section.setPlace_id(rs.getInt("place_id"));
                list.add(section);
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //查询所有工作地点信息
    public List QueryPlaces() {
        List list = new ArrayList();
        String sql = "select * from places";
        ResultSet rs = getInformation.executeQuery(sql);
        try {
            while (rs.next()) {
                places place = new places();
                place.setPlace_id(rs.getInt("place_id"));
                place.setStreet_address(rs.getString("street_address"));
                place.setPostal_code(rs.getString("postal_code"));
                place.setCity(rs.getString("city"));
                place.setState_province(rs.getString("state_province"));
                place.setState_id(rs.getString("state_id"));
                list.add(place);
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }

    //修改部门名称
    public void updateSectionName(String section_name, Integer section_id) {
        String sql = "update sections set section_name=? where section_id=? ";
        getInformation.executeUpdate(sql, section_name, section_id);
    }

    //添加新工作地点
    public void addPlace(places place) {
        String sql = "insert into places (place_id, street_address, postal_code, city, state_province,state_id) values (?,?,?,?,?,?)";
        getInformation.executeUpdate(sql, place.getPlace_id(), place.getStreet_address(),
                place.getPostal_code(), place.getCity(), place.getState_province(), place.getState_id());
    }

    //按员工编号查询员工工作信息(含历史工作信息),返回员工编号、职位编号和部门编号
    public List QueryStaffEmployment(String staff_id) {
        List list = new ArrayList();
        String sql = "SELECT staff_id,employment_id,section_id FROM staffs WHERE staff_id = ? "
                   + "UNION "
                   + "SELECT staff_id,employment_id,section_id FROM employment_history WHERE staff_id = ?;";
        Integer id = Integer.valueOf(staff_id);
        ResultSet rs = getInformation.executeQuery(sql, id, id);
        try {
            while (rs.next()) {
                list.add(rs.getString("staff_id"));
                list.add(rs.getString("employment_id"));
                list.add(rs.getString("section_id"));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return list;
    }
}
```


    四、结果展示 +

    运行login.jsp进入登录界面 +

    4.1 以员工身份登录 +

    1)输入staff_id 和 正确的密码,进入员工主页面; +

    输入staff_id=104,密码123456,进入员工页面 +


    +

    2)在员工主页面,可以选择查看员工自己基本信息; +


    3)在员工主页面,修改员工自己的电话号码; +

    选择修改电话号码,填入590.423.4567 +


    可以重新查询,电话号码改变 +


    +

    4.2 以部门经理身份登录 +

    1)输入staff_id 和 正确的密码,进入部门经理主页面; +

    输入staff_id=103,密码123456,进入经理页面 +


    +

    2)在部门经理主页面,可以查看本部门所有员工基本信息(选择按员工编号升序排列,或者按工资降序排列); +

    查看本部门所有员工基本信息: +

    按员工编号升序排列: +


    按工资降序排列: +


    +

    3)在部门经理主页面,可以按员工编号查询员工基本信息; +

    +



    +

    4)在部门经理主页面,可以按员工姓名查询员工基本信息; +



    +

    5)在部门经理主页面,可以统计查询本部门员工最高工资,最低工资以及平均工资; +


    +

    4.3 以人事经理身份登录 +

    1)输入特定编号hr001 和 特定密码,进入人事经理主页面; +

    输入staff_id=hr001,密码hr001,进人事经理主页面 +


    +

    2)在人事经理主页面,可以查看所有员工基本信息(选择按员工编号升序排列,或者按工资降序排列); +

    按员工编号升序排列: +


    +

    按工资降序排列: +


    +

    3)在人事经理主页面,可以按员工编号查询员工基本信息; +



    +

    4)在人事经理主页面,可以按员工姓名查询员工基本信息; +



    +

    5)在人事经理主页面,可以统计各部门员工最高工资,最低工资以及平均工资; +


    +

    6)在人事经理主页面,可以查询各部门基本信息,并可以根据部门编号修改部门名称; +


    修改名称: +




    +

    7)在人事经理主页面,可以各工作地点基本信息,并可以增加新的工作地点; +




    +

    8)在人事经理主页面,可以按员工编号查询员工工作信息,包括其历史工作信息,返回员工编号,职位编号和部门编号; +



    +

    五、可能遇到的问题 +

    5.1 Java开发工具不同:IntelliJ IDEA V.S. Eclipse +

    笔者一开始使用过Eclipse,但是在后期转而使用IntelliJ IDEA,这是因为Eclipse有一些缺陷,比如报错不明显,这对于初学者而言很可能是致命的。IntelliJ IDEA的优势之一是能在右侧Database处直接连接openGauss数据库(需选择PostgreSQL数据库)。而需要注意IntelliJ IDEA只能免费试用一个月。 +


    +

    5.2 连接openGauss数据库报错 +

    第一步连接数据库时,Eclipse出现以下报错,但是它并没有指明究竟是哪里出错。一般出现如下错误,是因为连接openGauss数据库失败,原因可能为以下几点: +


    (1)url使用错误 +

    这里121.36.79.196为弹性公网ip,26000为端口号,human_resource为数据库名称。如果url错误,则会导致数据库无法连接。 +

    url="jdbc:postgresql://121.36.79.196:26000/human_resource"; +

    (2)数据库用户或者密码错误 +

    数据库用户或密码错误也会导致连接出错。所以必须牢记用户名及密码,否则容易使用错误。 +

    private static String user="a"; +

    private static String password="aaa"; +


    (3)java版本错误 +

    openGauss适用于java的版本为1.8,其他版本可能会报错。 +

    (4)调包出错 +

    +

    连接数据库需要调用postgresql.jar包,建议提前配置jar包到项目中。

    + + + + diff --git "a/content/zh/post/zhengwen2/\345\237\272\344\272\216\345\215\216\344\270\272\344\272\221\351\262\262\351\271\217\345\274\271\346\200\247\344\272\221\346\234\215\345\212\241\345\231\250\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223-\345\256\236\351\252\214.md" "b/content/zh/post/zhengwen2/\345\237\272\344\272\216\345\215\216\344\270\272\344\272\221\351\262\262\351\271\217\345\274\271\346\200\247\344\272\221\346\234\215\345\212\241\345\231\250\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223-\345\256\236\351\252\214.md" new file mode 100644 index 0000000000000000000000000000000000000000..5a54f1bf3bd08b668fc356cb975c04436040313d --- /dev/null +++ "b/content/zh/post/zhengwen2/\345\237\272\344\272\216\345\215\216\344\270\272\344\272\221\351\262\262\351\271\217\345\274\271\346\200\247\344\272\221\346\234\215\345\212\241\345\231\250\351\203\250\347\275\262openGauss\346\225\260\346\215\256\345\272\223-\345\256\236\351\252\214.md" @@ -0,0 +1,137 @@ ++++ + +title = "基于华为云鲲鹏弹性云服务器部署openGauss数据库-实验" + +date = "2021-07-10" + +tags = ["基于华为云鲲鹏弹性云服务器部署openGauss数据库-实验"] + +archives = "2021-07" + +author = "许玉冲" + +summary = "基于华为云鲲鹏弹性云服务器部署openGauss数据库-实验" + +img = "/zh/post/zhengwen2/img/img38.png" + +times = "12:30" + ++++ + +# 基于华为云鲲鹏弹性云服务器部署openGauss数据库-实验 + +## 实验目标与基本要求 + +指导用户基于华为云鲲鹏云服务器,部署openGauss单机数据库。通过本实验,您能够: + + 1. 使用openGauss的om工具成功安装openGauss单机数据库。 + 2. 登录到openGauss数据库进行简单的增删改查操作。 + +## 实验步骤: +### 1. 购买鲲鹏云主机 +购买弹性云服务器ECS选型时候,CPU架构需要选择鲲鹏计算。 +操作系统选择:openEuler – openEuler 20.03 64bit with ARM(40GB) +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164107342.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +### 2. 登录云主机,下载openGauss镜像并解压。 +```powershell +[root@ecs-opengauss ~]# cd /opt +[root@ecs-opengauss opt]# mkdir /opt/gauss +[root@ecs-opengauss opt]# cd /opt/gauss +[root@ecs-opengauss opt]#wget https://sandbox-experiment-resource-north-4.obs.cn-north-4.myhuaweicloud.com/opengauss-install/openGauss-1.1.0-openEuler-64bit-all.tar.gz +``` +解压完整镜像,解压完整镜像: +```powershell +tar -zxvf openGauss-1.1.0-openEuler-64bit-all.tar.gz +tar -zxvf openGauss-1.1.0-openEuler-64bit-om.tar.gz +``` +### 3. 创建集群的xml配置文件 +```powershell +cd /opt/gauss +vi clusterconfig.xml +``` +修改地点标红 +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164202312.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +![在这里插入图片描述](https://img-blog.csdnimg.cn/2021070616422744.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +### 4. 安装数据库 +```powershell +chmod -R 755 /opt/gauss +chmod -R 755 /opt/gauss/script +``` +*** +**说明** +对于openEuler系统,需要修改系统的performance.sh文件中min_free_kbytes的配置。 +```powershell +vi /etc/profile.d/performance.sh +``` +注释掉15行: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164306554.png#pic_center) +*** +1. 重新设置min_free_kbytes: +```powershell +/sbin/sysctl -w vm.min_free_kbytes=767846 +``` +2. 安装依赖包: +```powershell +yum install libaio libaio-devel -y +``` +3. 
预安装gs_preinstall +```powershell +cd /opt/gauss/script +./gs_preinstall -U omm -G dbgrp -X /opt/gauss/clusterconfig.xml +``` +成功如下图所示: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164344281.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +### 5. 安装gs_install +```powershell +su - omm +gs_install -X /opt/gauss/clusterconfig.xml +``` +成功如下图所示: +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164422906.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +### 6. 安装完成 +1. 检查数据库状态 +```powershell +gs_om -t status --detail +``` +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164500718.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +2. 使用gsql命令登录主机数据库 +```powershell +gsql -d postgres -p 15400 -r +``` +![在这里插入图片描述](https://img-blog.csdnimg.cn/20210706164535169.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L0dhdXNzREI=,size_16,color_FFFFFF,t_70#pic_center) +3. 创建数据库和表,增删改查测试: + +```sql +postgres=# create database mydb; +CREATE DATABASE +postgres=# c mydb +Non-SSL connection (SSL connection is recommended when requiring high-security)You are now connected to database "mydb" as user "omm". +mydb=# create table stu(id int, name varchar, age int); +CREATE TABLE +mydb=# d +List of relations Schema | Name | Type | Owner | Storage +--------+------+-------+-------+---------------------------------- +public | stu | table | omm | {orientation=row,compression=no} +(1 row) +mydb=# d stu +Table "public.stu" +Column | Type | Modifiers +--------+-------------------+----------- id | integer | +name | character varying | +age | integer | +mydb=# insert into stu values(1,'xiaoming', 18); + +INSERT 0 1mydb=# insert into stu values(2,'lihua', 24); +INSERT 0 1 +mydb=# select * from stu; +id | name | age + ----+----------+----- +1 | xiaoming | 18 +2 | lihua | 24 +(2 rows) +mydb=# update stu set age=20 where name='lihua'; +UPDATE 1 +mydb=# delete from stu where id=1; +DELETE 1 +mydb=# +``` diff --git "a/content/zh/post/zhengwen2/\350\277\207\347\250\213\345\214\226SQL\344\273\245\345\217\212openGauss\345\255\230\345\202\250\350\277\207\347\250\213\343\200\201\345\207\275\346\225\260\343\200\201\350\247\246\345\217\221\345\231\250.md" "b/content/zh/post/zhengwen2/\350\277\207\347\250\213\345\214\226SQL\344\273\245\345\217\212openGauss\345\255\230\345\202\250\350\277\207\347\250\213\343\200\201\345\207\275\346\225\260\343\200\201\350\247\246\345\217\221\345\231\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..85c058306ae72e313dd670a63d9c13adfc84642c --- /dev/null +++ "b/content/zh/post/zhengwen2/\350\277\207\347\250\213\345\214\226SQL\344\273\245\345\217\212openGauss\345\255\230\345\202\250\350\277\207\347\250\213\343\200\201\345\207\275\346\225\260\343\200\201\350\247\246\345\217\221\345\231\250.md" @@ -0,0 +1,350 @@ ++++ + +title = "过程化SQL以及openGauss存储过程、函数、触发器" + +date = "2021-07-09" + +tags = ["过程化SQL以及openGauss存储过程、函数、触发器"] + +archives = "2021-07" + +author = "CR7" + +summary = "过程化SQL以及openGauss存储过程、函数、触发器" + +img = "/zh/post/zhengwen2/img/img33.png" + +times = "12:30" + ++++ + +# 过程化SQL以及openGauss存储过程、函数、触发器 + +# 一、 过程化SQL + +基本的SQL是高度非过程化的语言。嵌入式SQL将SQL语句嵌入程序设计语言,借助高级语言的控制功能实现过程化。过程化SQL是对SQL的扩展,使其增加了过程化语句功能。 + 
+简单来说,从标准SQL语句到嵌入式SQL再到过程化SQL,就是使SQL功能不断增强的过程。标准SQL语句相当于是可以操纵数据库的一些“武器”,嵌入式SQL相当于给程序设计语言(如java,C语言等)装备了这些“武器”,而过程化SQL相当于给这些“武器”予以血肉,通过自身带有的流程控制语句操纵数据库。 + +过程化SQL程序的基本结构是块。所有的过程化SQL程序都是由块组成的。这些块之间可以相互嵌套,每个块完成一个逻辑操作。学会用过程化SQL书写存储过程和触发器就会像写C语言代码一样随心所欲。 + +## 1、变量和常量的定义 +# (1)变量定义 +变量名 数据类型 [[NOT NULL] :=初值表达式] 或 +变量名 数据类型 [[NOT NULL] 初值表达式] + +## (2)常量定义 +常量名 数据类型 CONSTANT:=常量表达式 + +【注】常量必须要给一个值,并且该值在存在期间或常量的作用域内不能改变。如果试图修改它,过程化SQL将返回一个异常。 + +## (3)赋值语句 +变量名 :=表达式 + +## 2、选择控制语句 +## (1)IF语句 + +``` +IF condition THEN + Sequence_of_statements; /*条件为真时才执行该条语句*/ +END IF /*条件为假或NULL时什么也不做,控制转移至下一个语句*/ +``` + +## (2)IF-ELSE语句 + +``` +IF condition THEN + Sequence_of_statements1; /*条件为真时语句序列才被执行*/ +ELSE + Sequence_of_statements2; /*条件为假或NULL时才被执行*/ +END IF +``` + +## (3)嵌套的IF语句 +在THEN和ELSE子句中还可以包含IF语句,IF语句可以嵌套,如: + +``` +IF condition THEN + IF condition THEN + Sequence_of_statements1; + END IF +ELSE + IF condition THEN + Sequence_of_statements2; + ELSE + Sequence_of_statements3; + END IF +END IF +``` + +## 3、循环控制语句 +### (1)LOOP循环语句 + +``` +LOOP + Sequence_of_statements; /*循环体,一组过程化SQL语句*/ +END LOOP; +``` + +该循环必须要结合EXIT使用,否则将陷入死循环。 + +举例: + +``` +CREATE OR REPLACE PROCEDURE proc_loop(IN i integer, OUT count integer) +IS BEGIN +count:=0; +LOOP + IF count > i THEN + raise info 'count is %. ', count; + EXIT; + ELSE + count:=count+1; + END IF; +END LOOP; +END; +/ + +CALL proc_loop(10,5); +``` + +### (2)WHERE-LOOP循环语句 + +``` +WHERE condition LOOP + Sequence_of_statements; /*条件为真时执行循环体内的语句序列*/ +END LOOP; +``` + +每次执行循环体语句之前首先对条件进行求值,如果条件为真则执行循环体内的语句序列,如果条件为假则跳过循环并把控制传递给下一个语句。 + +举例: + +``` +CREATE OR REPLACE PROCEDURE proc_while_loop(IN maxval integer) +IS DECLARE +i int :=1; +BEGIN +WHILE i < maxval LOOP + INSERT INTO integertable VALUES(i); + i:=i+1; +END LOOP; +END; +/ +``` + +### (3)FOR-LOOP循环语句 + +``` +FOR count IN [REVERSE] bound1...bound2 LOOP + Sequence_of_statements; +END LOOP; +``` + +将count设置为循环的下界bound1,检查它是否小于上界bound2。当指定REVERSE时则将count设置为循环的上界bound2,检查count是否大于下界bound1。如果越界则执行跳出循环,否则执行循环体,然后按照步长(+1或-1)更新count的值,重新判断条件。 有点像python中的for…in range(i,j)循环。 + +# 二、openGauss下的存储过程、函数、触发器书写 +过程化SQL块主要有命名块和匿名块。匿名块每次执行时都要进行编译,它不能被存储到数据库中,也不能在其他过程化SQL块中调用。过程和函数是命名块,他们被编译后保存在数据库中,称为持久性存储模块(PSM),可以 被反复调用,运行速度较快。 + +## 1、存储过程 +存储过程是由过程化SQL语句书写的过程,这个过程经编译和优化后存储在数据库服务器中,因此称它为存储过程,使用时只要调用即可。 + +### (1)存储过程的优点 +1)由于存储过程不像解释执行的SQL语句那样在提出操作请求时才进行语法分析和优化工作,因而运行效率高,它提供了在服务器端快速执行SQL语句的有效途径。 +2)存储过程降低了客户机和服务器之间的通信量。客户机上的应用程序只要通过网络向服务器发出调用存储过程的名字和参数,就可以让关系数据库管理系统执行其中的多条SQL语句并进行数据处理,只有最终的处理结果才返回客户端。 +3)方便实施企业规则。可以把企业规则的运算程序写成存储过程放入数据库服务器中,由关系数据库管理系统管理,既有利于集中控制,又能够方便地进行维护。当企业规则发生变化时只要修改存储过程即可,无需修改其他应用程序。 + +### (2)创建存储过程 + +``` +CREATE OR REPLACE PROCEDURE 存储过程名 ( [参数1模式 参数1名字 参数1类型] ,..) 
IS
BEGIN
 <标准SQL或过程化SQL>
END;
/
(不要忘了最后一行的斜杠)

调用存储过程:

CALL 存储过程名(参数1,..);
```

举例:

```
CREATE OR REPLACE PROCEDURE update_num ( IN daily_id CHAR(4) )
IS
BEGIN
    UPDATE 代理商
    SET 代理客户数 =
        (SELECT count(*) FROM 客户 WHERE 代理商编号=daily_id GROUP BY 代理商编号 )
    WHERE 代理商编号=daily_id;
END;
/

CALL update_num('01');
```

### (3)修改存储过程

```
ALTER PROCEDURE 过程名1 RENAME TO 过程名2;(重新命名一个存储过程)
ALTER PROCEDURE 过程名 COMPILE;(重新编译一个存储过程)
```

### (4)删除存储过程

```
DROP PROCEDURE 过程名();
```

## 2、函数以及触发器
函数必须指定返回的类型。openGauss中的触发器要先定义一个触发器函数,再利用这个函数定义触发器。

### (1)创建函数及触发器
创建触发器函数:

```
CREATE OR REPLACE FUNCTION 触发器名称()
RETURNS TRIGGER AS $$ DECLARE
BEGIN
 <标准SQL或过程化SQL>
RETURN ;
END;
$$ LANGUAGE PLPGSQL;
```

创建触发器,其中调用上面创建的触发器函数(触发时机与触发事件的写法见下面的说明):

```
CREATE TRIGGER 触发器名称
BEFORE|AFTER|INSTEAD OF 触发事件 ON 表名
FOR EACH ROW|FOR EACH STATEMENT
EXECUTE PROCEDURE 触发器函数名();
```

BEFORE:触发器函数是在触发事件发生前执行。

AFTER:触发器函数是在触发事件发生后执行,约束触发器只能指定为AFTER。

INSTEAD OF:触发器函数直接替代触发事件。

FOR EACH ROW:指该触发器是受触发事件影响的每一行触发一次。

FOR EACH STATEMENT:指该触发器是每个SQL语句只触发一次。

(未指定时默认值为FOR EACH STATEMENT。约束触发器只能指定为FOR EACH ROW)

举例:

```
CREATE OR REPLACE FUNCTION tri_insert_订货项目_func()
RETURNS TRIGGER AS $$ DECLARE
BEGIN
UPDATE 产品 SET 产品订单数=产品订单数+1,库存量=库存量-NEW.订购数量 WHERE 产品编号=NEW.产品编号;
RETURN NEW;
END;
$$ LANGUAGE PLPGSQL;


CREATE TRIGGER after_insert_订货项目
AFTER insert ON 订货项目
FOR EACH ROW
EXECUTE PROCEDURE tri_insert_订货项目_func();
```

### (2)善于用过程化SQL书写存储过程和函数
(往赛程表中插入一条比赛信息,若比分1大于比分2,则球队1的总场数、总胜场+1,球队2的总场数+1…)

```
CREATE OR REPLACE FUNCTION insert_func2()
RETURNS TRIGGER AS $$ DECLARE
BEGIN
if new.比分1>new.比分2 then
    update 球队 set 总场数=总场数+1,总胜场=总胜场+1 where 球队名称=new.球队1名称;
    update 球队 set 总场数=总场数+1 where 球队名称=new.球队2名称;
else
    if new.比分1<new.比分2 then
        update 球队 set 小组赛胜场=小组赛胜场+1,小组赛进球数=小组赛进球数+new.比分1,小组赛失球数=小组赛失球数+new.比分2,小组赛积分=小组赛积分+3 where 球队名称=new.球队1名称;
        update 球队 set 小组赛负场=小组赛负场+1,小组赛进球数=小组赛进球数+new.比分2,小组赛失球数=小组赛失球数+new.比分1 where 球队名称=new.球队2名称;
    else
        if new.比分1……(原文自此处起残缺)
```

Gauss松鼠会是汇集数据库爱好者和关注者的大本营,
    大家共同学习、探索、分享数据库前沿知识和技术,
    互助解决问题,共建数据库技术交流圈。

    openGauss官网
    + + + diff --git a/content/zh/post/zhengxue/images/opengauss_compile/1.1.0.png b/content/zh/post/zhengxue/images/opengauss_compile/1.1.0.png new file mode 100644 index 0000000000000000000000000000000000000000..7927105a7ef6c66fa46a5c442c650de027da46a9 Binary files /dev/null and b/content/zh/post/zhengxue/images/opengauss_compile/1.1.0.png differ diff --git a/content/zh/post/zhengxue/images/opengauss_compile/1.1.1.png b/content/zh/post/zhengxue/images/opengauss_compile/1.1.1.png new file mode 100644 index 0000000000000000000000000000000000000000..d29ff117f31df416bd851e57a7ea7ba2bd6aee46 Binary files /dev/null and b/content/zh/post/zhengxue/images/opengauss_compile/1.1.1.png differ diff --git a/content/zh/post/zhengxue/images/opengauss_compile/1.1.2.png b/content/zh/post/zhengxue/images/opengauss_compile/1.1.2.png new file mode 100644 index 0000000000000000000000000000000000000000..754483dcf95b58a848ee0b765b3fe6e897534a23 Binary files /dev/null and b/content/zh/post/zhengxue/images/opengauss_compile/1.1.2.png differ diff --git a/content/zh/post/zhengxue/images/opengauss_compile/1.1.3.png b/content/zh/post/zhengxue/images/opengauss_compile/1.1.3.png new file mode 100644 index 0000000000000000000000000000000000000000..f8b21926df8bc95777e3ee35fe1c6b605b11a035 Binary files /dev/null and b/content/zh/post/zhengxue/images/opengauss_compile/1.1.3.png differ diff --git a/content/zh/post/zhengxue/images/opengauss_compile/1.1.4.png b/content/zh/post/zhengxue/images/opengauss_compile/1.1.4.png new file mode 100644 index 0000000000000000000000000000000000000000..fb862ee79a45c4f268861162802885bf0ca0bbe0 Binary files /dev/null and b/content/zh/post/zhengxue/images/opengauss_compile/1.1.4.png differ diff --git a/content/zh/post/zhengxue/images/opengauss_compile/1.1.5.png b/content/zh/post/zhengxue/images/opengauss_compile/1.1.5.png new file mode 100644 index 0000000000000000000000000000000000000000..28306c6bf0004deef740f963dbee0c642b3e769d Binary files /dev/null and b/content/zh/post/zhengxue/images/opengauss_compile/1.1.5.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.0.png b/content/zh/post/zhengxue/images/problem/1.1.0.png new file mode 100644 index 0000000000000000000000000000000000000000..60b1a7aee4a829513d76522ed59d1e40a9f223fa Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.0.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.1.png b/content/zh/post/zhengxue/images/problem/1.1.1.png new file mode 100644 index 0000000000000000000000000000000000000000..251a0038dea7056b18bfeb4684f1bbd482cc5449 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.1.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.10.png b/content/zh/post/zhengxue/images/problem/1.1.10.png new file mode 100644 index 0000000000000000000000000000000000000000..427666652f5a8041523e72aa423ca9688f26d073 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.10.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.11.png b/content/zh/post/zhengxue/images/problem/1.1.11.png new file mode 100644 index 0000000000000000000000000000000000000000..d272c435c8a98c4d3828e1ad3575efb0550ed033 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.11.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.12.png b/content/zh/post/zhengxue/images/problem/1.1.12.png new file mode 100644 index 0000000000000000000000000000000000000000..62bf194e62a5196c54c1492f6e199026c593ab33 Binary files 
/dev/null and b/content/zh/post/zhengxue/images/problem/1.1.12.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.13.png b/content/zh/post/zhengxue/images/problem/1.1.13.png new file mode 100644 index 0000000000000000000000000000000000000000..c3040933d4d5be3260c59b3205c47439bf605635 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.13.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.14.png b/content/zh/post/zhengxue/images/problem/1.1.14.png new file mode 100644 index 0000000000000000000000000000000000000000..1c05e41e901e5ab6de0a0b4079a6289a0ab953d4 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.14.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.15.png b/content/zh/post/zhengxue/images/problem/1.1.15.png new file mode 100644 index 0000000000000000000000000000000000000000..9443d67acfef75a0b387ec57478614f2a1ffac55 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.15.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.16.png b/content/zh/post/zhengxue/images/problem/1.1.16.png new file mode 100644 index 0000000000000000000000000000000000000000..73bdcba3710ade52a40b191e43479acdaad85868 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.16.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.17.png b/content/zh/post/zhengxue/images/problem/1.1.17.png new file mode 100644 index 0000000000000000000000000000000000000000..440e1dcbe92ad97f3e1afb0978674eb918ea79d2 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.17.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.18.png b/content/zh/post/zhengxue/images/problem/1.1.18.png new file mode 100644 index 0000000000000000000000000000000000000000..13b7546424ec820eed6df6f0da6507db9bfc9894 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.18.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.19.png b/content/zh/post/zhengxue/images/problem/1.1.19.png new file mode 100644 index 0000000000000000000000000000000000000000..a3e1eda0005bc26359491b1abcffad4d25944262 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.19.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.2.png b/content/zh/post/zhengxue/images/problem/1.1.2.png new file mode 100644 index 0000000000000000000000000000000000000000..06497c28d0bd2453fb892ec4526c227a7d42c010 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.2.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.20.png b/content/zh/post/zhengxue/images/problem/1.1.20.png new file mode 100644 index 0000000000000000000000000000000000000000..eda34f49ea7c3362cc6896314881a20f6136f315 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.20.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.21.png b/content/zh/post/zhengxue/images/problem/1.1.21.png new file mode 100644 index 0000000000000000000000000000000000000000..8b05ba99bd23c66fdc9838b467cea92073dda337 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.21.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.22.png b/content/zh/post/zhengxue/images/problem/1.1.22.png new file mode 100644 index 0000000000000000000000000000000000000000..e8654cb80643349b425c7dac6a213998baad878d Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.22.png differ diff --git 
a/content/zh/post/zhengxue/images/problem/1.1.23.png b/content/zh/post/zhengxue/images/problem/1.1.23.png new file mode 100644 index 0000000000000000000000000000000000000000..022d46c2da73cb3fbea942506f64db8119f8bfee Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.23.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.24.png b/content/zh/post/zhengxue/images/problem/1.1.24.png new file mode 100644 index 0000000000000000000000000000000000000000..5f85551be563d7210002da7ef83425285512daad Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.24.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.3.png b/content/zh/post/zhengxue/images/problem/1.1.3.png new file mode 100644 index 0000000000000000000000000000000000000000..d6196d04cdd335643e0e8944c4ebdcf8e304d1f0 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.3.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.4.png b/content/zh/post/zhengxue/images/problem/1.1.4.png new file mode 100644 index 0000000000000000000000000000000000000000..3cf4dd43c6c73787cf66919601c3c7513cf4b5eb Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.4.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.5.png b/content/zh/post/zhengxue/images/problem/1.1.5.png new file mode 100644 index 0000000000000000000000000000000000000000..f27396c127a19b09b48fb158ba89d1c6329f6988 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.5.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.6.png b/content/zh/post/zhengxue/images/problem/1.1.6.png new file mode 100644 index 0000000000000000000000000000000000000000..60aa7351ccd6fe91ec11be11715b0139975bce61 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.6.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.7.png b/content/zh/post/zhengxue/images/problem/1.1.7.png new file mode 100644 index 0000000000000000000000000000000000000000..5f9b06146be0bb9e33f617ac777f5c4bcb0177a6 Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.7.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.8.png b/content/zh/post/zhengxue/images/problem/1.1.8.png new file mode 100644 index 0000000000000000000000000000000000000000..12e07c0be898b01e1d1eb9b178e1ba4964773a3f Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.8.png differ diff --git a/content/zh/post/zhengxue/images/problem/1.1.9.png b/content/zh/post/zhengxue/images/problem/1.1.9.png new file mode 100644 index 0000000000000000000000000000000000000000..49d5302e5ab2ed702e8d5981884677a36632b9ae Binary files /dev/null and b/content/zh/post/zhengxue/images/problem/1.1.9.png differ diff --git a/content/zh/post/zhengxue/images/third_compile/1.1.0.png b/content/zh/post/zhengxue/images/third_compile/1.1.0.png new file mode 100644 index 0000000000000000000000000000000000000000..f5d64b2046133b985dd5a88ea7b2293887c19167 Binary files /dev/null and b/content/zh/post/zhengxue/images/third_compile/1.1.0.png differ diff --git a/content/zh/post/zhengxue/images/third_compile/1.1.1.png b/content/zh/post/zhengxue/images/third_compile/1.1.1.png new file mode 100644 index 0000000000000000000000000000000000000000..6f02d0068419c27926763d3b7e7e21e5a33ce83a Binary files /dev/null and b/content/zh/post/zhengxue/images/third_compile/1.1.1.png differ diff --git a/content/zh/post/zhengxue/images/upgrade_script/FormData_pg_attribute.png 
b/content/zh/post/zhengxue/images/upgrade_script/FormData_pg_attribute.png new file mode 100644 index 0000000000000000000000000000000000000000..48fbadbb792bd75155dd3d2f1ef1d48c6cba74b2 Binary files /dev/null and b/content/zh/post/zhengxue/images/upgrade_script/FormData_pg_attribute.png differ
diff --git a/content/zh/post/zhengxue/images/upgrade_script/catalogBuildParam.png b/content/zh/post/zhengxue/images/upgrade_script/catalogBuildParam.png new file mode 100644 index 0000000000000000000000000000000000000000..be1b8b5d8404412d7c606815afe6ced4c61fa99c Binary files /dev/null and b/content/zh/post/zhengxue/images/upgrade_script/catalogBuildParam.png differ
diff --git a/content/zh/post/zhengxue/images/upgrade_script/catalogBuildParam_struction.png b/content/zh/post/zhengxue/images/upgrade_script/catalogBuildParam_struction.png new file mode 100644 index 0000000000000000000000000000000000000000..e8f482b0bcf500d36bb52608f0d743eb5b16491f Binary files /dev/null and b/content/zh/post/zhengxue/images/upgrade_script/catalogBuildParam_struction.png differ
diff --git a/content/zh/post/zhengxue/images/upgrade_script/gs_wlm_get_session_info.png b/content/zh/post/zhengxue/images/upgrade_script/gs_wlm_get_session_info.png new file mode 100644 index 0000000000000000000000000000000000000000..5b09a6d0626ee3a5d6c607096f47bf4029720384 Binary files /dev/null and b/content/zh/post/zhengxue/images/upgrade_script/gs_wlm_get_session_info.png differ
diff --git a/content/zh/post/zhengxue/images/upgrade_script/pg_attribute.png b/content/zh/post/zhengxue/images/upgrade_script/pg_attribute.png new file mode 100644 index 0000000000000000000000000000000000000000..961c817c18e40bc1680bcd34e2fba68cf4703fc8 Binary files /dev/null and b/content/zh/post/zhengxue/images/upgrade_script/pg_attribute.png differ
diff --git a/content/zh/post/zhengxue/opengauss_compile.md b/content/zh/post/zhengxue/opengauss_compile.md new file mode 100644 index 0000000000000000000000000000000000000000..9d7a8888dbb3d37789bb01423d536d1d3278178f --- /dev/null +++ b/content/zh/post/zhengxue/opengauss_compile.md @@ -0,0 +1,514 @@

+++
title = "ubuntu18.04_x86_64系统----openGauss数据库编译指导"
date = "2021-04-20"
tags = ["ubuntu18.04_x86_64系统----openGauss数据库编译指导"]
archives = "2021-04-20"
author = "shirley_zhengx"
summary = "ubuntu18.04_x86_64系统----openGauss数据库编译指导"
img = "/zh/post/zhengxue/title/img1.png"
times = "9:30"
+++

- [1. 环境描述](#1.环境描述)
  - [1.1.平台信息](#1.1.平台信息)
  - [1.2.编译版本](#1.2.编译版本)
  - [1.3.系统目录软链接](#1.3.系统目录软链接)
- [2. 编译三方库](#2.编译三方库)
  - [2.1.编译步骤概述](#2.1.编译步骤概述)
  - [2.2.依赖库](#2.2.依赖库)
  - [2.3.源码脚本修改](#2.3.源码脚本修改)
  - [2.4.三方库编译及结果](#2.4.三方库编译及结果)
- [3. 编译数据库](#3.编译数据库)
  - [3.1.准备openGauss-server源码以及代码修改](#3.1.准备openGauss-server源码以及代码修改)
  - [3.2.依赖库](#3.2.依赖库)
  - [3.3.环境变量](#3.3.环境变量)
  - [3.4.数据库编译与打包](#3.4.数据库编译与打包)
- [4. 安装数据库](#4.安装数据库)
  - [4.1.编译安装](#4.1.编译安装)
  - [4.2.OM安装](#4.2.OM安装)
- [5. 下载链接](#5.下载链接)

# openGauss数据库编译指导

openGauss数据库的编译依赖许多三方库,社区 `openGauss-third_party` 仓库中存放了主要的依赖。先使用特定的gcc版本编译完三方库,之后就可以用编译好的三方库来编译数据库。

## 1.环境描述

本文针对X86平台Ubuntu系统下openGauss二进制安装包的编译作出指导,平台信息描述如下:

### 1.1.平台信息
| 项目 | 信息 |
| ------ | ------ |
| 架构 | x86_64(通过uname -p命令查看) |
| CPU | Intel(R) Xeon(R) Gold 6266C CPU @ 3.00GHz(通过lscpu命令查看) |
| 硬盘 | 40G + 100G(通过lsblk命令查看) |
| 内存 | 48G(通过free -g命令查看) |
| OS | Ubuntu 18.04.5 LTS(通过lsb_release -a命令查看) |
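上表中的信息可以用下面的命令一次性收集(示意命令,输出格式以实际系统为准):

```
uname -p                     # 查看架构
lscpu | grep "Model name"    # 查看CPU型号
lsblk                        # 查看硬盘
free -g                      # 查看内存(单位GB)
lsb_release -a               # 查看操作系统版本
```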
### 1.2.编译版本

| 仓库 | 版本 |
| ------ | ------ |
| openGauss-third-party | 社区仓库下载最新源码 |
| openGauss-server | 社区仓库master分支 |
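获取上表中两个仓库源码的一种方式如下(示意命令,假设使用gitee上的openGauss社区仓库地址,请以社区实际仓库为准):

```
git clone https://gitee.com/opengauss/openGauss-third_party.git
git clone -b master https://gitee.com/opengauss/openGauss-server.git
```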
### 1.3.系统目录软链接

软链接1:ln -s /usr/lib/x86_64-linux-gnu /usr/lib64/

![](../images/opengauss_compile/1.1.2.png)

软链接2:ln -s /usr/lib64 /lib64

![](../images/opengauss_compile/1.1.3.png)

## 2.编译三方库

针对 `centos` 和 `openEuler` 操作系统已经提供了编译好的二进制三方库,可直接使用社区提供的编译好的文件[openGauss-third_party_binarylibs.tar.gz](https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.1.0/openGauss-third_party_binarylibs.tar.gz)。

针对Ubuntu操作系统,社区不提供编译好的三方库二进制文件,需用户自己编译,也可使用博客中已编译好的三方库二进制文件[openGauss-third_party_binarylibs.tar.gz](https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/binarylibs/binarylibs.tar.gz)。基于Ubuntu系统的三方库编译步骤如下:

### 2.1.编译步骤概述

(1) 编译 `gcc` 和 `cmake`
(2) 下载三方库源码,修改脚本增加Ubuntu平台信息
(3) 在 `openGauss-third_party/build` 下,运行 `sh build_all.sh`,即可进行全量三方库的编译。编译顺序为 `openssl`,`buildtools`,`platform`,`dependency`
(4) 编译完成后,编译结果在 `openGauss-third_party/output` 目录下。

### 2.2.依赖库

#### 2.2.1 依赖安装
| 依赖库 | Ubuntu对应安装命令 |
| ------ | ------ |
| libaio | apt install libaio-dev |
| ncurses | apt install libncurses5-dev |
| pam | apt install libpam0g-dev |
| libffi | apt install libffi-dev |
| libtool | apt install libtool |
| openssl | apt install openssl |
| rpm2cpio | apt install rpm2cpio |
| flex | apt install flex |
| bison | apt install bison |
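上表中的依赖包也可以用一条命令合并安装(示意命令,包名取自上表,需root权限或sudo):

```
sudo apt update
sudo apt install -y libaio-dev libncurses5-dev libpam0g-dev libffi-dev libtool openssl rpm2cpio flex bison
```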
#### 2.2.2 特殊依赖

(1) gcc(推荐使用 `7.3.0` 版本)

gcc编译请参照:[gcc编译指导](/zh/post/xingchen/gcc_compile/)

(2) cmake(推荐 `>=3.16`;如果系统中的cmake版本>=3.16,则直接使用系统自带cmake即可)
下载源码:`https://github.com/Kitware/CMake/releases`
解压源码:tar -zxvf cmake-3.18.5.tar.gz
编译安装:./configure --prefix=/usr/local/cmake318(--prefix为安装路径),然后 make -j && make install -j
导入环境变量:

    export CMAKE_PATH=/usr/local/cmake318

    export LD_LIBRARY_PATH=$CMAKE_PATH/lib:$LD_LIBRARY_PATH

    export PATH=$CMAKE_PATH/bin:$PATH

(3) python
安装:apt install python3
`Tips`: 确保python命令链接的Python版本是python3,通过python --version确认;如果不是python3,可通过软链接修改:ln -s /usr/bin/python3.7 /usr/bin/python,修改之后再次通过python --version确认python版本

#### 2.2.3 其它依赖

(1) jemalloc 依赖 autoconf:apt install autoconf
(2) libthrift 依赖 pkg-config:apt install pkg-config
(3) etcd 依赖 golang:apt install golang
(4) pljava 依赖 libkrb5-dev:apt install libkrb5-dev

这几个依赖也可以按下面的示例一并安装。
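一并安装2.2.3节四个依赖的示意命令(包名取自上面的列表):

```
sudo apt install -y autoconf pkg-config golang libkrb5-dev
```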
### 2.3.源码脚本修改

(1) 修改 `openGauss-third_party/build/get_PlatForm_str.sh` 增加新的平台,如下图

![](../images/third_compile/1.1.0.png)

$kernel信息可以通过命令获取:
```
lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z
```

(2) 在以下三方依赖中,在其build.py文件中增加ubuntu平台信息。
```
openGauss-third_party/dependency/fio/build.py
openGauss-third_party/dependency/iperf/build.py
openGauss-third_party/dependency/jemalloc/build.py
openGauss-third_party/dependency/kerberos/build.py
openGauss-third_party/dependency/libcgroup/build.py
openGauss-third_party/dependency/libedit/build.py
openGauss-third_party/dependency/nanomsg/build.py
openGauss-third_party/dependency/numactl/build.py
openGauss-third_party/dependency/openssl/build.py
openGauss-third_party/dependency/protobuf/build.py
```
在build.py的binary_parse函数中增加平台信息:
```
elif platform_str == 'ubuntu18.04_x86_64':
    binary_list.append(platform_str)
```
以下错误信息均是由于未增加平台信息引起:
```
Failed
[Error] the plat form is not supported!

[ERROR] Not supported platform type
```

(3) 在ubuntu系统中对三方库的特殊修改

/openGauss-third_party/dependency/masstree/build.sh修改rename命令

```
PLATFORM="$(bash $(pwd)/../../build/get_PlatForm_str.sh)"
if [ "$PLATFORM"x = "ubuntu18.04_x86_64"x ]; then
    rename 's/\.cc/\.cpp/' $MASSTREE_MEGRED_SOURCES_DIR/*.cc
else
    rename ".cc" ".cpp" $MASSTREE_MEGRED_SOURCES_DIR/*.cc
fi
```

/openGauss-third_party/dependency/snappy/build.sh修改mv命令

```
LIB_PATH=install_comm/lib64
if [ -d "$LIB_PATH" ];then
    mv install_comm/lib64 install_comm/lib
fi
```

/openGauss-third_party/dependency/zstd/build.sh修改mv命令
```
LIB_PATH=../../../../install_comm/lib64
if [ -d "$LIB_PATH" ];then
    mv ../../../../install_comm/lib64/libzstd* ../../../../install_comm/lib/
fi
```
/openGauss-third_party/dependency/parquet/build.sh修改mv命令
```
LIB_PATH=${LOCAL_DIR}/install_comm/lib64
if [ -d "$LIB_PATH" ]; then
    mv ${LOCAL_DIR}/install_comm/lib64 ${LOCAL_DIR}/install_comm/lib
fi
```
openGauss-third_party/dependency/protobuf/build.py中的build_component函数,修改mv_cmd变量,如下图。

![](../images/third_compile/1.1.1.png)

### 2.4.三方库编译及结果

(1) 进入到`openGauss-third_party/build`目录,运行 `sh build_all.sh`,全量编译三方库。
如果在编译dependency过程中,某一个三方库编译出错,可进入该三方库目录中单独编译;解决错误后,可以在`dependency/build/build_dependency.sh`中注释掉已经编译好的三方库,只编译还未编译成功的库,分组单独进行编译。

***编译dependency***
```
cd openGauss-third_party/dependency/build/
sh build_dependency.sh
```

(2) 编译结果在 `openGauss-third_party/output` 之中。

将编译好的 `gmp mpfr mpc isl gcc` 目录拷贝到`openGauss-third_party/output/buildtools/${platform}/gcc7.3`下,output目录即为完整的三方库二进制。将output目录拷贝出去,重命名为binarylibs,便可以使用它进行数据库编译。

`Tips`: 对于学生,不建议自己编译三方库,可直接使用提供的三方库二进制包,ubuntu三方库二进制包可在博客第5节中下载。

## 3.编译数据库

以下介绍如何在ubuntu系统上编译openGauss。

### 3.1.准备openGauss-server源码以及代码修改

(1) 进入openGauss-server/src/get_PlatForm_str,添加ubuntu平台信息,如下图。

![](../images/opengauss_compile/1.1.0.png)

`Tips`: 如果三方库要自己编译,请保持`openGauss-third_party/build/get_PlatForm_str.sh`和`openGauss-server/src/get_PlatForm_str`的平台信息一致(可用本节末尾的示意命令验证三方库侧的平台字符串)。

(2) 进入openGauss-server/src/gausskernel/Makefile,修改代码,如下图。

![](../images/opengauss_compile/1.1.1.png)

(3) 进入openGauss-server/build/script/mpp_package.sh,添加ubuntu信息。
```
if [ X"$kernel" == X"centos" ]; then
    dist_version="CentOS"
elif [ X"$kernel" == X"openeuler" ]; then
    dist_version="openEuler"
elif [ X"$kernel" == X"euleros" ]; then
    dist_version="EulerOS"
elif [ X"$kernel" == X"kylin" ]; then
    dist_version="Kylin"
elif [ X"$kernel" == X"ubuntu" ]; then
    dist_version="Ubuntu"
else
    echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) and Ubuntu(x86) platform."
    echo "Kernel is $kernel"
    exit 1
fi
```
(4) 进入openGauss-server/build/script/package_opengauss.sh,添加Ubuntu信息。
添加的信息同(3)一样。
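修改完成后,可以用下面的示意命令验证平台字符串是否已被正确识别(假设脚本路径与正文一致,脚本名以仓库实际文件为准):

```
cd openGauss-third_party/build
sh get_PlatForm_str.sh    # 预期输出:ubuntu18.04_x86_64
```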
### 3.2.依赖库

建议使用从列出的操作系统安装盘或安装源中获取的以下依赖软件的默认安装包进行安装。如果不存在以下软件,请参考推荐的软件版本。

| 依赖库 | Ubuntu对应版本 |
| ------ | ------ |
| libaio | 0.3.110-5ubuntu0.1(apt install libaio-dev) |
| ncurses | 6.1-1ubuntu1.18.04(apt install libncurses5-dev) |
| glibc | 2.27-3ubuntu1.3 |
| flex | flex 2.6.1 |
| bison | bison 3.5.3 |
| patch | 2.7.6 |
| lsb_release | 9.20170808ubuntu1 |
| cmake | 3.16以上版本 |
查看版本方式:
```
apt list | grep libaio-dev
flex --version
bison --version
patch --version
apt list | grep lsb
glibc:ldd --version
```
`Tips`: flex 2.6.1和bison 3.5.3版本对应,数据库编译才能成功

### 3.3.环境变量

将自己的opengauss源码包和binarylibs路径放入环境变量文件gauss_env,内容如下。

导入环境变量:
```
export CODE_BASE=/usr2/compile/openGauss-server
export BINARYLIBS=/usr2/compile/binarylibs
export GAUSSHOME=$CODE_BASE/dest/
export GCC_PATH=$BINARYLIBS/buildtools/ubuntu18.04_x86_64/gcc7.3/
export CC=$GCC_PATH/gcc/bin/gcc
export CXX=$GCC_PATH/gcc/bin/g++
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH
```
`Tips`: 环境变量里面一定要写export,即使`echo $GCC_PATH`存在,也要写export才能真正导入路径

参数说明:
CODE_BASE 为openGauss-server源码目录;
BINARYLIBS 为第一步编译好的三方库二进制目录;
GAUSSHOME 为编译完成的目标文件路径;
GCC_PATH 为二进制中GCC编译结果存放的路径,在三方库buildtools/ubuntu18.04_x86_64/目录下。

### 3.4.数据库编译与打包

#### 3.4.1 手动编译
(1) 导入3.3节环境变量
(2) 选择一个版本进行配置
```
debug版本:
./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-zlib

release版本:

./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib

```
(3) make -sj && make install -sj

#### 3.4.2 一键编译
无需导入环境变量
```
sh build.sh -m release -3rd /usr2/compile/binarylibs
```
#### 3.4.3 二进制打包
无需导入环境变量
```
sh build.sh -m release -3rd /usr3/zxgauss/binarylibs -pkg (参数-pkg表示打包)
```
打包之后的结果在openGauss-server/output目录下,如下图:

![](../images/opengauss_compile/1.1.4.png)

在openGauss-server/output目录下,主要tar包如下图:

![](../images/opengauss_compile/1.1.5.png)

`Tips`: 三方库和数据库编译过程中出现的问题请参考:[编译问题解决](/zh/post/zhengxue/problem_solution/)

## 4.安装数据库

以下介绍如何在ubuntu系统上安装openGauss。

### 4.1.编译安装

(1) 切换到普通用户omm,导入3.3节环境变量
(2) 选择一个版本进行配置
```
debug版本:
./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-zlib

release版本:

./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib

```
(3) make -sj && make install -sj

(4) 初始化数据库

```
mkdir $data/dn1 (其中$data指数据目录)

gs_initdb $data/dn1 --nodename single_node -w "opengauss@123"
```

(5) 启动数据库

```
gaussdb -D $data/dn1 &
```
`Tips`:默认端口是5432;如果启动时端口被占用,可在$data/dn1/postgresql.conf文件中修改port参数,或者使用如下命令指定端口启动:

```
gaussdb -D $data/dn1 -p 12345 &
```

(6) 连接数据库

```
gsql -d postgres -p 12345 -r
```
### 4.2.OM安装
(1) 下载OM包,放入用户自建目录$ompackage中并解压。OM安装包可自己编译打包,也可直接使用提供的OM安装包(可在博客第5节中下载)。

```
mkdir -p $ompackage
tar -zxvf openGauss-2.0.0-Ubuntu-64bit-all.tar.gz
chmod 755 -R $ompackage
```

(2) 准备xml文件,可参照官网安装文档:https://opengauss.org/zh/docs/2.0.0/docs/installation/%E5%88%9B%E5%BB%BAXML%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6.html

(3) 在root用户下进入$ompackage/script目录

(4) 执行预安装
```
gs_preinstall -U omm -G omm -X clusterconfig.xml --sep-env-file=/usr/env
```
`Tips`:`--sep-env-file`参数表示分离环境变量,目录可自定义。

(5) 切换到omm,导入环境变量
```
su - omm
source /usr/env
```
(6) 执行安装
```
gs_install -X clusterconfig.xml
```
`Tips`:如有问题,安装细节请参照官网:https://opengauss.org/zh/docs/2.0.0/docs/installation/%E5%AE%89%E8%A3%85openGauss.html
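安装完成后,可以用下面的示意命令做一个简单验证(端口与密码以实际安装为准;gs_om仅在OM安装方式下可用):

```
# 编译安装方式:用gsql连接并查看版本
gsql -d postgres -p 5432 -r -c "select version();"

# OM安装方式:查看集群状态
gs_om -t status --detail
```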
## 5.下载链接

针对ubuntu18.04_x86_64系统,提供二进制包如下:

三方库二进制包: https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/binarylibs/binarylibs.tar.gz

OM安装包:https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/binarylibs/openGauss-2.0.0-Ubuntu-64bit-all.tar.gz

JDBC二进制包:https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/binarylibs/openGauss-2.0.0-JDBC.tar.gz

ODBC二进制包:https://opengauss-beta.obs.cn-north-4.myhuaweicloud.com/binarylibs/GaussDB-Kernel-V500R001C20-UBUNTU-64bit-Odbc.tar.gz

***Notice:提供的基于ubuntu系统的二进制文件包仅供学习和使用,不推荐直接用于生产环境***

diff --git a/content/zh/post/zhengxue/problem_solution.md b/content/zh/post/zhengxue/problem_solution.md new file mode 100644 index 0000000000000000000000000000000000000000..33db50654eb2a19087b953cad0d227a0cde44249 --- /dev/null +++ b/content/zh/post/zhengxue/problem_solution.md @@ -0,0 +1,191 @@

+++
title = "ubuntu18.04_x86_64系统----openGauss数据库编译问题解决"
date = "2021-04-20"
tags = ["ubuntu18.04_x86_64系统----openGauss数据库编译问题解决"]
archives = "2021-04-20"
author = "shirley_zhengx"
summary = "ubuntu18.04_x86_64系统----openGauss数据库编译问题解决"
img = "/zh/post/zhengxue/title/img1.png"
times = "9:30"
+++

- [1. 编译三方库的问题解决](#1.编译三方库的问题解决)
- [2. 编译数据库的问题解决](#2.编译数据库的问题解决)

# 1.编译三方库的问题解决

(1) python没有找到

![](../images/problem/1.1.0.png)

原因:输入命令`python`,发现是python2版本,需要python3

解决:
```
rm -rf /usr/bin/python
ln -s /usr/bin/python3 /usr/bin/python
```
(2) 编译libcgroup

![](../images/problem/1.1.1.png)

原因分析:到/data/openGauss-third_party/dependency/libcgroup下执行该命令,提示信息:
```
Command 'rpm2cpio' not found, but can be installed with:
apt install rpm2cpio
```
解决:apt install rpm2cpio

(3) 编译cJson

![](../images/problem/1.1.2.png)

原因分析:source没有找到,可能是因为bash的问题。用命令 ls -l `which sh` 查看,发现是dash,不是bash,如下图:

![](../images/problem/1.1.3.png)

解决:sudo dpkg-reconfigure dash 重新配置dash,选择no则是bash

(4) 编译cffi

![](../images/problem/1.1.4.png)

解决:安装libffi-dev和libssl-dev(apt install libffi-dev、apt install libssl-dev),如果安装之后还报错误,换一个窗口执行

(5) 编译masstree

![](../images/problem/1.1.5.png)

解决:apt install rename

(6) 编译libthrift

![](../images/problem/1.1.6.png)

解决:apt install pkg-config

(7) 编译libthrift

![](../images/problem/1.1.7.png)

原因分析:依赖的问题,boost要在libthrift之前编译,libthrift编译依赖boost

解决:编译libthrift之前确保boost、openSSL已编译完成。

(8) 编译parquet

出现关于boost、zlib包的问题

解决:parquet依赖boost、zlib,编译parquet之前确保boost、zlib已编译完成

(9) 编译parquet

![](../images/problem/1.1.8.png)

原因分析:查看log,发现cmake问题

![](../images/problem/1.1.9.png)

解决:安装cmake 3.16以上版本,并导入环境变量

(10) 编译libxml2

![](../images/problem/1.1.10.png)

原因分析:用file命令辨识文件类型(file 文件名),执行 `file libxml2-2.9.9.tar.gz`,如下图,发现包类型不对,包与社区源码的大小不一样,是因为包没有下载好。

![](../images/problem/1.1.11.png)

解决:用root用户重新git clone,如果还是包大小不对,则去gitee仓库页面下载。

(11) 编译pljava

![](../images/problem/1.1.12.png)

原因分析:同(10)一样

解决:root用户重新git clone,如果还是包大小不对,则去gitee仓库页面下载。

(12) 编译pljava

![](../images/problem/1.1.13.png)

解决:apt install libkrb5-dev

(13)

![](../images/problem/1.1.14.png)

解决:apt install libjsoncpp-dev

(14)

![](../images/problem/1.1.15.png)

原因分析:
原因1:查看是不是python3
原因2:查看boost_1_72_0/tools/build/src/tools/python.jam,如下图:

![](../images/problem/1.1.16.png)

解决:修改为includes ?= $(prefix)/include/python$(version)m

# 2.编译数据库的问题解决

(1)

![](../images/problem/1.1.17.png)

原因:flex和bison版本不一致引起

解决:需安装flex和bison的对应版本,即flex 2.6.1和bison 3.5.3,并导入环境变量(源码安装方式见下面的示意步骤)。
此错误一旦出现,安装flex和bison之后,make distclean无法清除所有残留文件,再次编译会同样报错,建议重新下载源码编译。
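flex 2.6.1和bison 3.5.3的源码安装可参考下面的示意步骤(下载地址为假设的官方发布地址,请以实际发布页为准):

```
# flex 2.6.1(假设从GitHub发布页下载)
wget https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz
tar -zxvf flex-2.6.1.tar.gz
cd flex-2.6.1 && ./configure && make && sudo make install && cd ..

# bison 3.5.3(假设从GNU镜像下载)
wget https://ftp.gnu.org/gnu/bison/bison-3.5.3.tar.gz
tar -zxvf bison-3.5.3.tar.gz
cd bison-3.5.3 && ./configure && make && sudo make install
```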
(2)

![](../images/problem/1.1.18.png)

原因分析:make使用了-s(静默)参数,看不到详细报错。去掉s重新make,可以看到详细信息:

![](../images/problem/1.1.19.png)

解决:apt install libstdc++-8-dev,一定要make clean之后再重新编译

(3)

![](../images/problem/1.1.20.png)

![](../images/problem/1.1.23.png)

原因分析:编译中需要/usr/bin/flex

解决:apt install flex、apt install bison

(4)

![](../images/problem/1.1.21.png)

原因分析:查看config.log,如下图:

![](../images/problem/1.1.22.png)

解决:
```
cd /usr/include
ln -s x86_64-linux-gnu/asm asm
```
(5)

![](../images/problem/1.1.24.png)

解决:apt install libedit-dev

diff --git a/content/zh/post/zhengxue/title/img1.png b/content/zh/post/zhengxue/title/img1.png new file mode 100644 index 0000000000000000000000000000000000000000..65e2d4c4751f069c64357704715e2ba99beb511a Binary files /dev/null and b/content/zh/post/zhengxue/title/img1.png differ
diff --git a/content/zh/post/zhengxue/upgrade_primer.md b/content/zh/post/zhengxue/upgrade_primer.md new file mode 100644 index 0000000000000000000000000000000000000000..36a68d3b465464148fa00b4e967d17dd591b926c --- /dev/null +++ b/content/zh/post/zhengxue/upgrade_primer.md @@ -0,0 +1,250 @@

+++
title = "openGauss升级入门学习"
date = "2021-12-20"
tags = ["openGauss升级入门学习"]
archives = "2021-12-20"
author = "shirley_zhengx"
summary = "openGauss升级入门学习"
img = "/zh/post/zhengxue/title/img1.png"
times = "9:30"
+++

[TOC]

## 1. 升级前准备
**@~@ 升级前一些注意事项 @~@**
```
1.建议在数据库系统空闲情况下进行升级,尽量避开业务繁忙的时间段(可按照经验判断,如节假日等)。
2.升级前尽可能保证数据库正常。可以通过gs_om -t status查询,查询结果的cluster_state为Normal代表数据库正常。
3.升级前保证数据库互信正常,可以在任意节点上,通过ssh hostname命令,连接另外一个节点进行验证。如果各机器间互连不用输入密码,说明互信正常(通常数据库状态正常时,互信一般都是正常的)。
4.升级前后,数据库的部署方式(配置文件)不能发生变化。升级前会对部署方式进行校验,如果改变,会报错。
5.升级前要保证操作系统处于健康状态,通过gs_checkos工具可以完成操作系统状态检查。
6.数据库运行正常且主DN的数据完全同步到备DN。
7.升级需要guc参数enable_stream_replication=on,该参数为off时不允许升级。
8.若在openGauss2.1.0之前的版本中使用了MOT表,则不支持升级到openGauss2.1.0版本。
```

## 2. 升级操作
**@~@ 升级过程中一些注意事项 @~@**
```
1.升级操作不能和扩容、缩容同时执行。
2.升级过程中,不允许对wal_level,max_connections,max_prepared_transactions,max_locks_per_transaction这四个GUC参数的值进行修改。如果修改,会导致回滚后实例启动异常。
3.升级过程中不允许打开kerberos开关。
4.请不要修改安装包中解压出来的version.cfg文件。
5.如果升级过程中出现异常导致升级失败,需用户手动回滚,并且必须回滚成功后才能进行下一次升级,下一次升级必须重新执行预安装。
6.如果升级回滚成功后,再次升级成功,未提交阶段设置的GUC参数将失效。
7.执行升级的过程中请不要手动设置GUC参数。
8.灰度升级中,升级的时候都会产生不超过10s的业务中断。
9.升级过程中,必须保持内核版本与om版本一致才可执行om操作。这里的一致是指,内核代码和om代码都来自同一个软件包。如果执行了升级包的前置脚本却没有升级,或者升级回滚后没有执行基线包的前置脚本,就会造成内核代码和om代码的不一致。
10.升级过程中如果系统表新增了字段,升级后通过\d命令将查看不到这些新增的字段,此时通过select命令可以查到这些新增的字段。
11.灰度升级中,业务并发要小于200并发读加200并发写的情况。
12.执行gs_upgradectl -t auto-upgrade之后、提交之前,不能执行快照生成,即升级过程中不能执行快照生成。
```

**@~@ 升级策略 @~@**
```
就地升级:
    升级期间需停止业务进行,一次性升级所有节点。
灰度升级:
    灰度升级支持全业务操作,也是一次性升级所有节点,升级的时候会产生不超过10s的业务中断。(openGauss1.1.0之后的版本支持该功能)
```

**@~@ 升级操作步骤 @~@**
```
1.以root身份登录节点。
  创建新包目录:mkdir -p /opt/software/gaussdb_upgrade
  将需要更新的新包上传至目录"/opt/software/gaussdb_upgrade"并解压
  进入安装包解压出的script目录下:
  cd /opt/software/gaussdb_upgrade/script
2.在就地升级或灰度升级前执行前置脚本gs_preinstall
  ./gs_preinstall -U omm -G dbgrp -X /opt/software/GaussDB_Kernel/clusterconfig.xml
3.切换至omm用户,执行升级命令
  su - omm
  source gauss_env
  数据库状态正常时,使用如下命令进行就地升级或者灰度升级
  示例一:使用gs_upgradectl脚本执行就地升级。
  gs_upgradectl -t auto-upgrade -X /opt/software/GaussDB_Kernel/clusterconfig.xml
  示例二:使用gs_upgradectl脚本执行灰度升级。
  gs_upgradectl -t auto-upgrade -X /opt/software/GaussDB_Kernel/clusterconfig.xml --grey
4.回滚
  如果不想升级,或者升级失败,执行回滚命令
  gs_upgradectl -t auto-rollback -X /opt/software/GaussDB_Kernel/clusterconfig.xml

  Notes:回滚之后,如果想再次升级,需从预安装开始
5.提交
  gs_upgradectl -t auto-commit -X /opt/software/GaussDB_Kernel/clusterconfig.xml

  Notes:提交之后,不能回滚
```
**@~@ 升级过程出现的问题 @~@**
```
1.升级步骤中预安装gs_preinstall出错
  问题:报错安装目录不为空
  原因:om预安装如果没有环境变量分离,环境变量会默认写入预安装-U参数后面的用户的/home/omm/.bashrc。旧包中环境变量分离,因此会将环境变量写入用户指定的文件中;而当新包升级步骤中预安装没有环境变量分离时,系统会自动去找用户下面的/home/omm/.bashrc,此时这里没有GAUSS_ENV变量,环境默认此次预安装是第一次。预安装会检查:如果是第一次预安装,则安装目录必须为空,但是在旧包安装的时候,安装目录就已经有东西,因此此时会报错。
  解决:如果旧包预安装的时候环境变量分离,升级步骤中的预安装操作也必须要环境变量分离,这样就可以在用户指定的环境变量文件中发现参数是GAUSS_ENV=1,此时可继续执行预安装。

2.gs_upgradectl出错
  问题:新包和旧包commitID一样
  Failed to upgrade strategy: New cluster commitid cannot be same with old cluster commitid
  原因:包有问题,新包没有最新的提交
  解决:检查并替换新包

3.gs_upgradectl -t auto-upgrade
  问题:升级执行未提交状态,快照无法生成
  原因:内核快照线程会判断upgrade_mode这个参数,如果upgrade_mode!=0,也就是在升级过程中,快照线程为防止数据错误,所以会退出线程,不生成快照

4.升级过程中开启线程池参数,导致升级失败
  问题:enable_thread_pool=on,灰度升级时报错
  原因:线程池开启的情况下,用于主备复制的端口只能是数据库监听端口+1。监听端口号即postgresql.conf里面的replconninfoX对应的localport(localport = port + 1)。如果线程池模式开启的情况下,localport不是port+1得到,日志会报错replication should connect HA port in thread pool。
  解决:开启线程池,就将localport设置为port + 1;如果线程池关闭,则localport可以设置为其他数字。
```

## 3. 升级后
**@~@ 一些注意事项 @~@**
```
1.查看数据库状态是否正常
  gs_om -t status --detail
2.连接数据库,测试新增功能是否支持(示意命令见下)
```
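升级后检查的示意命令如下(端口以实际环境为准):

```
# 查看数据库状态是否正常
gs_om -t status --detail

# 连接数据库,确认内核版本
gsql -d postgres -p 5432 -r -c "select version();"
```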
## 4. 升级脚本介绍

**重要提示**:

升级过程通过执行升级sql脚本实现系统表变更,这些脚本必须由开发人员在修改系统表的同时一并提供,请将这些脚本代码提交至openGauss-server/src/include/catalog/upgrade_sql目录下,该目录结构如下所示。

### 4.1.升级目录介绍

```c++
src/include/catalog/upgrade_sql/
├── upgrade_catalog_maindb/       -- 1.存放在首个数据库上执行的系统表变更sql文件(一般指postgres数据库)
│   ├── upgrade_catalog_maindb_92_308.sql        -- 前置脚本
│   ├── upgrade-post_catalog_maindb_92_308.sql   -- 后置脚本
├── upgrade_catalog_otherdb/      -- 2.存放其它数据库上执行的系统表变更sql文件(一般指除postgres数据库之外的数据库)
│   ├── upgrade_catalog_otherdb_92_308.sql       -- 前置脚本
│   ├── upgrade-post_catalog_otherdb_92_308.sql  -- 后置脚本
├── rollback_catalog_maindb/      -- 3.存放在首个数据库上执行的系统表变更失败回退所用sql文件
│   ├── rollback_catalog_maindb_92_308.sql
│   ├── rollback-post_catalog_maindb_92_308.sql
├── rollback_catalog_otherdb/     -- 4.存放其它数据库上执行的系统表变更失败回退所用sql文件
│   ├── rollback_catalog_otherdb_92_308.sql
│   ├── rollback-post_catalog_otherdb_92_308.sql
├── check_upgrade/                -- 5.存放系统表变更完成之后校验变更是否成功的sql文件
│   ├── check_upgrade_67_014.sql
```

<**问题集合**>

- 目录中的5类脚本分别是什么?

  第1、第2类脚本是系统表变更脚本,第3、第4类脚本是系统表变更的回退脚本,第5类是系统表变更的检查脚本,检查脚本由开发根据需要提交。

  **切记切记切记!!!** 前4类必须要有

- 前置脚本和后置脚本的区别?

  执行对象不同,前置脚本在旧版本数据库上执行,后置脚本在新版本上执行

- 变更和回退脚本都各有两类,一类是maindb,一类是otherdb?

  是因为当要创建共享系统表或共享对象时,我们只需要在对第一个库执行变更脚本的过程中创建物理表文件或共享对象即可。我们通过GUC参数控制是否创建共享系统表的物理文件;我们只在第一个库的系统表变更脚本中添加共享对象的创建语句。因此,对于第一个库(maindb)和剩余的库(otherdb),系统表变更脚本会略有不同。除此以外,没有其它差别。对于回退脚本,亦是如此。

### 4.2.升级脚本文件命名规则
- 类型前缀为upgrade_catalog_maindb、upgrade_catalog_otherdb、rollback_catalog_maindb、rollback_catalog_otherdb、check_upgrade、upgrade-post_catalog_maindb、upgrade-post_catalog_otherdb、rollback-post_catalog_maindb、rollback-post_catalog_otherdb之一

- 脚本类型后缀是(文件version.cfg内核版本号的整数位)_(文件version.cfg内核版本号的小数位).sql

  ```c++
  例如:文件version.cfg内容
  2.0.0     --写入发布版本号
  92.298    --内核版本号
  68362709  --最后一次提交号
  ```

  ```c++
  脚本命名:upgrade_catalog_maindb_92_298.sql
  ```

<**问题集合**>

- 想一想文件version.cfg从何而来?

  很神奇的是从内核源码打包生成version.cfg文件。version.cfg为开发合入本次代码之后的产品版本号,存在build/script/package_internal.sh的read_mpp_version()函数中。

  < **不容易看到的额外扩展哦!!!**>:打包主要过程为build.sh ---> package_internal.sh ---> read_mpp_version[写入发布版本号2.0.0] ---> read_mpp_number[写入内核版本号92.298]

- 什么时候version.cfg的内核版本号会修改?

  如果开发本次合入的代码不涉及系统表修改,那么不用修改内核版本号,继而打包的时候version.cfg文件中的内核版本号也不会改变;
  如果开发本次合入的代码修改了系统表,并且遵循升级约束,那么需要修改内核版本号(在内核源码中对内核版本号数值增加0.001),继而打包的时候version.cfg的数值会被修改。同时,请提交上述至少4类的系统表变更和回退脚本,打包的时候会以新的version.cfg来进行命名。

  < **思维很混乱,需要急救,那开发到底需要做些什么呢!!!**>:如果开发本次合入的代码不涉及系统表修改,那么开发会很窃喜,因为什么都不用做@开发者。如果开发本次合入的代码修改了系统表,开发者可得长点心了,天降大任于你,必先使其暴跳如雷。此时是需要适配升级的,主要要做三件事:第1,修改内核版本号;第2,添加升级脚本;第3,验证升级。

  预知更多,突破自己,请看下节分享-----------内核版本号

------

## 5.升级版本控制

### 5.1.内核版本号

在openGauss-server/src/backend/utils/init/globals.cpp中,定义了常量GRAND_VERSION_NUM的值,该版本号标示内核代码实际对应的版本,类型为uint32。

**¥-¥- 敲黑板,划重点啦@新秀开发者 -¥-¥**

首先,gaussdb进程运行中,有一个全局变量WorkingGrandVersionNum,用于标志gaussdb进程实际应该遵循的版本行为。

在正常情况下,WorkingGrandVersionNum等于GRAND_VERSION_NUM;

在升级过程中,WorkingGrandVersionNum等于老版本的版本号(此时,应小于或等于新版本的GRAND_VERSION_NUM)。对于每一个后台线程,在启动时会将当时的WorkingGrandVersionNum赋值给t_thrd.proc->workingVersionNum,表示数据库当前运行版本是什么。

<**后期通过内核代码详解版本号,这里会有新发现,敬请关注shirley_zhengx**>

### 5.2 内核版本号前向兼容

内核版本号用于内核前向兼容时使用。主要使用的方法和场景如下:
1、**给系统表新增列**:假设包版本2.0.0(内核版本号92.298),在2.0.1版本中(内核版本号92.299)在系统表pg_authid后新增1列parentid,通过heap_open和heap_getattr函数读取新增的列,如下GetUserDataFromCatalog函数中:

```c
/* Before GrandVersionNum 92298, pg_authid does not have following columns */
if (t_thrd.proc->workingVersionNum >= 92298) {
    Datum authidparentidDatum = heap_getattr(tup, Anum_pg_authid_rolparentid, pg_authid_des, &isNull);
}
```

2、**新增系统表**:通过内核版本号来避免在老版本模式下对新增系统表的访问。例如新版本开发的定时任务功能中新增pg_job系统表,在老版本模式下,我们需要通过禁止定时任务特性的开启,以避免对新增系统表的访问。

```c
/* Before GrandVersionNum 92298, we do not support scheduled job */
if (IS_PGXC_COORDINATOR && PgJobSchdPID == 0 && pmState == PM_RUN &&
    (job_queue_processes || start_job_scheduler) && t_thrd.proc->workingVersionNum >= 92298) {
    PgJobSchdPID = StartJobScheduler();
    if (PgJobSchdPID != 0) {
        start_job_scheduler = false;
        elog(LOG, "job scheduler started, pid=%lu", PgJobSchdPID);
    }
}
```

- [ ] **武林秘笈之内核版本号**:内核版本号保证新增系统表修改的前向兼容性

## 6.系统表变更--升级脚本撰写
麻烦小伙伴们移步至shirley_zhengx先生呕心沥血的升级脚本撰写:[升级脚本撰写](/zh/post/zhengxue/upgrade_script/)

**@~@ 一些注意事项 @~@**
diff --git a/content/zh/post/zhengxue/upgrade_script.md b/content/zh/post/zhengxue/upgrade_script.md new file mode 100644 index 0000000000000000000000000000000000000000..a91b33de62269e8313804b4c0516c2a15c8f96a4 --- /dev/null +++ b/content/zh/post/zhengxue/upgrade_script.md @@ -0,0 +1,418 @@

+++
title = "openGauss升级脚本撰写"
date = "2021-09-30"
tags = ["openGauss升级脚本撰写"]
archives = "2021-09-30"
author = "shirley_zhengx"
summary = "openGauss升级脚本撰写"
img = "/zh/post/zhengxue/title/img1.png"
times = "9:30"
+++

[TOC]

## 1. 概述

**重要提示**:

升级过程通过执行升级sql脚本实现系统表变更,这些脚本必须由开发人员在修改系统表的同时一并提供,请将这些脚本代码提交至openGauss-server/src/include/catalog/upgrade_sql目录下,该目录结构如下所示。

### 1.1.升级目录介绍

```c++
src/include/catalog/upgrade_sql/
├── upgrade_catalog_maindb/       -- 1.存放在首个数据库上执行的系统表变更sql文件(一般指postgres数据库)
│   ├── upgrade_catalog_maindb_92_308.sql        -- 前置脚本
│   ├── upgrade-post_catalog_maindb_92_308.sql   -- 后置脚本
├── upgrade_catalog_otherdb/      -- 2.存放其它数据库上执行的系统表变更sql文件(一般指除postgres数据库之外的数据库)
│   ├── upgrade_catalog_otherdb_92_308.sql       -- 前置脚本
│   ├── upgrade-post_catalog_otherdb_92_308.sql  -- 后置脚本
├── rollback_catalog_maindb/      -- 3.存放在首个数据库上执行的系统表变更失败回退所用sql文件
│   ├── rollback_catalog_maindb_92_308.sql
│   ├── rollback-post_catalog_maindb_92_308.sql
├── rollback_catalog_otherdb/     -- 4.存放其它数据库上执行的系统表变更失败回退所用sql文件
│   ├── rollback_catalog_otherdb_92_308.sql
│   ├── rollback-post_catalog_otherdb_92_308.sql
├── check_upgrade/                -- 5.存放系统表变更完成之后校验变更是否成功的sql文件
│   ├── check_upgrade_67_014.sql
```

<**问题集合**>

- 目录中的5类脚本分别是什么?

  第1、第2类脚本是系统表变更脚本,第3、第4类脚本是系统表变更的回退脚本,第5类是系统表变更的检查脚本,检查脚本由开发根据需要提交。

  **切记切记切记!!!** 前4类必须要有

- 前置脚本和后置脚本的区别?

  执行对象不同,前置脚本在旧版本数据库上执行,后置脚本在新版本上执行

- 变更和回退脚本都各有两类,一类是maindb,一类是otherdb?

  是因为当要创建共享系统表或共享对象时,我们只需要在对第一个库执行变更脚本的过程中创建物理表文件或共享对象即可。我们通过GUC参数控制是否创建共享系统表的物理文件;我们只在第一个库的系统表变更脚本中添加共享对象的创建语句。因此,对于第一个库(maindb)和剩余的库(otherdb),系统表变更脚本会略有不同。除此以外,没有其它差别。对于回退脚本,亦是如此。

### 1.2.升级脚本文件命名规则
- 类型前缀为upgrade_catalog_maindb、upgrade_catalog_otherdb、rollback_catalog_maindb、rollback_catalog_otherdb、check_upgrade、upgrade-post_catalog_maindb、upgrade-post_catalog_otherdb、rollback-post_catalog_maindb、rollback-post_catalog_otherdb之一

- 脚本类型后缀是(文件version.cfg内核版本号的整数位)_(文件version.cfg内核版本号的小数位).sql

  ```c++
  例如:文件version.cfg内容
  2.0.0     --写入发布版本号
  92.298    --内核版本号
  68362709  --最后一次提交号
  ```

  ```c++
  脚本命名:upgrade_catalog_maindb_92_298.sql
  ```

<**问题集合**>

- 想一想文件version.cfg从何而来?

  很神奇的是从内核源码打包生成version.cfg文件。version.cfg为开发合入本次代码之后的产品版本号,存在build/script/package_internal.sh的read_mpp_version()函数中。

  < **不容易看到的额外扩展哦!!!**>:打包主要过程为build.sh ---> package_internal.sh ---> read_mpp_version[写入发布版本号2.0.0] ---> read_mpp_number[写入内核版本号92.298]

- 什么时候version.cfg的内核版本号会修改?
  如果开发本次合入的代码不涉及系统表修改,那么不用修改内核版本号,继而打包的时候version.cfg文件中的内核版本号也不会改变;
  如果开发本次合入的代码修改了系统表,并且遵循升级约束,那么需要修改内核版本号(在内核源码中对内核版本号数值增加0.001),继而打包的时候version.cfg的数值会被修改。同时,请提交上述至少4类的系统表变更和回退脚本,打包的时候会以新的version.cfg来进行命名。

  < **思维很混乱,需要急救,那开发到底需要做些什么呢!!!**>:如果开发本次合入的代码不涉及系统表修改,那么开发会很窃喜,因为什么都不用做@开发者。如果开发本次合入的代码修改了系统表,开发者可得长点心了,天降大任于你,必先使其暴跳如雷。此时是需要适配升级的,主要要做三件事:第1,修改内核版本号;第2,添加升级脚本;第3,验证升级。

  预知更多,突破自己,请看下节分享-----------内核版本号

------

## 2.升级版本控制

### 2.1.内核版本号

在openGauss-server/src/backend/utils/init/globals.cpp中,定义了常量GRAND_VERSION_NUM的值,该版本号标示内核代码实际对应的版本,类型为uint32。

**¥-¥- 敲黑板,划重点啦@新秀开发者 -¥-¥**

首先,gaussdb进程运行中,有一个全局变量WorkingGrandVersionNum,用于标志gaussdb进程实际应该遵循的版本行为。

在正常情况下,WorkingGrandVersionNum等于GRAND_VERSION_NUM;

在升级过程中,WorkingGrandVersionNum等于老版本的版本号(此时,应小于或等于新版本的GRAND_VERSION_NUM)。对于每一个后台线程,在启动时会将当时的WorkingGrandVersionNum赋值给t_thrd.proc->workingVersionNum,表示数据库当前运行版本是什么。

<**后期通过内核代码详解版本号,这里会有新发现,敬请关注shirley_zhengx**>

### 2.2 内核版本号前向兼容

内核版本号用于内核前向兼容时使用。主要使用的方法和场景如下:
1、**给系统表新增列**:假设包版本2.0.0(内核版本号92.298),在2.0.1版本中(内核版本号92.299)在系统表pg_authid后新增1列parentid,通过heap_open和heap_getattr函数读取新增的列,如下GetUserDataFromCatalog函数中:

```c
/* Before GrandVersionNum 92298, pg_authid does not have following columns */
if (t_thrd.proc->workingVersionNum >= 92298) {
    Datum authidparentidDatum = heap_getattr(tup, Anum_pg_authid_rolparentid, pg_authid_des, &isNull);
}
```

2、**新增系统表**:通过内核版本号来避免在老版本模式下对新增系统表的访问。例如新版本开发的定时任务功能中新增pg_job系统表,在老版本模式下,我们需要通过禁止定时任务特性的开启,以避免对新增系统表的访问。

```c
/* Before GrandVersionNum 92298, we do not support scheduled job */
if (IS_PGXC_COORDINATOR && PgJobSchdPID == 0 && pmState == PM_RUN &&
    (job_queue_processes || start_job_scheduler) && t_thrd.proc->workingVersionNum >= 92298) {
    PgJobSchdPID = StartJobScheduler();
    if (PgJobSchdPID != 0) {
        start_job_scheduler = false;
        elog(LOG, "job scheduler started, pid=%lu", PgJobSchdPID);
    }
}
```

- [ ] **武林秘笈之内核版本号**:内核版本号保证新增系统表修改的前向兼容性

## 3.系统表变更--升级脚本撰写

### 3.1.GUC参数说明

撰写系统表变更脚本时,会使用到一个GUC参数inplace_upgrade_next_system_object_oids,用于对新增的系统对象指定oid。该GUC参数为string类型,根据不同的对象类型,该GUC参数具有不同的字段数量,各字段间以逗号隔开。具体用法如下:

1、系统表table

| 对象类型 | | 字段1 | 字段2 | 字段3 | 字段4 | 字段5 | 字段6 | 字段7 |
| -------- | ---- | :---: | ---- | ---- | ---- | ---- | ---- | ---- |
| 系统表 | 取值 | IUO_CATALOG | true / false | true / false | 0 - 16384 | 0 - 16384 | 0 - 16384 | 0 - 16384 |
| | 涵义 | 表对象 | 是否共享表 | 是否创建物理表文件 | 表的oid | 复合类型的oid | TOAST表的oid,若无则为0 | TOAST表索引表的oid,若无则为0 |

**【共享表】**:系统表存在共享与非共享之分,粗暴地理解就是:对于一个系统表pg_table1(假设的系统表,实际上没有),如果不同数据库(maindb,otherdb)共用这个系统表,说明这个系统表是共享表;如果不同数据库有各自的这个系统表(maindb有一个自己的pg_table1,otherdb也有一个自己的pg_table1),则它不是共享表。

2、索引表

| 对象类型 | | 字段1 | 字段2 | 字段3 | 字段4 | 字段5 | 字段6 | 字段7 |
| -------- | ---- | :---: | ---- | ---- | ---- | ---- | ---- | ---- |
| 索引表 | 取值 | IUO_CATALOG | true / false | true / false | 0 | 0 | 0 | 0 - 16384 |
| | 涵义 | 表对象 | 是否共享表 | 是否创建物理表文件 | 非主表 | 索引表无复合类型 | 索引表无TOAST表 | 索引表oid |

3、函数function

| 对象类型 | | 字段1 | 字段2 | 字段3 | 字段4 | 字段5 | 字段6 | 字段7 |
| -------- | ---- | :---: | ---- | ---- | ---- | ---- | ---- | ---- |
| 函数 | 取值 | IUO_PROC | 0 - 16384 | | | | | |
| | 涵义 | 函数对象 | 函数oid | | | | | |

4、类型type

| 对象类型 | | 字段1 | 字段2 | 字段3 | 字段4 | 字段5 | 字段6 | 字段7 |
| -------- | ---- | :---: | ---- | ---- | ---- | ---- | ---- | ---- |
| 类型 | 取值 | IUO_TYPE | 0 - 16384 | 0 - 16384 | b / p | | | |
| | 涵义 | 类型对象 | 类型oid | 数组类型oid;若无则为0 | b:基础类型;p:伪类型 | | | |

5、其它通用对象

| 对象类型 | | 字段1 | 字段2 | 字段3 | 字段4 | 字段5 | 字段6 | 字段7 |
| -------- | ---- | :---: | ---- | ---- | ---- | ---- | ---- | ---- |
| 其它通用对象 | 取值 | IUO_GENERAL | 0 - 16384 | | | | | |
| | 涵义 | 一般对象 | 对象oid | | | | | |

**温馨提示**:目前只支持表、函数和类型的显式oid指定,这三类对象也是开发人员最经常修改的;
另外,通用对象的oid指定是为了给那些没有DDL语句支持的系统对象指定oid,目前主要包括pg_am,pg_amop,pg_amproc,pg_pltemplate这几个表

### 3.2.系统表变更撰写顺序

**正向脚本**:

新增系统表 写在前置非post脚本中
新增函数 写在后置post脚本中
新增视图 写在后置post脚本中

其它新增系统对象请按依赖关系添加至系统表变更脚本中

**回滚脚本**:

删除视图 写在后置post脚本中
删除函数 写在后置post脚本中
删除系统表 写在前置非post脚本中

其它新增系统对象请按依赖关系(一般逆着正向脚本的顺序)添加至系统表回滚脚本中

### 3.3. 新增系统表

(以共享的系统表pg_job和索引表pg_job_oid_index为例)

- (1) 正向脚本:(对应upgrade_catalog_maindb_xxx_xxx.sql和upgrade_catalog_otherdb_xxx_xxx.sql,两者内容几乎一样,只是GUC参数略有差异)

```sql
--第一步:设置GUC参数,指定oid和其它一些信息
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 9022, 3796, 0, 0;

--第二步:建表DDL语句
CREATE TABLE pg_catalog.pg_job
(
    job_id INT8 NOCOMPRESS NOT NULL,
    ……,
    --以上为定长列,以下为变长列
    interval TEXT NOCOMPRESS,
    ……
) WITH OIDS TABLESPACE pg_global;

--第三步:设置GUC参数,指定索引表的oid和其它一些信息
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 0, 0, 0, 3453;

--第四步:建索引DDL语句
CREATE UNIQUE INDEX pg_job_oid_index ON pg_catalog.pg_job USING BTREE(oid OID_OPS);

--第五步:恢复GUC的默认参数
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;
```

**温馨提示**:

1.如果一次增加了多张系统表,只需在最后恢复GUC的默认参数即可(但需要在新增视图之前)。

2.宏CATALOG_NUM数量加1。

3.由于系统表进行了硬编码的处理,所以新增系统表头文件中需要加入BKI_SCHEMA_MACRO。

4.在relcache.cpp文件开头加上新增系统表的FormData_pg_attribute数组,如下图所示,Schema_pg_job为编译生成在schemapg.h中的数组,数组中存的是这张系统表在pg_attribute中的记录。

![](../images/upgrade_script/FormData_pg_attribute.png)

5.在relcache.cpp文件catalogBuildParam数组中加入新增系统表的信息(按照其他系统表格式来加即可),按照oid升序的方式有序加入,各项意义如下图所示:

![](../images/upgrade_script/catalogBuildParam.png)

![](../images/upgrade_script/catalogBuildParam_struction.png)

- (2) 回退脚本:(对应rollback_catalog_maindb_xxx_xxx.sql和rollback_catalog_otherdb_xxx_xxx.sql,两者内容相同)

```sql
--第一步:删除索引表
DROP INDEX IF EXISTS pg_catalog.pg_job_oid_index;

--第二步:删除复合类型
DROP TYPE IF EXISTS pg_catalog.pg_job;

--第三步:删除主表
DROP TABLE IF EXISTS pg_catalog.pg_job;
```

**重点----回滚顺序**:若新增的是系统表附带TOAST表,那么回滚脚本中首先需要删除TOAST表的索引表、类型和TOAST表本身;然后再删除主表上的索引表、复合类型和主表本身。

### 3.4.系统表追加列

由于系统表做了硬编码的处理,以后对系统表追加列无需再写升级和回滚脚本。
**特殊处理**:对于pg_attribute系统表追加列,需要在src/common/backend/catalog/genbki.pl文件中对新增列的默认值进行赋值,如下图所示:

![](../images/upgrade_script/pg_attribute.png)

### 3.5.新增系统函数(以gs_wlm_get_session_info为例)

- (1) 正向脚本:(对应upgrade_catalog_maindb_xxx_xxx.sql和upgrade_catalog_otherdb_xxx_xxx.sql,两者内容完全相同)

![](../images/upgrade_script/gs_wlm_get_session_info.png)

- (2) 回退脚本:(对应rollback_catalog_maindb_xxx_xxx.sql和rollback_catalog_otherdb_xxx_xxx.sql,两者内容相同)

```sql
--第一步:删除函数
DROP FUNCTION IF EXISTS pg_catalog.gs_wlm_get_session_info(int8);
```

### 3.6.修改系统函数

一般,我们只建议修改函数的内部实现或出参名称,其它属性不建议修改。在这种情况下,只需要使用CREATE OR REPLACE FUNCTION语句就可以实现修改,写法与新增系统函数一样,只是增加了OR REPLACE关键字,并且可以不需要设置GUC参数;

如果确有需要更改其它属性,需要通过先删除、后新增的方式来创建。删除的时候,请带上CASCADE关键字。

### 3.7 新增或修改系统视图

(1) 新增系统视图

新增系统视图的DDL语句和system_views.sql中的完全一致;
回退脚本使用DROP VIEW IF EXISTS语句即可。

(2) 修改系统视图

修改系统视图使用CREATE OR REPLACE VIEW语句,只支持在原有系统视图最后增加列的修改方式;此时,回退脚本也使用CREATE OR REPLACE VIEW语句。

若需要实现其它修改方式,只能先删除原视图,再重新创建。删除的时候,请带上CASCADE关键字。

### 3.8 新增系统类型

(以event_trigger为例)

- (1) 正向脚本:(对应upgrade_catalog_maindb_xxx_xxx.sql和upgrade_catalog_otherdb_xxx_xxx.sql,两者内容完全相同)

```sql
--第一步:设置GUC参数,指定oid
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_TYPE, 3838, 0, p;

--第二步:创建shell类型
CREATE TYPE pg_catalog.event_trigger;

--第三步:创建event_trigger类型的input和output函数
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3594;
CREATE FUNCTION pg_catalog.event_trigger_in(cstring) RETURNS pg_catalog.event_trigger LANGUAGE INTERNAL IMMUTABLE AS 'event_trigger_in';
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3595;
CREATE FUNCTION pg_catalog.event_trigger_out(pg_catalog.event_trigger) RETURNS CSTRING LANGUAGE INTERNAL IMMUTABLE STRICT AS 'event_trigger_out';

--第四步:再次创建该类型,完善所有信息
CREATE TYPE pg_catalog.event_trigger (input=event_trigger_in,output=event_trigger_out,internallength=4,passedbyvalue,CATEGORY=p);
```

- (2) 回退脚本:(对应rollback_catalog_maindb_xxx_xxx.sql和rollback_catalog_otherdb_xxx_xxx.sql,两者内容相同)

```sql
--第一步:删除输入输出函数
DROP FUNCTION IF EXISTS pg_catalog.event_trigger_in(cstring);
DROP FUNCTION IF EXISTS pg_catalog.event_trigger_out(pg_catalog.event_trigger);

--第二步:删除类型
DROP TYPE IF EXISTS pg_catalog.event_trigger;
```

### 3.9.新增通用对象(无DDL语句支持的对象,以pg_am中新增一行记录为例)

- (1) 正向脚本:(对应upgrade_catalog_maindb_xxx_xxx.sql和upgrade_catalog_otherdb_xxx_xxx.sql,pg_am不是共享表,因此两者内容完全相同)

```sql
--第一步:创建插入记录的临时函数
CREATE OR REPLACE FUNCTION Insert_pg_am_temp()
RETURNS void
AS $$
DECLARE
row_name record;
query_str text;
query_str_nodes text;
BEGIN
query_str_nodes := 'SELECT node_name,node_host,node_port FROM pgxc_node';
FOR row_name IN EXECUTE(query_str_nodes) LOOP
query_str := 'EXECUTE DIRECT ON (' || row_name.node_name || ') ''insert into pg_am values (''''fake_btree'''',5,2,true,false,true,true,true,true,true,true,false,true,true,0,331,''''btbeginscan'''',''''btgettuple'''',''''btgetbitmap'''',''''btrescan'''',''''btendscan'''',''''btmarkpos'''',''''btrestrpos'''',''''btmerge'''',''''btbuild'''',''''btbuildempty'''',''''btbulkdelete'''',''''btvacuumcleanup'''',''''btcanreturn'''',''''btcostestimate'''',''''btoptions'''')''';
EXECUTE(query_str);
END LOOP;
return;
END; $$
LANGUAGE 'plpgsql';

--第二步:设置GUC参数,指定oid
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 5033;

--第三步:执行插入函数
SELECT Insert_pg_am_temp();

--第四步:删除临时函数
DROP FUNCTION Insert_pg_am_temp();
```

**@~@ 一些注意事项 @~@**

```sql
1、所有对象的删除,请根据前述要求适当添加IF EXISTS关键字和CASCADE关键字;

2、如果某次代码提交涉及多个对象的新增和修改,请按照依赖关系排序;在回滚脚本中,需要逆序删除

3、包版本号和内核版本号,以及前向兼容性,请务必要注意

4、本地和门禁的fastcheck中,均已经集成就地升级
```
diff --git "a/content/zh/post/zhengxue/upgrade_script\347\273\217\351\252\214\346\200\273\347\273\223.md" "b/content/zh/post/zhengxue/upgrade_script\347\273\217\351\252\214\346\200\273\347\273\223.md" new file mode 100644 index 0000000000000000000000000000000000000000..6930430e076eda7334f68b19b47af4200e6afaaa --- /dev/null +++ "b/content/zh/post/zhengxue/upgrade_script\347\273\217\351\252\214\346\200\273\347\273\223.md" @@ -0,0 +1,198 @@

+++
title = "openGauss升级脚本撰写经验总结"
date = "2021-09-30"
tags = ["openGauss升级脚本撰写经验总结"]
archives = "2021-09-30"
author = "shirley_zhengx"
summary = "openGauss升级脚本撰写经验总结"
img = "/zh/post/zhengxue/title/img1.png"
times = "9:30"
+++

[TOC]

## 1. Function

正向

```sql
DROP FUNCTION IF EXISTS pg_catalog.gin_compare_jsonb(text, text) CASCADE;

SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3498;

CREATE FUNCTION pg_catalog.gin_compare_jsonb (
text, text
) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'gin_compare_jsonb';
```

反向

```
DROP FUNCTION IF EXISTS pg_catalog.gin_compare_jsonb(text, text) CASCADE;
```

语法: http://postgres.cn/docs/12/sql-createfunction.html

系统表: http://postgres.cn/docs/12/catalog-pg-proc.html

注意函数属性、函数参数,可以参考------升级脚本生成sql模板。

函数参数列表基本使用 \df 查看到的结果就可以。但是drop语句中的参数列表不可以带默认值。

## 2.Type

正向

```sql
--第一步:设置GUC参数,指定oid
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_TYPE, 3838, 0, p; -- oid, 数组类型oid, type类型
--第二步:创建shell类型
CREATE TYPE pg_catalog.event_trigger;
--第三步:创建event_trigger类型的input、output、send、recv函数等,参考function
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3594;
CREATE FUNCTION pg_catalog.event_trigger_in(cstring)… …
--第四步:再次创建该类型,完善所有信息
CREATE TYPE pg_catalog.event_trigger (input=event_trigger_in,output=event_trigger_out,internallength=4,passedbyvalue,CATEGORY=p);
```

反向

```sql
--第一步:删除输入输出函数
DROP FUNCTION IF EXISTS pg_catalog.event_trigger_in(cstring);
DROP FUNCTION IF EXISTS pg_catalog.event_trigger_out(pg_catalog.event_trigger);
--第二步:删除类型
DROP TYPE IF EXISTS pg_catalog.event_trigger;
```

语法: http://postgres.cn/docs/12/sql-createtype.html

系统表: http://postgres.cn/docs/12/catalog-pg-type.html

## 3.Aggregate

正向

```sql
-- 第一步,创建agg的阶段函数,参考function
CREATE FUNCTION pg_catalog.json_object_agg_transfn…
-- 第二步,创建agg函数
drop aggregate if exists pg_catalog.json_object_agg("any", "any");
SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3403;
create aggregate pg_catalog.json_object_agg("any", "any") (SFUNC=json_object_agg_transfn, STYPE= internal, finalfunc = json_object_agg_finalfn);
```

反向

```
-- 第一步,删除agg函数
drop aggregate if exists pg_catalog.json_object_agg("any", "any");
-- 第二步,删除agg阶段函数,参考function
DROP FUNCTION IF EXISTS pg_catalog.json_object_agg_transfn…
```

语法: http://postgres.cn/docs/12/sql-createaggregate.html

系统表: http://postgres.cn/docs/12/catalog-pg-aggregate.html

## 4.通用对象插入函数模板:

```sql
CREATE OR REPLACE FUNCTION Insert_pg_opclass_temp(
IN icmethod integer,
IN icname text,
IN icnamespace integer,
IN icowner integer,
IN icfamily integer,
IN icintype integer,
IN icdefault boolean,
IN ickeytype integer
)
RETURNS void
AS $$
DECLARE
    row_name record;
    query_str_nodes text;
BEGIN
    query_str_nodes := 'select * from dbe_perf.node_name';
    FOR row_name IN
EXECUTE(query_str_nodes) LOOP + insert into pg_catalog.pg_opclass values (icmethod, icname, icnamespace, icowner, icfamily, icintype, icdefault, ickeytype); + END LOOP; + return; +END; $$ +LANGUAGE 'plpgsql'; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_GENERAL, 4033; +select Insert_pg_opclass_temp(403, 'jsonb_ops', 11, 10, 4033, 3802, true, 0); +DROP FUNCTION Insert_pg_opclass_temp(); +``` + + + +## 5.通用对象删除函数模板: + +```sql +CREATE OR REPLACE FUNCTION Delete_pg_opclass_temp() +RETURNS void +AS $$ +DECLARE +row_name record; +query_str text; +query_str_nodes text; +BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + delete from pg_catalog.pg_opclass where opcfamily in (4033, 4034, 4035, 4036, 4037); + END LOOP; +return; +END; +$$ LANGUAGE 'plpgsql'; +SELECT Delete_pg_opclass_temp(); +DROP FUNCTION Delete_pg_opclass_temp(); +``` + + + +## 6.升级脚本生成sql模板 + +``` +create table addfuncs(name varchar(100)); insert into addfuncs values('gin_compare_jsonb'), … +SELECT format( +'DROP FUNCTION IF EXISTS pg_catalog.%s(%s) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, %s; +CREATE FUNCTION pg_catalog.%s ( +%s +) RETURNS %s LANGUAGE INTERNAL %s %s as ''%s''; +%s', + proname, + pg_catalog.pg_get_function_arguments(oid), + oid, + proname, + pg_catalog.pg_get_function_arguments(oid), + pg_catalog.pg_get_function_result(oid), + case when provolatile='i' then 'IMMUTABLE' when provolatile='s' then 'STABLE' when provolatile='v' then 'VOLATILE' END, + case when proisstrict = 't' then 'STRICT' else '' end, + prosrc, + case when proisagg='t' or proiswindow='t' then '--ERROR: THIS FUNCTION ABOVE IS AN AGG OR WINDOW' end) +FROM pg_proc +WHERE proname in (select name from addfuncs); +``` + + + diff --git a/content/zh/post/zhou-yuxiang/.keep b/content/zh/post/zhou-yuxiang/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/content/zh/post/zhou-yuxiang/img/.keep b/content/zh/post/zhou-yuxiang/img/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image001.png b/content/zh/post/zhou-yuxiang/img/01/clip_image001.png new file mode 100644 index 0000000000000000000000000000000000000000..a6c35f897e2c030c3f98bc0e0797f3255a3d01ba Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image001.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image002.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b507d4f936a99b2d50f06e725314e739d6b6cec8 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image002.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image003.png b/content/zh/post/zhou-yuxiang/img/01/clip_image003.png new file mode 100644 index 0000000000000000000000000000000000000000..535a3cc7b1052a940e5c8c5b3a6d8106a4c3528a Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image003.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image004.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6801f5376691cdd8aa20152b21a1d0f38371bc49 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image004.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image005.png 
b/content/zh/post/zhou-yuxiang/img/01/clip_image005.png new file mode 100644 index 0000000000000000000000000000000000000000..ad7a9f819733e1ab5ff4975ca8b375d86ac016e0 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image005.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image006.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..999b92aaf0ea0bccb699074a521bc2812d210f89 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image006.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image007.png b/content/zh/post/zhou-yuxiang/img/01/clip_image007.png new file mode 100644 index 0000000000000000000000000000000000000000..ff2bf33f60a6fc8eb15fd71a83b865f0d753fdff Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image007.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image008.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image008.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c00c0544c06467ce1bac88454f6a2ad23baab64e Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image008.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image009.png b/content/zh/post/zhou-yuxiang/img/01/clip_image009.png new file mode 100644 index 0000000000000000000000000000000000000000..a00981e6d697454e65631491913f084329a8e86a Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image009.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image010.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3dca7820d4283df6236c01801507d0935931c532 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image010.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image011.png b/content/zh/post/zhou-yuxiang/img/01/clip_image011.png new file mode 100644 index 0000000000000000000000000000000000000000..87c6c69539affc99f5292dc7b6b66682feab72f3 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image011.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image012.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image012.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bda5eb2f1e15337e615d325594a9a85143407dd8 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image012.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image013.png b/content/zh/post/zhou-yuxiang/img/01/clip_image013.png new file mode 100644 index 0000000000000000000000000000000000000000..3cb540ecb4c9894d997c830d69311e44e99d0651 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image013.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image014.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91e8300005a3967924f6f7892671f52f13eb6d14 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image014.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image015.png b/content/zh/post/zhou-yuxiang/img/01/clip_image015.png new file mode 100644 index 0000000000000000000000000000000000000000..68607e9a2e1869aa5329a963e401d9d383fc7f41 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image015.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image016.jpg 
b/content/zh/post/zhou-yuxiang/img/01/clip_image016.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bb8777dc2f39ab45ec13c9a3c40ba83a915504a Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image016.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image017.png b/content/zh/post/zhou-yuxiang/img/01/clip_image017.png new file mode 100644 index 0000000000000000000000000000000000000000..110a60a65cfadf27ca9e30faaa381b9fbfa09382 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image017.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image018.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image018.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73359803032b8f52f4a05ce151cf22e3e75034ae Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image018.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image019.png b/content/zh/post/zhou-yuxiang/img/01/clip_image019.png new file mode 100644 index 0000000000000000000000000000000000000000..e003a2c8d60506e66f20ce151e30766a666f60df Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image019.png differ diff --git a/content/zh/post/zhou-yuxiang/img/01/clip_image020.jpg b/content/zh/post/zhou-yuxiang/img/01/clip_image020.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5616e2b0469242632676c2bd51d7da9088adefe7 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/01/clip_image020.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/.keep b/content/zh/post/zhou-yuxiang/img/02/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image021.png b/content/zh/post/zhou-yuxiang/img/02/clip_image021.png new file mode 100644 index 0000000000000000000000000000000000000000..6e5a85f84c64ca4b9a908bb37380ae520bab639b Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image021.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image022.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image022.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fc3ff2963e953877498427e23d057cc96acc831 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image022.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image023.png b/content/zh/post/zhou-yuxiang/img/02/clip_image023.png new file mode 100644 index 0000000000000000000000000000000000000000..9f1a433f70561b85a93f9d849c43a0f90bbf4225 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image023.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image024.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image024.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df9d43e6c2b574cb93d289f5ac977388de54dd22 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image024.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image025.png b/content/zh/post/zhou-yuxiang/img/02/clip_image025.png new file mode 100644 index 0000000000000000000000000000000000000000..7800baadea8717b703b5d105f553278c574dc512 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image025.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image026.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image026.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..37b8b30b8800c5ce59202fb93eaaa3a00168f586 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image026.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image027.png b/content/zh/post/zhou-yuxiang/img/02/clip_image027.png new file mode 100644 index 0000000000000000000000000000000000000000..b69b2809c087f7d9ff095e22872c1fb94f762aee Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image027.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image028.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image028.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5f019396fe02eaa92c588bef9db4a3663394166 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image028.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image029.png b/content/zh/post/zhou-yuxiang/img/02/clip_image029.png new file mode 100644 index 0000000000000000000000000000000000000000..fca8bc42f60a33367b895586b276d5aec5467bda Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image029.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image030.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c54452df47c058e93e7b0ca1bbef1119d4c61e5 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image030.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image031.png b/content/zh/post/zhou-yuxiang/img/02/clip_image031.png new file mode 100644 index 0000000000000000000000000000000000000000..7f937d527c8c0d5a48e70131c6bbedf7a31bdff1 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image031.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image032.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e912b16d33258d017a9857950f244e2237f3d364 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image032.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image033.png b/content/zh/post/zhou-yuxiang/img/02/clip_image033.png new file mode 100644 index 0000000000000000000000000000000000000000..f76e336daba4d741bf3ddf86a2bfde56948366be Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image033.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image034.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a1f2c5bdc5cb3fda73ab5dfa1cf40efdb850a84 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image034.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image035.png b/content/zh/post/zhou-yuxiang/img/02/clip_image035.png new file mode 100644 index 0000000000000000000000000000000000000000..ce48b420a427720474cb36dc4dc09599e221c765 Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image035.png differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image036.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d685749af895d931e31b77d7dc4cfc96792e02ed Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image036.jpg differ diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image037.png b/content/zh/post/zhou-yuxiang/img/02/clip_image037.png new file mode 100644 index 
0000000000000000000000000000000000000000..a60dde9fa3cda323b83c99f248ae1232f70f088b
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image037.png differ
diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image038.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image038.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..408046c6bb907773c068d42879b19add5733b0d6
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image038.jpg differ
diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image039.png b/content/zh/post/zhou-yuxiang/img/02/clip_image039.png
new file mode 100644
index 0000000000000000000000000000000000000000..cc5140a74f855483ac258dc1ee0b90f4e81c2dc6
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image039.png differ
diff --git a/content/zh/post/zhou-yuxiang/img/02/clip_image040.jpg b/content/zh/post/zhou-yuxiang/img/02/clip_image040.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8b8b19b70e4e0b5d7101decc0bcf08fa474498b3
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/02/clip_image040.jpg differ
diff --git a/content/zh/post/zhou-yuxiang/img/03/.keep b/content/zh/post/zhou-yuxiang/img/03/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/zh/post/zhou-yuxiang/img/03/clip_image041.png b/content/zh/post/zhou-yuxiang/img/03/clip_image041.png
new file mode 100644
index 0000000000000000000000000000000000000000..a1932633f52f20b0ad6b75a3dce71c045b4c5b49
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/03/clip_image041.png differ
diff --git a/content/zh/post/zhou-yuxiang/img/03/clip_image042.jpg b/content/zh/post/zhou-yuxiang/img/03/clip_image042.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2a8683f1102703171959d29df94cad6246c61123
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/03/clip_image042.jpg differ
diff --git a/content/zh/post/zhou-yuxiang/img/03/clip_image043.png b/content/zh/post/zhou-yuxiang/img/03/clip_image043.png
new file mode 100644
index 0000000000000000000000000000000000000000..a6572f3ab870acf5f17aa335c8285742165d1ca4
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/03/clip_image043.png differ
diff --git a/content/zh/post/zhou-yuxiang/img/03/clip_image044.jpg b/content/zh/post/zhou-yuxiang/img/03/clip_image044.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6c98dd1af235ce16cb6594d6f1b808df514c7e23
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/03/clip_image044.jpg differ
diff --git a/content/zh/post/zhou-yuxiang/img/03/clip_image045.png b/content/zh/post/zhou-yuxiang/img/03/clip_image045.png
new file mode 100644
index 0000000000000000000000000000000000000000..9f20ba11906ae6060a5bcb6a6cd66e6778484767
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/03/clip_image045.png differ
diff --git a/content/zh/post/zhou-yuxiang/img/03/clip_image046.png b/content/zh/post/zhou-yuxiang/img/03/clip_image046.png
new file mode 100644
index 0000000000000000000000000000000000000000..286652e99fe8c8386b090f583c6e3d5f63a955fe
Binary files /dev/null and b/content/zh/post/zhou-yuxiang/img/03/clip_image046.png differ
diff --git "a/content/zh/post/zhou-yuxiang/opengauss\347\216\257\345\242\203\351\205\215\347\275\256.md" "b/content/zh/post/zhou-yuxiang/opengauss\347\216\257\345\242\203\351\205\215\347\275\256.md"
new file mode 100644
index 0000000000000000000000000000000000000000..fb244e89963dfe2b5fbc94a07a995620f44c149e
--- /dev/null
+++ "b/content/zh/post/zhou-yuxiang/opengauss\347\216\257\345\242\203\351\205\215\347\275\256.md"
@@ -0,0 +1,341 @@
++++
+
+title = "openGauss Environment Configuration Lab"
+date = "2021-12-05"
+tags = ["openGauss environment configuration"]
+archives = "2021-12"
+author = "zhou-yuxiang"
+summary = "Getting started with openGauss community development"
+times = "17:30"
+
++++
+
+I. Install the CentOS system in a virtual machine
+
+Reference: [openGauss——VMware安装 | C1everF0x's Blog](https://c1everf0x.github.io/2021/04/10/openGauss——VMware安装/)
+
+Create a user:
+
+![img](../img/01/clip_image002.png)
+
+After the installation, check the system version:
+
+![img](../img/01/clip_image004.jpg)
+
+![img](../img/01/clip_image006.png)
+
+II. Network configuration
+
+Click "Installation Destination", then click "Done" to go back; the default settings are fine. Next, click "Network & Host Name" and switch Ethernet on.
+
+Pick a host name of your own and note the IP address; remember both pieces of information.
+
+![img](../img/01/clip_image008.png)
+
+![img](../img/01/clip_image010.png)
+
+![img](../img/01/clip_image012.png)
+
+Problem 1: the virtual machine can ping the host, but the host cannot ping the virtual machine.
+
+Reference:
+
+https://blog.csdn.net/weixin_43837229/article/details/94733475?utm_medium=distribute.pc_relevant.none-task-blog-2~default~baidujs_title~default-1.control&spm=1001.2101.3001.4242
+
+The VM can ping the host:
+
+![img](../img/01/clip_image014.png)
+
+The host pings the VM's IP and cannot reach it:
+
+![img](../img/01/clip_image016.png)
+
+Solution: the VM's IP as seen from the host does not match the IP configured in the VM itself.
+
+![img](../img/01/clip_image018.jpg)
+
+Taking Windows 10 as an example, open Settings > Network & Internet > Network and Sharing Center > Change adapter settings, and find the virtual adapter shown below.
+
+![img](../img/01/clip_image020.jpg)
+
+Right-click it and choose Properties, then find:
+
+![img](../img/02/clip_image022.jpg)
+
+Right-click again and choose Properties, then find:
+
+![img](../img/02/clip_image024.png)
+
+Now pings go through in both directions, host to VM and VM to host.
+
+Result:
+
+![img](../img/02/clip_image026.png)
+
+Problem 2: SSH cannot connect.
+
+Failure:
+
+![img](../img/02/clip_image028.jpg)
+
+After some research the problem was solved. The key point is that having the ssh command does not mean an SSH server is running: the ssh command we normally run in PowerShell exists because Windows 10 Professional enables the OpenSSH Client by default, so to SSH into Windows 10 remotely, the SSH server side has to be enabled as well.
+
+Steps to fix it:
+
+1. Open Settings > Apps, find Optional features, and click into it.
+
+![img](../img/02/clip_image030.png)
+
+2. On the Optional features page, click "Add a feature", find "OpenSSH Server", and install it.
+
+![img](../img/02/clip_image032.jpg)
+
+3. Next, start the SSH server service: press Win+R to open Run, type services.msc, and press Enter.
+
+![img](../img/02/clip_image034.png)
+
+4. In the Services list, find the two services OpenSSH SSH Server and OpenSSH Authentication Agent, start them, then right-click > Properties and set them to start automatically.
+
+![img](../img/02/clip_image036.jpg)
+
+Success:
+
+![img](../img/02/clip_image038.jpg)
+
+Problem 3: "The SSH server rejected the password, please try again."
+
+![img](../img/02/clip_image040.jpg)
+
+The VM can SSH into itself, but the host still cannot SSH into the VM, even though the password is correct.
+
+After looking up and trying many fixes, none of which helped, I decided to take a different route.
+
+Final solution: install the openGauss database in a container.
+
+1. Install curl:
+
+   sudo apt install curl
+
+2. Install Docker:
+
+   curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
+
+3. Run the opengauss image:
+
+   sudo docker run --name opengauss --privileged=true -d -p 5432:5432 -e GS_PASSWORD=Enmo@123 enmotech/opengauss:latest
+
+4. Enter the container:
+
+   sudo docker exec -it opengauss bash
+
+5. To connect to the database, switch to the omm user and connect with gsql:
+
+![img](../img/03/clip_image042.jpg)
+
+Starting the image a second time: start the container first, then enter a shell.
+
+1. The container must be started first:
+
+   sudo docker start <container ID>
+
+2. Then enter a shell with the command below:
+
+   sudo docker exec -it <container ID> bash
+
+3. Copy files from the host into the container:
+
+   sudo docker cp <host path> <container ID>:<container path>
+
+If you want to edit a configuration file inside the container, for example an nginx configuration, note that the container ships with no editor by default, so install one:
+
+   sudo apt-get update
+
+   sudo apt-get install vim
+
+You can also edit a file by replacing it:
+
+   sudo docker cp <container ID>:/path/to/file.ext .    // copy it out and edit it
+
+   sudo docker cp file.ext <container ID>:/path/to/file.ext    // copy it back when done
+
+4. After editing the container, commit the changes into the image, because the next time the site is updated, the image is updated first and a new container is created from it:
+
+   sudo docker commit <container ID> <image name>
+
+![img](../img/03/clip_image044.jpg)
+
+Usage: the connection succeeds.
+
+![img](../img/03/clip_image046.jpg)
+
diff --git "a/content/zh/post/zhou-yuxiang/opengauss\350\247\243\346\236\220\345\231\250.md" "b/content/zh/post/zhou-yuxiang/opengauss\350\247\243\346\236\220\345\231\250.md"
new file mode 100644
index 0000000000000000000000000000000000000000..053ba411f530054d5f890f9bee70084a57b42ae4
--- /dev/null
+++ "b/content/zh/post/zhou-yuxiang/opengauss\350\247\243\346\236\220\345\231\250.md"
@@ -0,0 +1,366 @@
++++
+
+title = "The openGauss Parser"
+date = "2021-12-05"
+tags = ["openGauss parser"]
+archives = "2021-12"
+author = "zhou-yuxiang"
+summary = "Getting started with openGauss community development"
+times = "17:30"
+
++++
+
+openGauss Parser Lab Report
+
+I. Lexical analysis
+
+- File location: src/common/backend/parser/scan.l defines the lexical rules and is compiled with Lex to generate the scan.cpp file.
+
+- Principle: following the SQL standard, it defines and recognizes the keywords, identifiers, operators, constants, and terminal symbols of the SQL language, and can apply more precise checks and operations to them. Lexical analysis splits a SQL statement into a series of tokens, each with its own category.
+
+- The code looks like this:
+
+1. Definitions take the following form:
+
+![img](../img/01/clip_image002.jpg)
+
+2. Checks take the following form:
+
+![img](../img/01/clip_image004.jpg)
+
+As you can see, when an identifier is encountered, further checks and operations are applied: a function first looks the token up in the keyword table and, if it is a keyword, returns the keyword's type; otherwise a function converts any uppercase letters to lowercase.
+
+- The functions used are:
+
+1. char* downcase_truncate_identifier(const char* ident, int len, bool warn)
+
+Converts the characters to lowercase, using the fixed offset between uppercase and lowercase letters.
+
+![img](../img/01/clip_image006.jpg)
+
+2. bool scanner_isspace(char ch)
+
+Returns true if the character found is whitespace.
+
+![img](../img/01/clip_image008.jpg)
+
+3. void truncate_identifier(char* ident, int len, bool warn)
+
+Truncates an over-long identifier.
+
+![img](../img/01/clip_image010.jpg)
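+
+Since the helper logic above appears only in screenshots, here is a minimal, self-contained C++ sketch of the behavior just described. It is an illustration, not the actual openGauss code: the full keyword table and identifier-length truncation are reduced to stand-ins.
+
+```cpp
+#include <cstdlib>
+#include <string>
+#include <unordered_map>
+
+// Stand-in for the scanner's keyword table (the real one covers all SQL keywords).
+static const std::unordered_map<std::string, int> kKeywords = {
+    {"select", 1}, {"from", 2}, {"where", 3}};
+
+// cf. scanner_isspace: true if the character found is whitespace.
+bool scanner_isspace(char ch) {
+    return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\f';
+}
+
+// cf. downcase_truncate_identifier: lowercase A-Z using the fixed offset
+// between uppercase and lowercase letters (truncation and multibyte
+// handling are omitted in this sketch).
+char* downcase_truncate_identifier(const char* ident, int len, bool /*warn*/) {
+    char* result = static_cast<char*>(std::malloc(len + 1));
+    for (int i = 0; i < len; i++) {
+        char ch = ident[i];
+        if (ch >= 'A' && ch <= 'Z')
+            ch = static_cast<char>(ch + ('a' - 'A'));
+        result[i] = ch;
+    }
+    result[len] = '\0';
+    return result;
+}
+
+// What the identifier rule does in spirit: keyword lookup first,
+// otherwise the token is treated as a plain (lowercased) identifier.
+int classify_token(const std::string& tok) {
+    auto it = kKeywords.find(tok);
+    return it != kKeywords.end() ? it->second : 0;  // 0 = plain identifier
+}
+```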
+
+II. Syntax analysis
+
+- File location: src/common/backend/parser/gram.y defines the grammar rules and is compiled with Yacc to generate the gram.cpp file.
+
+- Principle: for the different kinds of SQL statements, a series of structs expressing a Statement is defined (these structs are usually named with the suffix Stmt) to hold the results of syntax analysis.
+
+- The struct looks like this (a condensed sketch follows at the end of this section):
+
+![img](../img/01/clip_image012.jpg)
+
+Each item in the struct corresponds to a sub-structure, and the program assigns it according to the different cases. The cases are:
+
+![img](../img/01/clip_image014.jpg)
+
+![img](../img/01/clip_image016.jpg)
+
+![img](../img/01/clip_image018.jpg)
+
+These forms are processed further and recursively, and are eventually reduced to the basic simple_select form. From the simple_select grammar structure you can see that a simple query statement consists of the following clauses: distinctClause for removing duplicate rows, the target attributes targetList, the SELECT INTO clause intoClause, the FROM clause fromClause, the WHERE clause whereClause, the GROUP BY clause groupClause, the HAVING clause havingClause, the window clause windowClause, and the plan_hint clause. Once the simple_select grammar structure is successfully matched, a Statement struct is created and the individual clauses are assigned into it.
+
+The other clauses of simple_select, such as distinctClause, groupClause, and havingClause, are parsed in a similar way, and other SQL commands, such as CREATE, INSERT, UPDATE, and DELETE, are handled much like the SELECT command.
+
+- Function used:
+
+![img](../img/01/clip_image020.jpg)
+
+Logic: after the SelectStmt struct is created, its fields are filled in, yielding the parse tree.
+
+The parser it generates is invoked by raw_parser in the file src/common/backend/parser/parser.cpp:
+
+![img](../img/02/clip_image022.jpg)
+
+It finally returns a List that is used by the later steps such as semantic analysis and query rewriting; each ListCell in that List holds one parse tree.
+
+![img](../img/02/clip_image024.jpg)
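+
+As promised above, here is a condensed C++ sketch of what the simple_select action builds. This is an illustration under simplifying assumptions: the real SelectStmt is defined in the parse-node headers and uses the Node/List machinery rather than std::vector.
+
+```cpp
+#include <utility>
+#include <vector>
+
+struct Node;  // opaque base type for expressions and clauses
+
+// Condensed stand-in for the SelectStmt parse node.
+struct SelectStmt {
+    std::vector<Node*> distinctClause;  // DISTINCT [ON (...)]
+    Node* intoClause = nullptr;         // SELECT INTO target
+    std::vector<Node*> targetList;      // target attributes
+    std::vector<Node*> fromClause;      // FROM items
+    Node* whereClause = nullptr;        // WHERE condition
+    std::vector<Node*> groupClause;     // GROUP BY items
+    Node* havingClause = nullptr;       // HAVING condition
+    std::vector<Node*> windowClause;    // WINDOW definitions
+    // sort, limit, and locking clauses and plan hints omitted
+};
+
+// In spirit, the simple_select action: allocate the statement node and
+// copy each matched clause ($1..$n in the Yacc rule) into its field.
+SelectStmt* makeSimpleSelect(std::vector<Node*> targets,
+                             std::vector<Node*> from, Node* where) {
+    SelectStmt* n = new SelectStmt();
+    n->targetList = std::move(targets);
+    n->fromClause = std::move(from);
+    n->whereClause = where;
+    return n;
+}
+```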
+
+III. Semantic analysis
+
+- File location: the main entry file is src/common/backend/parser/analyze.cpp, and the entry function is parse_analyze.
+
+- Principle: the semantic analysis module runs after lexical and syntax analysis and checks whether a SQL command complies with the semantic rules and can execute correctly. Semantic analysis is the job of the parse_analyze function in analyze.cpp. From the parse tree produced by lexical and syntax analysis, parse_analyze generates a ParseState struct to record the state of semantic analysis, then calls the transformStmt function to process the command according to its type, and finally produces the query tree.
+
+- ParseState holds a lot of intermediate information for semantic analysis, such as the original SQL command, the range table, join expressions, raw WINDOW clauses, and FOR UPDATE/FOR SHARE clauses. The struct is initialized in the semantic analysis entry function parse_analyze, stores different intermediate information in transformStmt depending on the Stmt, and is released once semantic analysis completes. The ParseState structure is shown below.
+
+![img](../img/02/clip_image026.jpg)
+
+During semantic analysis, the parse tree parseTree is wrapped in Node nodes. The Node struct has only one field, of the NodeTag enum type, which is used to recognize the different processing cases; SelectStmt, for example, corresponds to the NodeTag value T_SelectStmt. The Node structure is shown below.
+
+![img](../img/02/clip_image028.jpg)
+
+Based on the NodeTag value, transformStmt converts the parse tree into the corresponding Stmt struct and calls the matching semantic analysis function to process it (a minimal sketch of this dispatch follows the processing flow below).
+
+![img](../img/02/clip_image030.jpg)
+
+openGauss handles nine NodeTag cases in the semantic analysis phase:
+
+| NodeTag             | Transform function         |
+| ------------------- | -------------------------- |
+| T_InsertStmt        | transformInsertStmt        |
+| T_DeleteStmt        | transformDeleteStmt        |
+| T_UpdateStmt        | transformUpdateStmt        |
+| T_MergeStmt         | transformMergeStmt         |
+| T_SelectStmt        | transformSelectStmt        |
+| T_DeclareCursorStmt | transformDeclareCursorStmt |
+| T_ExplainStmt       | transformExplainStmt       |
+| T_CreateTableAsStmt | transformCreateTableAsStmt |
+| T_CreateModelStmt   | transformCreateModelStmt   |
+
+transformSelectStmt:
+
+![img](../img/02/clip_image032.jpg)
+
+Call relationships:
+
+![img](../img/02/clip_image034.jpg)
+
+Taking the transformSelectStmt function, which handles the basic SELECT command, as an example, its processing flow is as follows.
+
+(1) Create a new Query node and set commandType to CMD_SELECT.
+(2) Check whether the SelectStmt contains a WITH clause; if it does, call transformWithClause to handle it.
+(3) Call transformFromClause to process the FROM clause.
+(4) Call transformTargetList to process the target attributes.
+(5) If the operator "+" is present, call transformOperatorPlus to convert it into an outer join.
+(6) Call transformWhereClause to process the WHERE clause and the HAVING clause.
+(7) Call transformSortClause to process the ORDER BY clause.
+(8) Call transformGroupClause to process the GROUP BY clause.
+(9) Call transformDistinctClause or transformDistinctOnClause to process the DISTINCT clause.
+(10) Call transformLimitClause to process the LIMIT and OFFSET clauses.
+(11) Call transformWindowDefinitions to process WINDOW clauses.
+(12) Call resolveTargetListUnknowns to treat remaining unknown types as text.
+(13) Call transformLockingClause to process the FOR UPDATE clause.
+(14) Handle other cases, such as INSERT statements and foreign tables.
+(15) Return the query tree.
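+
+As noted above, here is a minimal C++ sketch of the transformStmt dispatch, with the NodeTag enum and the handlers reduced to the SELECT case (a simplification for illustration, not the real implementation in analyze.cpp):
+
+```cpp
+#include <cstdio>
+
+// Reduced NodeTag enum; the real one enumerates every parse-node type.
+enum NodeTag { T_SelectStmt, T_InsertStmt, T_UpdateStmt, T_DeleteStmt };
+
+struct Node { NodeTag type; };          // every parse node starts with its tag
+struct Query { int commandType = 0; };  // stand-in for the output query tree
+struct ParseState {};                   // stand-in for the analysis state
+
+// Toy handler: the real transformSelectStmt performs steps (1)-(15) above.
+Query* transformSelectStmt(ParseState* /*pstate*/, Node* /*stmt*/) {
+    Query* qry = new Query();
+    qry->commandType = 1;  // stands in for CMD_SELECT
+    return qry;
+}
+
+// Dispatch on the NodeTag, as transformStmt does for its nine cases.
+Query* transformStmt(ParseState* pstate, Node* parseTree) {
+    switch (parseTree->type) {
+        case T_SelectStmt:
+            return transformSelectStmt(pstate, parseTree);
+        // case T_InsertStmt: return transformInsertStmt(pstate, ...);
+        // ...the remaining cases from the table above
+        default:
+            std::fprintf(stderr, "unrecognized node type: %d\n",
+                         static_cast<int>(parseTree->type));
+            return nullptr;
+    }
+}
+```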
+
+IV. The overall entry function
+
+![img](../img/02/clip_image036.jpg)
+
+- Location: src/gausskernel/process/tcop/postgres.cpp
+
+1. The pg_parse_query function is called with the user's command as its argument and generates parsetree_list.
+
+![img](../img/02/clip_image038.jpg)
+
+Part of the pg_parse_query code:
+
+![img](../img/02/clip_image040.jpg)
+
+2. The pg_analyze_and_rewrite function is then called with the list of parse trees as its argument and returns the list of query trees; this performs the semantic analysis.
+
+![img](../img/03/clip_image042.jpg)
+
+3. The pg_analyze_and_rewrite function calls the parse_analyze function to carry out semantic analysis.
+
+![img](../img/03/clip_image044.jpg)
+
+Call flow diagram:
+
+![img](../img/03/clip_image046.png)
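+
+To tie the phases together, here is an end-to-end C++ sketch of the pipeline this post walks through. The shapes are simplified on purpose: the real functions take the raw query string plus parameter-type information and work with List*/Node* rather than std::vector, and the rewrite step that follows analysis is omitted.
+
+```cpp
+#include <string>
+#include <vector>
+
+struct Node;   // parse tree node (output of scan.l + gram.y)
+struct Query;  // query tree (output of semantic analysis)
+
+// Phase 1 stub: the real pg_parse_query drives raw_parser and returns
+// one parse tree per SQL statement in the input string.
+std::vector<Node*> pg_parse_query(const std::string& /*query_string*/) {
+    return {};
+}
+
+// Phase 2 stub: the real parse_analyze builds a ParseState, calls
+// transformStmt, and returns the resulting query tree.
+Query* parse_analyze(Node* /*parseTree*/, const std::string& /*sourceText*/) {
+    return nullptr;
+}
+
+// In spirit, pg_analyze_and_rewrite: semantically analyze each parse
+// tree in turn, collecting one query tree per statement.
+std::vector<Query*> pg_analyze_and_rewrite(
+        const std::vector<Node*>& parsetree_list,
+        const std::string& query_string) {
+    std::vector<Query*> querytree_list;
+    for (Node* parsetree : parsetree_list) {
+        querytree_list.push_back(parse_analyze(parsetree, query_string));
+    }
+    return querytree_list;
+}
+
+// The overall flow driven from postgres.cpp for one SQL string.
+std::vector<Query*> process_query(const std::string& sql) {
+    std::vector<Node*> parsetree_list = pg_parse_query(sql);  // lex + parse
+    return pg_analyze_and_rewrite(parsetree_list, sql);       // analyze
+}
+```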
+
\ No newline at end of file
diff --git a/content/zh/prepare/prepare.md b/content/zh/prepare/prepare.md
index f713a4aecbd00a3a777ded2a39f3a7ea62ad5bac..a188c00b94daded79ba19431c5616412cb426e16 100644
--- a/content/zh/prepare/prepare.md
+++ b/content/zh/prepare/prepare.md
@@ -1,5 +1,5 @@
-1. Refer to http://git.mydoc.io/?t=179267 to register a Gitee account.
+1. Refer to https://gitee.com/help/articles/4113 to register a Gitee account.
 2. Set your primary email address in Gitee (http://gitee.com/profile/emails).
 3. Sign the CLA at https://opengauss.org/zh/cla.html.
-4. Refer to http://git.mydoc.io/?t=180692 to prepare the git environment.
+4. Refer to https://gitee.com/help/articles/4107 to prepare the git environment.
 5. Understand the blog format.
\ No newline at end of file
diff --git a/themes/hugo-blog-jeffprod/layouts/_default/single.html b/themes/hugo-blog-jeffprod/layouts/_default/single.html
index b1220f9f993fe437fdc8db42ed901299ddab5cc6..ab149dc7cca55fd5274778b4c45490e3b1ca046b 100644
--- a/themes/hugo-blog-jeffprod/layouts/_default/single.html
+++ b/themes/hugo-blog-jeffprod/layouts/_default/single.html
@@ -31,10 +31,6 @@
    {{ i18n "blog_declare" }}{{ i18n "blog_declare_content" }}
    -
    - - {{ partial "comments.html" . }} -
    diff --git a/themes/hugo-blog-jeffprod/layouts/index.html b/themes/hugo-blog-jeffprod/layouts/index.html index e6b3a701a35269500946a03b84fb09d6973951df..116752a3d2e2f691d6ab1b9dfdff24fac32d6b15 100644 --- a/themes/hugo-blog-jeffprod/layouts/index.html +++ b/themes/hugo-blog-jeffprod/layouts/index.html @@ -57,7 +57,15 @@ {{ . }} {{ end }}
-
+
+                        {{ if .Params.img}}
+                        <img src="{{ .Params.img }}" />
+                        {{ else }}
+                        <img src="/img/default.png" />
+                        {{ end }}
+

    {{ .Params.summary }}...

    {{ i18n "blog_view_more" }}
    diff --git a/themes/hugo-blog-jeffprod/static/css/blog.css b/themes/hugo-blog-jeffprod/static/css/blog.css index 7e20c14aee9d9dbee4eb3b9721b42aef04951baf..fd5611715d0bc970ba70ef1ea0da2d2b6b4f1265 100644 --- a/themes/hugo-blog-jeffprod/static/css/blog.css +++ b/themes/hugo-blog-jeffprod/static/css/blog.css @@ -419,11 +419,12 @@ li{ font-family: FZLTHJW; } .container .tags { - margin-bottom: 20px; + margin-bottom: 10px; + max-height: 600px; + overflow: hidden auto; } .container .tags img { - vertical-align: text-bottom; - margin-right: 10px; + vertical-align: text-bottom; } .container .tags a { text-decoration: none; @@ -525,7 +526,8 @@ li{ .blog-detail-prop span { margin-right: 40px; } -.blog-detail-tags { +.blog-detail-header { + padding-bottom: 20px; margin-bottom: 40px; } .blog-content a { @@ -559,10 +561,23 @@ li{ } .blog-content p { font-size: 14px; - line-height: 32px; + line-height:24px; color: #4d4d4d; word-break: break-all; white-space: pre-wrap; + margin-bottom: 6px; +} +.blog-content p ~ul{ + margin-top: 16px; + margin-bottom: 16px; +} +.blog-content ul li,.blog-content ol li{ + line-height: 24px; + margin-bottom: 6px; +} +.blog-content ul li img,.blog-content ol li img{ + display: block; + margin: 10px 0; } .blog-content pre, .blog-content code { word-break: break-all; @@ -791,4 +806,53 @@ li{ .is-9 .tile, .guidance { margin-top: 40px; } +} + + + +pre { + display: block; + padding: 16px; + margin: 0 0 10px; + font-size: 14px; + line-height: 1.42857143; + color: #333; + word-break: break-all; + word-wrap: break-word; + background-color: #f6f8fa; + border: 1px solid #eaeaea; + border-radius: 3px; +} + +pre code { + padding: 0; + font-size: inherit; + color: inherit; + white-space: pre-wrap; + background-color: transparent; + border-radius: 0 +} +.blog-detail-header{ + border-bottom: 1px solid; +} + +table { + max-width: 100%; + background-color: transparent; + border-collapse: collapse; + border-spacing: 0; + border: 1px solid #ddd; + margin: 10px 0; +} + +table th,table td { + padding: 8px; + line-height: 20px; + text-align: left; + vertical-align: top; + border: 1px solid #ddd; + white-space: inherit; +} +table tbody>tr:nth-child(2n)>td{ + background-color: rgba(102,128,153,.05) } \ No newline at end of file diff --git a/themes/hugo-blog-jeffprod/static/img/default.png b/themes/hugo-blog-jeffprod/static/img/default.png new file mode 100644 index 0000000000000000000000000000000000000000..70823878926531de19293cb21a7b135cd8f59798 Binary files /dev/null and b/themes/hugo-blog-jeffprod/static/img/default.png differ